From a16533c43e91ce95266d653fd8f4f7e1706f1b5b Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Fri, 24 Mar 2023 13:35:33 -0700 Subject: [PATCH] freedreno/drm: Make threaded-submit optional We've had gpu-sched support in the kernel for a while now, so our fence waits are not synchronous in the ioctl path. The only reason this path still exists is that virtgpu does not have gpu-sched. So let's disable it on msm. Signed-off-by: Rob Clark Part-of: --- src/freedreno/drm/freedreno_device.c | 2 +- src/freedreno/drm/freedreno_priv.h | 6 ++++++ src/freedreno/drm/freedreno_ringbuffer_sp.c | 21 ++++++++++++++++----- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/src/freedreno/drm/freedreno_device.c b/src/freedreno/drm/freedreno_device.c index a51b133..cff09fd 100644 --- a/src/freedreno/drm/freedreno_device.c +++ b/src/freedreno/drm/freedreno_device.c @@ -197,7 +197,7 @@ fd_device_del(struct fd_device *dev) _mesa_hash_table_destroy(dev->handle_table, NULL); _mesa_hash_table_destroy(dev->name_table, NULL); - if (util_queue_is_initialized(&dev->submit_queue)) + if (fd_device_threaded_submit(dev)) util_queue_destroy(&dev->submit_queue); if (dev->closefd) diff --git a/src/freedreno/drm/freedreno_priv.h b/src/freedreno/drm/freedreno_priv.h index 9c17312..5926162 100644 --- a/src/freedreno/drm/freedreno_priv.h +++ b/src/freedreno/drm/freedreno_priv.h @@ -271,6 +271,12 @@ struct fd_device { struct util_queue submit_queue; }; +static inline bool +fd_device_threaded_submit(struct fd_device *dev) +{ + return util_queue_is_initialized(&dev->submit_queue); +} + #define foreach_submit(name, list) \ list_for_each_entry(struct fd_submit, name, list, node) #define foreach_submit_safe(name, list) \ diff --git a/src/freedreno/drm/freedreno_ringbuffer_sp.c b/src/freedreno/drm/freedreno_ringbuffer_sp.c index 12de02a..57057c8 100644 --- a/src/freedreno/drm/freedreno_ringbuffer_sp.c +++ b/src/freedreno/drm/freedreno_ringbuffer_sp.c @@ -292,11 +292,16 @@ flush_deferred_submits(struct
fd_device *dev) DEBUG_MSG("enqueue: %u", submit->fence); - util_queue_add_job(&submit->pipe->dev->submit_queue, - submit, fence, - fd_submit_sp_flush_execute, - fd_submit_sp_flush_cleanup, - 0); + if (fd_device_threaded_submit(submit->pipe->dev)) { + util_queue_add_job(&submit->pipe->dev->submit_queue, + submit, fence, + fd_submit_sp_flush_execute, + fd_submit_sp_flush_cleanup, + 0); + } else { + fd_submit_sp_flush_execute(submit, NULL, 0); + fd_submit_sp_flush_cleanup(submit, NULL, 0); + } } static bool @@ -393,6 +398,9 @@ fd_pipe_sp_flush(struct fd_pipe *pipe, uint32_t fence) { struct fd_device *dev = pipe->dev; + if (!fd_fence_before(pipe->last_submit_fence, fence)) + return; + MESA_TRACE_FUNC(); simple_mtx_lock(&dev->submit_lock); @@ -403,6 +411,9 @@ fd_pipe_sp_flush(struct fd_pipe *pipe, uint32_t fence) simple_mtx_unlock(&dev->submit_lock); + if (!fd_device_threaded_submit(pipe->dev)) + return; + /* Once we are sure that we've enqueued at least up to the requested * submit, we need to be sure that submitq has caught up and flushed * them to the kernel -- 2.7.4