From a2d12ce970827a2c1a1daa9748b70fe505b56200 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 18 Jan 2024 15:05:36 +0900
Subject: [PATCH] wl_egl: improve end-of-use guarantee for previous buffers

AS-IS
 - The dequeue thread waits for the wl-egl-thread to send a signal
   after committing the buffer that was last enqueued.

PROBLEMS
 - Complex mutex locking is required to access last_enq_buffer.
 - A complex validation process is required for last_enq_buffer.

TO-BE
 - The wl-egl-thread traverses the list of buffers in use and sends
   a signal once all buffers enqueued after the queue reset have been
   committed.

Change-Id: Ib81186c2cc1faf856372c0a668b47ffc615258eb
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_egl_thread.c | 144 ++++++++++++++++++----------------------
 1 file changed, 64 insertions(+), 80 deletions(-)

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index 474d7d6..14c3473 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -128,7 +128,6 @@ struct _tpl_wl_egl_surface {
 	tpl_list_t *buffers;
 	int buffer_cnt; /* the number of using wl_egl_buffers */
 	tpl_gmutex_rec buffers_mutex;
-	tbm_surface_h last_enq_buffer;
 	tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
@@ -158,6 +157,7 @@ struct _tpl_wl_egl_surface {
 	tpl_bool_t frontbuffer_activated;
 	tpl_bool_t buffers_finalize_done;
 	tpl_bool_t need_force_release;
+	tpl_bool_t buffers_commit_done;
 	/* To make sure that tpl_gsource has been successfully finalized. */
 	tpl_bool_t gsource_finalized;
@@ -271,8 +271,6 @@ static int
 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
 static void
 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
-static tpl_bool_t
-_check_tbm_surface_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface);
 static void
 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
 static tpl_wl_egl_buffer_t *
@@ -1830,7 +1828,6 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface)
 	wl_egl_surface->presentation_sync.fd = -1;
 	wl_egl_surface->sent_message = NONE_MESSAGE;
-	wl_egl_surface->last_enq_buffer = NULL;
 	wl_egl_surface->buffers = __tpl_list_alloc();
@@ -2382,8 +2379,6 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
 		wl_egl_surface->wl_egl_window = NULL;
 	}
-	wl_egl_surface->last_enq_buffer = NULL;
-
 	wl_egl_surface->wl_surface = NULL;
 	wl_egl_surface->wl_egl_display = NULL;
 	wl_egl_surface->tpl_surface = NULL;
@@ -2638,7 +2633,6 @@ _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
 	wl_egl_buffer->w_transform = -1;
 	tpl_gmutex_init(&wl_egl_buffer->mutex);
-	tpl_gcond_init(&wl_egl_buffer->cond);
 	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
 	__tpl_list_push_back(wl_egl_surface->buffers, (void *)wl_egl_buffer);
@@ -2655,8 +2649,55 @@ _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
 	return wl_egl_buffer;
 }
+static int
+__idle_cb_check_buffers_commit(void *data)
+{
+	tpl_wl_egl_surface_t wl_egl_surface(data);
+	tpl_bool_t is_waiting_commit = TPL_FALSE;
+	tpl_bool_t ret = TPL_FALSE;
+	tpl_list_node_t *node;
+
+	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+	if (wl_egl_surface->buffers_commit_done) {
+		tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+		return ret;
+	}
+
+	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
+	node = __tpl_list_get_front_node(wl_egl_surface->buffers);
+	do {
+		if (!node) break;
+
+		tpl_wl_egl_buffer_t wl_egl_buffer(__tpl_list_node_get_data(node));
+		tpl_gmutex_lock(&wl_egl_buffer->mutex);
+		is_waiting_commit = (wl_egl_buffer->status >= ENQUEUED &&
+							 wl_egl_buffer->status < COMMITTED);
+		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+		if (is_waiting_commit) break;
+	} while ((node = __tpl_list_node_next(node)));
+
+	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
+
+	wl_egl_surface->buffers_commit_done = !is_waiting_commit;
+
+	if (!is_waiting_commit) {
+		TPL_INFO("[ALL BUFFERS COMMITTED]",
+				 "wl_egl_surface(%p) consumed all previous buffers",
+				 wl_egl_surface);
+		ret = TPL_FALSE;
+		tpl_gcond_signal(&wl_egl_surface->surf_cond);
+	} else {
+		ret = TPL_TRUE;
+	}
+
+	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+	return ret;
+}
+
 #define CAN_DEQUEUE_TIMEOUT_MS 10000
 #define FORCE_FLUSH_TIMEOUT_MS 1000
+#define CHECK_COMMIT_TIMEOUT_MS 200
 static tbm_surface_h
 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
@@ -2677,39 +2718,25 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
 	TPL_OBJECT_UNLOCK(surface);
 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
 	if (wl_egl_surface->reset == TPL_TRUE) {
-		if (_check_tbm_surface_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer)) {
-			tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer;
-			tpl_wl_egl_buffer_t *enqueued_buffer =
-				_get_wl_egl_buffer(last_enq_buffer);
-
-			if (enqueued_buffer) {
-				tbm_surface_internal_ref(last_enq_buffer);
-				tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
-				tpl_gmutex_lock(&enqueued_buffer->mutex);
-				while (enqueued_buffer->status >= ENQUEUED &&
-					   enqueued_buffer->status < COMMITTED) {
-					tpl_result_t wait_result;
-					TPL_INFO("[DEQ_AFTER_RESET]",
-							 "wl_egl_surface(%p) waiting for previous wl_egl_buffer(%p) commit",
-							 wl_egl_surface, enqueued_buffer);
-
-					wait_result = tpl_gcond_timed_wait(&enqueued_buffer->cond,
-													   &enqueued_buffer->mutex,
-													   200); /* 200ms */
-					if (wait_result == TPL_ERROR_TIME_OUT) {
-						TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
-								 enqueued_buffer);
-						break;
-					}
-				}
-				tpl_gmutex_unlock(&enqueued_buffer->mutex);
-				tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-				tbm_surface_internal_unref(last_enq_buffer);
-			}
+		tpl_result_t result_of_waiting = TPL_ERROR_NONE;
+		wl_egl_surface->buffers_commit_done = TPL_FALSE;
+		tpl_gthread_add_idle(wl_egl_display->thread,
+							 __idle_cb_check_buffers_commit, wl_egl_surface);
+		do {
+			result_of_waiting = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
+													 &wl_egl_surface->surf_mutex,
+													 CHECK_COMMIT_TIMEOUT_MS);
+		} while (result_of_waiting != TPL_ERROR_TIME_OUT &&
+				 !wl_egl_surface->buffers_commit_done);
+
+		if (result_of_waiting == TPL_ERROR_TIME_OUT) {
+			TPL_WARN("wl_egl_surface(%p) timeout error occurred", wl_egl_surface);
+			_print_buffer_lists(wl_egl_surface);
 		}
-		wl_egl_surface->last_enq_buffer = NULL;
+		wl_egl_surface->buffers_commit_done = TPL_TRUE;
 	}
+	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
 	tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
@@ -3017,10 +3044,6 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
 	tpl_gmutex_unlock(&wl_egl_buffer->mutex);
-	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-	wl_egl_surface->last_enq_buffer = tbm_surface;
-	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
-
 	tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, tbm_surface);
 	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
@@ -3071,7 +3094,6 @@ __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message)
 	if (!wl_egl_surface->vblank_enable || wl_egl_surface->vblank_done) {
 		_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
-		tpl_gcond_signal(&wl_egl_buffer->cond);
 	} else {
 		tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
 		__tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers,
@@ -3172,7 +3194,6 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
 		if (ready_to_commit) {
 			_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
-			tpl_gcond_signal(&wl_egl_buffer->cond);
 		}
 		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
@@ -3212,7 +3233,6 @@ __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
 	tpl_gmutex_lock(&wl_egl_buffer->mutex);
 	_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
-	tpl_gcond_signal(&wl_egl_buffer->cond);
 	tpl_gmutex_unlock(&wl_egl_buffer->mutex);
 	/* If tdm error such as TIMEOUT occured,
@@ -3680,8 +3700,6 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
 	wl_egl_buffer->need_to_commit = TPL_FALSE;
 	wl_egl_buffer->status = COMMITTED;
-	if (wl_egl_surface->last_enq_buffer == wl_egl_buffer->tbm_surface)
-		wl_egl_surface->last_enq_buffer = NULL;
 	TPL_LOG_T("WL_EGL",
 			  "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
@@ -3854,7 +3872,6 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
 	tpl_gmutex_unlock(&wl_egl_buffer->mutex);
 	tpl_gmutex_clear(&wl_egl_buffer->mutex);
-	tpl_gcond_clear(&wl_egl_buffer->cond);
 	free(wl_egl_buffer);
 }
@@ -3887,36 +3904,3 @@ _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
 	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 }
-static tpl_bool_t
-_check_tbm_surface_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface)
-{
-	tpl_list_node_t *node = NULL;
-	tpl_bool_t ret = TPL_FALSE;
-
-	/* silent return */
-	if (!wl_egl_surface || !tbm_surface)
-		return ret;
-
-	if (!tbm_surface_internal_is_valid(tbm_surface))
-		return ret;
-
-	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
-	node = __tpl_list_get_front_node(wl_egl_surface->buffers);
-	do {
-		if (!node) break;
-		tpl_wl_egl_buffer_t wl_egl_buffer(__tpl_list_node_get_data(node));
-		if (wl_egl_buffer->tbm_surface == tbm_surface) {
-			ret = TPL_TRUE;
-			break;
-		}
-	} while ((node = __tpl_list_node_next(node)));
-
-	if (ret == TPL_FALSE) {
-		TPL_ERR("tbm_surface(%p) is not owned by wl_egl_surface(%p)",
-				tbm_surface, wl_egl_surface);
-	}
-
-	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
-
-	return ret;
-}
-- 
2.34.1
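
Illustrative sketch (not part of the patch above): the TO-BE flow amounts to "schedule an idle check on the event-loop thread, then block on a condition variable with a timeout until that check reports every enqueued buffer as committed". The standalone program below reproduces that pattern with plain GLib primitives (GMainLoop, GMutex, GCond), which is roughly what the tpl_gthread/tpl_gcond helpers used in the patch wrap. All names in it (commit_tracker, idle_cb_check_commits, wait_for_pending_commits, the 50 ms "commit" timer) are hypothetical stand-ins for wl_egl_surface, __idle_cb_check_buffers_commit and the CHECK_COMMIT_TIMEOUT_MS wait loop; it sketches the idea and is not the libtpl-egl implementation.

/* commit_wait_demo.c
 * build: gcc commit_wait_demo.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>
#include <stdio.h>

typedef struct {
	GMutex   mutex;
	GCond    cond;
	gboolean all_committed;   /* plays the role of buffers_commit_done */
	int      pending_commits; /* buffers enqueued but not yet committed */
} commit_tracker;

/* Runs on the main-loop thread (the wl-egl-thread in the patch): keep being
 * rescheduled until nothing is pending, then wake the waiting thread. */
static gboolean
idle_cb_check_commits(gpointer data)
{
	commit_tracker *t = data;
	gboolean done;

	g_mutex_lock(&t->mutex);
	done = (t->pending_commits == 0);
	if (done) {
		t->all_committed = TRUE;
		g_cond_signal(&t->cond);
	}
	g_mutex_unlock(&t->mutex);

	return done ? G_SOURCE_REMOVE : G_SOURCE_CONTINUE;
}

/* Simulates the commit path: one pending buffer is "committed" per tick. */
static gboolean
timeout_cb_commit_one(gpointer data)
{
	commit_tracker *t = data;
	int remaining;

	g_mutex_lock(&t->mutex);
	if (t->pending_commits > 0)
		t->pending_commits--;
	remaining = t->pending_commits;
	g_mutex_unlock(&t->mutex);

	return remaining > 0 ? G_SOURCE_CONTINUE : G_SOURCE_REMOVE;
}

/* Dequeue-thread side: schedule the idle check on the loop thread, then wait
 * with a deadline until it reports that every pending buffer is committed. */
static gboolean
wait_for_pending_commits(commit_tracker *t, gint64 timeout_ms)
{
	gint64 deadline = g_get_monotonic_time() + timeout_ms * G_TIME_SPAN_MILLISECOND;
	gboolean ok = TRUE;

	g_mutex_lock(&t->mutex);
	t->all_committed = FALSE;
	g_idle_add(idle_cb_check_commits, t);

	while (!t->all_committed) {
		if (!g_cond_wait_until(&t->cond, &t->mutex, deadline)) {
			ok = FALSE; /* timed out, analogous to TPL_ERROR_TIME_OUT */
			break;
		}
	}
	g_mutex_unlock(&t->mutex);

	return ok;
}

static gpointer
loop_thread_func(gpointer data)
{
	g_main_loop_run((GMainLoop *)data);
	return NULL;
}

int
main(void)
{
	commit_tracker t = { .all_committed = FALSE, .pending_commits = 3 };
	GMainLoop *loop = g_main_loop_new(NULL, FALSE);
	GThread *loop_thread;

	g_mutex_init(&t.mutex);
	g_cond_init(&t.cond);

	loop_thread = g_thread_new("loop", loop_thread_func, loop);
	g_timeout_add(50, timeout_cb_commit_one, &t); /* "commit" one buffer every 50 ms */

	printf("all committed: %s\n",
	       wait_for_pending_commits(&t, 1000) ? "yes" : "timed out");

	g_main_loop_quit(loop);
	g_thread_join(loop_thread);
	g_main_loop_unref(loop);
	return 0;
}

Compared with the AS-IS approach, the waiting thread never holds a per-buffer condition variable and never has to validate a possibly stale last_enq_buffer pointer: only the event-loop thread inspects the buffer list, and the waiter touches surface-level state under a single mutex, which is the simplification the PROBLEMS/TO-BE notes describe.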