From: TaeHyeon Jeong Date: Thu, 10 Oct 2024 05:39:35 +0000 (+0900) Subject: tpl: Modify tpl_gcond_timed_wait() X-Git-Tag: accepted/tizen/unified/20241017.114708~4 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=refs%2Fchanges%2F81%2F318881%2F1;p=platform%2Fcore%2Fuifw%2Flibtpl-egl.git tpl: Modify tpl_gcond_timed_wait() AS-IS - Calculate the end_time for g_cond_wait_until() in tpl_gcond_timed_wait(). PROBLEM - When a spurious wakeup occurs, the end_time is recalculated, so it may wait longer than the expected waiting time. TO-BE - Calculating the end_time is performed only once. Change-Id: Ia7c85e94549469ce3feae4b47b75945e23b7c88d --- diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 466baef..1befeda 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -486,11 +486,8 @@ tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex) } tpl_result_t -tpl_gcond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, - int64_t timeout_ms) +tpl_gcond_wait_until(tpl_gcond *gcond, tpl_gmutex *gmutex, gint64 end_time) { - gint64 end_time = g_get_monotonic_time() + - (timeout_ms * G_TIME_SPAN_MILLISECOND); if (!g_cond_wait_until(gcond, gmutex, end_time)) return TPL_ERROR_TIME_OUT; diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 40954c0..304fa74 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -42,6 +42,26 @@ struct _tpl_gsource_functions { void (*finalize) (tpl_gsource *source); }; +/** + * wrapping tpl_gcond_wait_until() + * + * Calculate the end_time to be used in tpl_gcond_wait_until() using timeout_ms + * + * It can use the if-statement to specify the code block to execute according to the result. + * - TRUE : gcond is signalled before the end_time OR it is a spurious wakeup. + * - FALSE : end_time has passed. + * + * gcond : Pointer to tpl_gcond. + * gmutex : Pointer to tpl_gmutex. + * timeout_ms : int64_t time(ms) to wait.
+ */ +#define tpl_gcond_timed_wait(gcond, gmutex, timeout_ms) \ + for (gint64 end_time = g_get_monotonic_time() + \ + ((timeout_ms) * G_TIME_SPAN_MILLISECOND), result = 1, prev = 1; \ + prev; prev = result, \ + result = result ? !tpl_gcond_wait_until(gcond, gmutex, end_time) : 0) \ + if (result) + /** * Create a new tpl_gthread * @@ -229,12 +249,12 @@ tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex); * * @param gcond Pointer to tpl_gcond. * @param gmutex Pointer to tpl_gmutex. - * @param timeout_ms int64_t time(ms) to wait. + * @param end_time The monotonic time to wait until. * * @return tpl_result_t TPL_ERROR_NONE or TPL_ERROR_TIME_OUT */ tpl_result_t -tpl_gcond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, int64_t timeout_ms); +tpl_gcond_wait_until(tpl_gcond *gcond, tpl_gmutex *gmutex, gint64 end_time); /** * wrapping g_cond_signal() diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 276e266..763959f 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2457,8 +2457,6 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) wl_egl_display = wl_egl_surface->wl_egl_display; TPL_CHECK_ON_NULL_RETURN(wl_egl_display); - tpl_result_t result_of_waiting = TPL_ERROR_NONE; - TPL_INFO("[SURFACE_FINI][BEGIN]", "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", wl_egl_surface, @@ -2472,16 +2470,17 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) wl_egl_surface->buffers_finalize_done = TPL_FALSE; tpl_gthread_add_idle(wl_egl_display->thread, __idle_cb_buffers_finalize, wl_egl_surface); - while (!wl_egl_surface->buffers_finalize_done && - result_of_waiting != TPL_ERROR_TIME_OUT) { - result_of_waiting = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond, - &wl_egl_surface->surf_mutex, - BUFFER_CLEAR_WAITING_TIMEOUT_MS); - if (result_of_waiting == TPL_ERROR_TIME_OUT) - TPL_WARN("buffer clear timeout. 
wl_egl_surface(%p)", wl_egl_surface); + tpl_gcond_timed_wait(&wl_egl_surface->surf_cond, + &wl_egl_surface->surf_mutex, + BUFFER_CLEAR_WAITING_TIMEOUT_MS) + { + if (wl_egl_surface->buffers_finalize_done) + break; + } else { + TPL_WARN("buffer clear timeout. wl_egl_surface(%p)", wl_egl_surface); wl_egl_surface->buffers_finalize_done = TPL_TRUE; - wl_egl_surface->need_force_release = (result_of_waiting != TPL_ERROR_NONE); + wl_egl_surface->need_force_release = TPL_TRUE; } if (wl_egl_surface->surf_source) { @@ -2864,18 +2863,16 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TPL_OBJECT_UNLOCK(surface); tpl_gmutex_lock(&wl_egl_surface->surf_mutex); if (wl_egl_surface->reset == TPL_TRUE) { - tpl_result_t result_of_waiting = TPL_ERROR_NONE; wl_egl_surface->buffers_commit_done = TPL_FALSE; tpl_gthread_add_idle(wl_egl_display->thread, __idle_cb_check_buffers_commit, wl_egl_surface); - do { - result_of_waiting = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond, - &wl_egl_surface->surf_mutex, - CHECK_COMMIT_TIMEOUT_MS); - } while (result_of_waiting != TPL_ERROR_TIME_OUT && - !wl_egl_surface->buffers_commit_done); - - if (result_of_waiting == TPL_ERROR_TIME_OUT) { + tpl_gcond_timed_wait(&wl_egl_surface->surf_cond, + &wl_egl_surface->surf_mutex, + CHECK_COMMIT_TIMEOUT_MS) + { + if (wl_egl_surface->buffers_commit_done) + break; + } else { TPL_WARN("wl_egl_surface(%p) timeout error occured", wl_egl_surface); _print_buffer_lists(wl_egl_surface); } @@ -2889,34 +2886,33 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS); if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { - tpl_result_t wait_result; - tpl_bool_t is_empty = TPL_FALSE; TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. 
surface(%p)", wl_egl_surface->tbm_queue, surface); tpl_gmutex_lock(&wl_egl_surface->surf_mutex); tpl_gsource_send_message(wl_egl_surface->surf_source, FORCE_FLUSH); - do { - wait_result = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond, - &wl_egl_surface->surf_mutex, - FORCE_FLUSH_TIMEOUT_MS); - if (wait_result == TPL_ERROR_TIME_OUT) break; - + tpl_gcond_timed_wait(&wl_egl_surface->surf_cond, + &wl_egl_surface->surf_mutex, + FORCE_FLUSH_TIMEOUT_MS) + { tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex); - is_empty = __tpl_list_is_empty(wl_egl_surface->buffers); + if (__tpl_list_is_empty(wl_egl_surface->buffers)) { + tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex); + break; + } tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex); - } while (!is_empty); - - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } else { + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - if (wait_result == TPL_ERROR_TIME_OUT) { TPL_ERR("Failed to queue force flush. wl_egl_surface(%p) tbm_queue(%p)", wl_egl_surface, wl_egl_surface->tbm_queue); TPL_OBJECT_LOCK(surface); return NULL; } + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + wl_egl_surface->vblank_done = TPL_TRUE; if (tbm_surface_queue_can_dequeue(wl_egl_surface->tbm_queue, 0)) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 9c96a3e..500b574 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -1059,9 +1059,15 @@ _tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface) if (need_to_wait) { tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); - wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond, - &wl_vk_buffer->mutex, - 16); /* 16ms */ + tpl_gcond_timed_wait(&wl_vk_buffer->cond, + &wl_vk_buffer->mutex, + 16) /* 16ms */ + { + wait_result = TPL_ERROR_NONE; + break; + } else { + wait_result = TPL_ERROR_TIME_OUT; + } tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); status = wl_vk_buffer->status;