void (*finalize) (tpl_gsource *source);
};
+/**
+ * Timed-wait loop built on tpl_gcond_wait_until().
+ *
+ * Computes an absolute end_time from @timeout_ms once, then expands to a
+ * loop header followed by a trailing `if`, so the caller writes:
+ *
+ *   tpl_gcond_timed_wait(gcond, gmutex, timeout_ms)
+ *   {
+ *       // Runs when gcond was signalled before end_time (or on a
+ *       // spurious wakeup) -- re-check your predicate here and `break`
+ *       // when it holds. NOTE: this block also runs once BEFORE the
+ *       // first wait, so a predicate that is already true skips waiting.
+ *   } else {
+ *       // Runs exactly once when end_time has passed (timeout).
+ *   }
+ *
+ * The dangling-else pairing with the trailing `if` is intentional; it is
+ * what lets the caller attach the timeout branch.
+ *
+ * gcond      : Pointer to tpl_gcond.
+ * gmutex     : Pointer to tpl_gmutex (must be held by the caller).
+ * timeout_ms : int64_t time (ms) to wait; evaluated once.
+ */
+#define tpl_gcond_timed_wait(gcond, gmutex, timeout_ms) \
+	for (gint64 tpl_tw_end_time_ = g_get_monotonic_time() + \
+			((timeout_ms) * G_TIME_SPAN_MILLISECOND), \
+			tpl_tw_result_ = 1, tpl_tw_prev_ = 1; \
+		tpl_tw_prev_; tpl_tw_prev_ = tpl_tw_result_, \
+		tpl_tw_result_ = tpl_tw_result_ ? \
+			!tpl_gcond_wait_until((gcond), (gmutex), tpl_tw_end_time_) : 0) \
+		if (tpl_tw_result_)
+
+
/**
* Create a new tpl_gthread
*
*
* @param gcond Pointer to tpl_gcond.
* @param gmutex Pointer to tpl_gmutex.
- * @param timeout_ms int64_t time(ms) to wait.
+ * @param end_time The monotonic time to wait until.
*
* @return tpl_result_t TPL_ERROR_NONE or TPL_ERROR_TIME_OUT
*/
tpl_result_t
-tpl_gcond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, int64_t timeout_ms);
+tpl_gcond_wait_until(tpl_gcond *gcond, tpl_gmutex *gmutex, gint64 end_time);
/**
* wrapping g_cond_signal()
wl_egl_display = wl_egl_surface->wl_egl_display;
TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
- tpl_result_t result_of_waiting = TPL_ERROR_NONE;
-
TPL_INFO("[SURFACE_FINI][BEGIN]",
"wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
wl_egl_surface,
wl_egl_surface->buffers_finalize_done = TPL_FALSE;
tpl_gthread_add_idle(wl_egl_display->thread,
__idle_cb_buffers_finalize, wl_egl_surface);
- while (!wl_egl_surface->buffers_finalize_done &&
- result_of_waiting != TPL_ERROR_TIME_OUT) {
- result_of_waiting = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
- &wl_egl_surface->surf_mutex,
- BUFFER_CLEAR_WAITING_TIMEOUT_MS);
- if (result_of_waiting == TPL_ERROR_TIME_OUT)
- TPL_WARN("buffer clear timeout. wl_egl_surface(%p)", wl_egl_surface);
+ tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
+ &wl_egl_surface->surf_mutex,
+ BUFFER_CLEAR_WAITING_TIMEOUT_MS)
+ {
+ if (wl_egl_surface->buffers_finalize_done)
+ break;
+ } else {
+ TPL_WARN("buffer clear timeout. wl_egl_surface(%p)", wl_egl_surface);
wl_egl_surface->buffers_finalize_done = TPL_TRUE;
- wl_egl_surface->need_force_release = (result_of_waiting != TPL_ERROR_NONE);
+ wl_egl_surface->need_force_release = TPL_TRUE;
}
if (wl_egl_surface->surf_source) {
TPL_OBJECT_UNLOCK(surface);
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
if (wl_egl_surface->reset == TPL_TRUE) {
- tpl_result_t result_of_waiting = TPL_ERROR_NONE;
wl_egl_surface->buffers_commit_done = TPL_FALSE;
tpl_gthread_add_idle(wl_egl_display->thread,
__idle_cb_check_buffers_commit, wl_egl_surface);
- do {
- result_of_waiting = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
- &wl_egl_surface->surf_mutex,
- CHECK_COMMIT_TIMEOUT_MS);
- } while (result_of_waiting != TPL_ERROR_TIME_OUT &&
- !wl_egl_surface->buffers_commit_done);
-
- if (result_of_waiting == TPL_ERROR_TIME_OUT) {
+ tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
+ &wl_egl_surface->surf_mutex,
+ CHECK_COMMIT_TIMEOUT_MS)
+ {
+ if (wl_egl_surface->buffers_commit_done)
+ break;
+ } else {
TPL_WARN("wl_egl_surface(%p) timeout error occured", wl_egl_surface);
_print_buffer_lists(wl_egl_surface);
}
wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
- tpl_result_t wait_result;
- tpl_bool_t is_empty = TPL_FALSE;
TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
wl_egl_surface->tbm_queue, surface);
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
tpl_gsource_send_message(wl_egl_surface->surf_source,
FORCE_FLUSH);
- do {
- wait_result = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
- &wl_egl_surface->surf_mutex,
- FORCE_FLUSH_TIMEOUT_MS);
- if (wait_result == TPL_ERROR_TIME_OUT) break;
-
+ tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
+ &wl_egl_surface->surf_mutex,
+ FORCE_FLUSH_TIMEOUT_MS)
+ {
tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
- is_empty = __tpl_list_is_empty(wl_egl_surface->buffers);
+ if (__tpl_list_is_empty(wl_egl_surface->buffers)) {
+ tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
+ break;
+ }
tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
- } while (!is_empty);
-
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ } else {
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- if (wait_result == TPL_ERROR_TIME_OUT) {
TPL_ERR("Failed to queue force flush. wl_egl_surface(%p) tbm_queue(%p)",
wl_egl_surface, wl_egl_surface->tbm_queue);
TPL_OBJECT_LOCK(surface);
return NULL;
}
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
wl_egl_surface->vblank_done = TPL_TRUE;
if (tbm_surface_queue_can_dequeue(wl_egl_surface->tbm_queue, 0))