typedef enum surf_message {
NONE_MESSAGE = 0,
- INIT_SURFACE,
- ACQUIRABLE,
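+ /* Distinct power-of-two values so the dispatcher can test each message bit with '&'. */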
+ INIT_SURFACE = 1,
+ ACQUIRABLE = 2,
+ FORCE_FLUSH = 4,
} surf_message;
struct _tpl_wl_egl_surface {
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
static void
+_thread_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface);
+static void
_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
tpl_wl_egl_buffer_t *wl_egl_buffer);
static void
if (wl_egl_surface->sent_message == NONE_MESSAGE) {
wl_egl_surface->sent_message = ACQUIRABLE;
tpl_gsource_send_message(wl_egl_surface->surf_source,
- wl_egl_surface->sent_message);
+ wl_egl_surface->sent_message);
}
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- if (message == INIT_SURFACE) { /* Initialize surface */
+ if (message & INIT_SURFACE) { /* Initialize surface */
TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) initialize message received!",
wl_egl_surface);
_thread_wl_egl_surface_init(wl_egl_surface);
wl_egl_surface->initialized_in_thread = TPL_TRUE;
tpl_gcond_signal(&wl_egl_surface->surf_cond);
- } else if (message == ACQUIRABLE) { /* Acquirable */
+ }
+
+ if (message & ACQUIRABLE) { /* Acquirable */
TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) acquirable message received!",
wl_egl_surface);
_thread_surface_queue_acquire(wl_egl_surface);
}
+ if (message & FORCE_FLUSH) {
+ TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) force flush message received!",
+ wl_egl_surface);
+ _thread_tbm_queue_force_flush(wl_egl_surface);
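+ /* Wake the caller blocked on surf_cond in __tpl_wl_egl_surface_dequeue_buffer(). */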
+ tpl_gcond_signal(&wl_egl_surface->surf_cond);
+ }
+
wl_egl_surface->sent_message = NONE_MESSAGE;
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* Initialize in thread */
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- wl_egl_surface->sent_message = INIT_SURFACE;
tpl_gsource_send_message(wl_egl_surface->surf_source,
- wl_egl_surface->sent_message);
+ INIT_SURFACE);
while (!wl_egl_surface->initialized_in_thread)
tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
return !wl_egl_surface->frontbuffer_activated;
}
-tpl_result_t
-_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
+static void
+_thread_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
{
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ TPL_INFO("[FORCE_FLUSH BEGIN]",
+ "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
+
_print_buffer_lists(wl_egl_surface);
if (wl_egl_surface->vblank) {
!= TBM_SURFACE_QUEUE_ERROR_NONE) {
TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
wl_egl_surface->tbm_queue, tsq_err);
- return TPL_ERROR_INVALID_OPERATION;
}
tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
}
tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
- TPL_INFO("[FORCE_FLUSH]",
+ TPL_INFO("[FORCE_FLUSH END]",
"wl_egl_surface(%p) tbm_queue(%p)",
wl_egl_surface, wl_egl_surface->tbm_queue);
-
- _print_buffer_lists(wl_egl_surface);
-
- return TPL_ERROR_NONE;
}
static void
}
#define CAN_DEQUEUE_TIMEOUT_MS 10000
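+/* Upper bound for each wait on the surface thread's forced flush to complete. */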
+#define FORCE_FLUSH_TIMEOUT_MS 1000
static tbm_surface_h
__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
- TPL_OBJECT_LOCK(surface);
-
if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
+ tpl_result_t wait_result;
+ tpl_bool_t is_empty = TPL_FALSE;
TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
wl_egl_surface->tbm_queue, surface);
- tpl_gthread_pause_in_idle(wl_egl_display->thread);
- /* Locking wl_event_mutex is a secondary means of preparing for
- * the failure of tpl_gthread_pause_in_idle().
- * If tpl_gthread_pause_in_idle()is successful,
- * locking wl_event_mutex does not affect. */
- tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
- if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
- TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
- wl_egl_surface->tbm_queue, surface);
- tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
- tpl_gthread_continue(wl_egl_display->thread);
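+ /* Hand the flush to the surface thread: send FORCE_FLUSH, then wait on
+ * surf_cond until the buffer list drains or FORCE_FLUSH_TIMEOUT_MS expires. */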
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ tpl_gsource_send_message(wl_egl_surface->surf_source,
+ FORCE_FLUSH);
+ do {
+ wait_result = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
+ &wl_egl_surface->surf_mutex,
+ FORCE_FLUSH_TIMEOUT_MS);
+ if (wait_result == TPL_ERROR_TIME_OUT) break;
+
+ tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
+ is_empty = __tpl_list_is_empty(wl_egl_surface->buffers);
+ tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
+ } while (!is_empty);
+
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+ if (wait_result == TPL_ERROR_TIME_OUT) {
+ TPL_ERR("Failed to queue force flush. wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
+ TPL_OBJECT_LOCK(surface);
return NULL;
- } else {
- tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
}
wl_egl_surface->vblank_done = TPL_TRUE;
- tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
- tpl_gthread_continue(wl_egl_display->thread);
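+ /* The forced flush may have released buffers; re-check dequeue availability. */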
+ if (tbm_surface_queue_can_dequeue(wl_egl_surface->tbm_queue, 0))
+ tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
}
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
wl_egl_surface->tbm_queue, surface);
+ TPL_OBJECT_LOCK(surface);
return NULL;
}
+ TPL_OBJECT_LOCK(surface);
+
/* After the can dequeue state, lock the wl_event_mutex to prevent other
* events from being processed in wayland_egl_thread
* during below dequeue procedure. */