tpl_result_t
_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
{
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- tpl_gthread_pause_in_idle(wl_egl_display->thread);
-
_print_buffer_lists(wl_egl_surface);
if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
!= TBM_SURFACE_QUEUE_ERROR_NONE) {
TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
wl_egl_surface->tbm_queue, tsq_err);
- tpl_gthread_continue(wl_egl_display->thread);
return TPL_ERROR_INVALID_OPERATION;
}
_print_buffer_lists(wl_egl_surface);
- tpl_gthread_continue(wl_egl_display->thread);
-
return TPL_ERROR_NONE;
}
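
Read together with the hunk below, this removes the pause/continue pair (and the display lookup it needed) from the helper itself: after the patch, _tbm_queue_force_flush() assumes its caller has already parked the render thread. A minimal sketch of the resulting call-site contract follows; the wrapper name is hypothetical and not part of this patch, while the tpl_* calls are the ones used in the hunk below.

/* Hypothetical wrapper illustrating the call-site contract after this
 * patch: pause first, take wl_event_mutex as a fallback, call the
 * helper, then undo both on every path. */
static tpl_result_t
_tbm_queue_force_flush_paused(tpl_wl_egl_surface_t *wl_egl_surface)
{
	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
	tpl_result_t res;

	tpl_gthread_pause_in_idle(wl_egl_display->thread);
	tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);

	res = _tbm_queue_force_flush(wl_egl_surface);

	tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
	tpl_gthread_continue(wl_egl_display->thread);

	return res;
}
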
if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
wl_egl_surface->tbm_queue, surface);
+
+ tpl_gthread_pause_in_idle(wl_egl_display->thread);
+ /* Locking wl_event_mutex is a secondary safeguard in case
+ * tpl_gthread_pause_in_idle() fails.
+ * If tpl_gthread_pause_in_idle() succeeds,
+ * locking wl_event_mutex has no additional effect. */
+ tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
wl_egl_surface->tbm_queue, surface);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+ tpl_gthread_continue(wl_egl_display->thread);
return NULL;
} else {
tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
}
+
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+ tpl_gthread_continue(wl_egl_display->thread);
}
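
For readers unfamiliar with the pattern the comment in this hunk describes, below is a self-contained illustration in plain pthreads: a worker is parked at an idle point (the primary means), and a second mutex is taken anyway as a fallback in case parking fails. Every name in it is invented for this sketch; the tpl_gthread implementation may differ, and the real tpl_gthread_pause_in_idle() can apparently fail, which this sketch does not model.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pause_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pause_cv  = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t event_mtx = PTHREAD_MUTEX_INITIALIZER; /* fallback lock */
static bool pause_requested = false;
static bool worker_parked   = false;
static atomic_bool quit     = false;
static int  shared_state    = 0;   /* stands in for the queue being flushed */

static void idle_point(void)       /* worker calls this between work items */
{
	pthread_mutex_lock(&pause_mtx);
	while (pause_requested) {
		worker_parked = true;
		pthread_cond_broadcast(&pause_cv); /* tell the pauser we are parked */
		pthread_cond_wait(&pause_cv, &pause_mtx);
	}
	worker_parked = false;
	pthread_mutex_unlock(&pause_mtx);
}

static void *worker(void *arg)
{
	(void)arg;
	while (!quit) {
		pthread_mutex_lock(&event_mtx); /* worker touches shared state under
		                                 * the same fallback lock */
		shared_state++;
		pthread_mutex_unlock(&event_mtx);
		idle_point();
		usleep(1000);
	}
	return NULL;
}

static void pause_in_idle(void)    /* primary means: park the worker */
{
	pthread_mutex_lock(&pause_mtx);
	pause_requested = true;
	while (!worker_parked)
		pthread_cond_wait(&pause_cv, &pause_mtx);
	pthread_mutex_unlock(&pause_mtx);
}

static void thread_continue(void)  /* undo pause_in_idle() */
{
	pthread_mutex_lock(&pause_mtx);
	pause_requested = false;
	pthread_cond_broadcast(&pause_cv);
	pthread_mutex_unlock(&pause_mtx);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	usleep(10 * 1000);

	pause_in_idle();                /* primary: the worker is now parked */
	pthread_mutex_lock(&event_mtx); /* secondary: redundant if the pause
	                                 * worked, protective if it did not */
	shared_state = 0;               /* the "force flush" stand-in */
	pthread_mutex_unlock(&event_mtx);
	thread_continue();

	quit = true;
	pthread_join(t, NULL);
	printf("state after reset: %d\n", shared_state);
	return 0;
}

As in the hunk above, the unlock and the continue must both happen on every exit path, in that order, which is exactly what the added error path does before returning NULL.
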
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {