wl_egl_thread: prepare for the failure of pause_in_idle 44/286744/1
author Joonbum Ko <joonbum.ko@samsung.com>
Wed, 4 Jan 2023 07:49:41 +0000 (16:49 +0900)
committer Joonbum Ko <joonbum.ko@samsung.com>
Thu, 12 Jan 2023 07:13:44 +0000 (16:13 +0900)
 - Move the call to tpl_gthread_pause_in_idle() from _tbm_queue_force_flush()
   out to its caller.
 - Lock wl_event_mutex after attempting tpl_gthread_pause_in_idle().
 - Locking wl_event_mutex is a secondary means of preparing for
   the failure of tpl_gthread_pause_in_idle().
   If tpl_gthread_pause_in_idle() is successful,
   locking wl_event_mutex has no effect.

Change-Id: I35132da013f67921c0f6deecc0909118461f3872
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
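
For readers outside the wl_egl code, the sketch below illustrates the "pause, then lock as a fallback" pattern this change introduces. All names in it (display_t, pause_in_idle(), thread_continue(), flush_queue()) are hypothetical stand-ins rather than the libtpl-egl API; only the ordering pause -> lock -> flush -> unlock -> continue mirrors the actual change to __tpl_wl_egl_surface_dequeue_buffer().

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical display object: only the pieces needed for the sketch. */
    typedef struct {
        pthread_mutex_t wl_event_mutex; /* guards state shared with the worker thread */
        bool paused;                    /* stand-in for the worker's pause state */
    } display_t;

    /* Stand-in for tpl_gthread_pause_in_idle(): in the real code this can fail
     * if the worker thread cannot be parked in its idle state. */
    static bool pause_in_idle(display_t *disp)
    {
        disp->paused = true; /* pretend the pause succeeded */
        return disp->paused;
    }

    /* Stand-in for tpl_gthread_continue(): resumes the worker thread. */
    static void thread_continue(display_t *disp)
    {
        disp->paused = false;
    }

    /* Stand-in for _tbm_queue_force_flush(): does the actual queue reset. */
    static bool flush_queue(display_t *disp)
    {
        (void)disp;
        return true;
    }

    static bool reset_queue_on_timeout(display_t *disp)
    {
        /* First choice: park the worker thread while it is idle. */
        bool paused = pause_in_idle(disp);
        (void)paused; /* the fallback below covers the failure case */

        /* Secondary means: even if the pause failed, holding wl_event_mutex
         * keeps the worker from racing with the flush. If the pause
         * succeeded, the lock is uncontended and has no practical effect. */
        pthread_mutex_lock(&disp->wl_event_mutex);

        bool ok = flush_queue(disp);

        pthread_mutex_unlock(&disp->wl_event_mutex);
        thread_continue(disp);

        return ok;
    }

    int main(void)
    {
        display_t disp = { .paused = false };
        pthread_mutex_init(&disp.wl_event_mutex, NULL);

        printf("queue reset %s\n",
               reset_queue_on_timeout(&disp) ? "succeeded" : "failed");

        pthread_mutex_destroy(&disp.wl_event_mutex);
        return 0;
    }

Note that in the real dequeue path the unlock and tpl_gthread_continue() are also called on the error return; the sketch collapses both exits into one for brevity.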
src/tpl_wl_egl_thread.c

index e780787..10ff5f5 100755 (executable)
@@ -2330,11 +2330,8 @@ __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
 tpl_result_t
 _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
 {
-       tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
        tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
 
-       tpl_gthread_pause_in_idle(wl_egl_display->thread);
-
        _print_buffer_lists(wl_egl_surface);
 
        if (wl_egl_surface->vblank) {
@@ -2350,7 +2347,6 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
                != TBM_SURFACE_QUEUE_ERROR_NONE) {
                TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
                                wl_egl_surface->tbm_queue, tsq_err);
-               tpl_gthread_continue(wl_egl_display->thread);
                return TPL_ERROR_INVALID_OPERATION;
        }
 
@@ -2378,8 +2374,6 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
 
        _print_buffer_lists(wl_egl_surface);
 
-       tpl_gthread_continue(wl_egl_display->thread);
-
        return TPL_ERROR_NONE;
 }
 
@@ -2550,13 +2544,25 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
        if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
                TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
                                 wl_egl_surface->tbm_queue, surface);
+
+               tpl_gthread_pause_in_idle(wl_egl_display->thread);
+               /* Locking wl_event_mutex is a secondary means of preparing for
+                * the failure of tpl_gthread_pause_in_idle().
+                * If tpl_gthread_pause_in_idle() is successful,
+                * locking wl_event_mutex has no effect. */
+               tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
                if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
                        TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
                                        wl_egl_surface->tbm_queue, surface);
+                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+                       tpl_gthread_continue(wl_egl_display->thread);
                        return NULL;
                } else {
                        tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
                }
+
+               tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+               tpl_gthread_continue(wl_egl_display->thread);
        }
 
        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {