wl_egl: queue force flush will be run in thread 03/304503/4
author Joonbum Ko <joonbum.ko@samsung.com>
Tue, 16 Jan 2024 09:03:53 +0000 (18:03 +0900)
committer Joonbum Ko <joonbum.ko@samsung.com>
Thu, 18 Jan 2024 11:06:19 +0000 (20:06 +0900)
 AS-IS
  - In the case where can_dequeue returns a timeout error,
   the operations of forcibly flushing the tbm_queue and
   emptying the buffer list of the wl_egl_surface were performed
   in the parent thread (the dequeuing thread).
 PROBLEMS
  - Whenever the buffer list is traversed, the thread has to be
   paused, but the timeout error often occurred during the pausing
   operation itself.
  - Flushing the tbm_queue required complex mutex locking.
 TO-BE
  - The operation of emptying the queue and the buffer list is now
   performed in the wl-egl-thread.
  - The parent thread waits for the operation in the wl-egl-thread
   to complete; the following cases are treated as errors and cause
   dequeue to return NULL (a minimal sketch of this handshake is
   shown below).
   1. tpl_gcond_timed_wait returns a timeout error.
   2. tbm_surface_queue_can_dequeue does not return 1 even after
     the signal from the wl-egl-thread has been received.

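 The following minimal, self-contained pthread program sketches the
 request/timed-wait handshake described above. It only illustrates
 the pattern; the struct, function, and constant names below are
 placeholders and are not libtpl-egl or TBM API.

/* Sketch of the TO-BE handshake: the caller (dequeuing thread) posts a
 * "force flush" request, then waits on a condition variable with a
 * timeout while the worker (standing in for the wl-egl-thread) empties
 * the buffer list. All names are illustrative, not libtpl-egl API. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define FLUSH_TIMEOUT_MS 1000

struct surface_ctx {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            flush_requested;
	int             buffer_count;     /* stands in for the buffer list */
};

/* Worker thread: performs the flush and signals the waiter. */
static void *worker_thread(void *arg)
{
	struct surface_ctx *ctx = arg;

	pthread_mutex_lock(&ctx->lock);
	while (!ctx->flush_requested)
		pthread_cond_wait(&ctx->cond, &ctx->lock);
	ctx->buffer_count = 0;            /* "force flush": empty the list */
	ctx->flush_requested = false;
	pthread_cond_signal(&ctx->cond);  /* wake the dequeuing thread */
	pthread_mutex_unlock(&ctx->lock);
	return NULL;
}

/* Caller side: request the flush, then wait until the list is empty or
 * the timeout expires. A timeout is treated as an error (return false). */
static bool request_force_flush(struct surface_ctx *ctx)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec  += FLUSH_TIMEOUT_MS / 1000;
	deadline.tv_nsec += (FLUSH_TIMEOUT_MS % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&ctx->lock);
	ctx->flush_requested = true;
	pthread_cond_signal(&ctx->cond);  /* stands in for sending FORCE_FLUSH */

	while (ctx->buffer_count > 0) {
		if (pthread_cond_timedwait(&ctx->cond, &ctx->lock, &deadline) != 0)
			break;                    /* timeout: give up and report failure */
	}
	bool ok = (ctx->buffer_count == 0);
	pthread_mutex_unlock(&ctx->lock);
	return ok;
}

int main(void)
{
	static struct surface_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.flush_requested = false,
		.buffer_count = 3,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker_thread, &ctx);
	printf("force flush %s\n",
		   request_force_flush(&ctx) ? "succeeded" : "timed out");
	pthread_join(tid, NULL);
	return 0;
}
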
Change-Id: Ibe65330f508d2193a02fd5fe43ba1b651dc0499c
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_wl_egl_thread.c

index d35467d169b1adb5d94ae88d3ed06a1d03b25f3b..cc431b6b1e1e1d42108a088384a061c8ffbb8c36 100755 (executable)
@@ -88,8 +88,9 @@ struct _tpl_wl_egl_display {
 
 typedef enum surf_message {
        NONE_MESSAGE = 0,
-       INIT_SURFACE,
-       ACQUIRABLE,
+       INIT_SURFACE = 1,
+       ACQUIRABLE = 2,
+       FORCE_FLUSH = 4,
 } surf_message;
 
 struct _tpl_wl_egl_surface {
@@ -283,6 +284,8 @@ _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface);
 static tpl_result_t
 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
 static void
+_thread_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface);
+static void
 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
                                                  tpl_wl_egl_buffer_t *wl_egl_buffer);
 static void
@@ -1581,7 +1584,7 @@ __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
        if (wl_egl_surface->sent_message == NONE_MESSAGE) {
                wl_egl_surface->sent_message = ACQUIRABLE;
                tpl_gsource_send_message(wl_egl_surface->surf_source,
-                                                        wl_egl_surface->sent_message);
+                                                                wl_egl_surface->sent_message);
        }
        tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
 }
@@ -1674,18 +1677,27 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
        tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
 
        tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-       if (message == INIT_SURFACE) { /* Initialize surface */
+       if (message & INIT_SURFACE) { /* Initialize surface */
                TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) initialize message received!",
                                  wl_egl_surface);
                _thread_wl_egl_surface_init(wl_egl_surface);
                wl_egl_surface->initialized_in_thread = TPL_TRUE;
                tpl_gcond_signal(&wl_egl_surface->surf_cond);
-       } else if (message == ACQUIRABLE) { /* Acquirable */
+       }
+
+       if (message & ACQUIRABLE) { /* Acquirable */
                TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) acquirable message received!",
                                  wl_egl_surface);
                _thread_surface_queue_acquire(wl_egl_surface);
        }
 
+       if (message & FORCE_FLUSH) {
+               TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) force flush message received!",
+                                 wl_egl_surface);
+               _thread_tbm_queue_force_flush(wl_egl_surface);
+               tpl_gcond_signal(&wl_egl_surface->surf_cond);
+       }
+
        wl_egl_surface->sent_message = NONE_MESSAGE;
 
        tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
@@ -1834,9 +1846,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface)
 
        /* Initialize in thread */
        tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-       wl_egl_surface->sent_message = INIT_SURFACE;
        tpl_gsource_send_message(wl_egl_surface->surf_source,
-                                                        wl_egl_surface->sent_message);
+                                                        INIT_SURFACE);
        while (!wl_egl_surface->initialized_in_thread)
                tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
        tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
@@ -2447,11 +2458,15 @@ __tpl_wl_egl_surface_fence_sync_is_available(tpl_surface_t *surface)
        return !wl_egl_surface->frontbuffer_activated;
 }
 
-tpl_result_t
-_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
+void
+_thread_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
 {
        tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
 
+       TPL_INFO("[FORCE_FLUSH BEGIN]",
+                        "wl_egl_surface(%p) tbm_queue(%p)",
+                        wl_egl_surface, wl_egl_surface->tbm_queue);
+
        _print_buffer_lists(wl_egl_surface);
 
        if (wl_egl_surface->vblank) {
@@ -2467,7 +2482,6 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
        if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
                != TBM_SURFACE_QUEUE_ERROR_NONE) {
                TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
                                wl_egl_surface->tbm_queue, tsq_err);
-               return TPL_ERROR_INVALID_OPERATION;
        }
 
        tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
@@ -2489,13 +2503,9 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
        }
        tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 
-       TPL_INFO("[FORCE_FLUSH]",
+       TPL_INFO("[FORCE_FLUSH END]",
                         "wl_egl_surface(%p) tbm_queue(%p)",
                         wl_egl_surface, wl_egl_surface->tbm_queue);
-
-       _print_buffer_lists(wl_egl_surface);
-
-       return TPL_ERROR_NONE;
 }
 
 static void
@@ -2598,6 +2608,7 @@ _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
 }
 
 #define CAN_DEQUEUE_TIMEOUT_MS 10000
+#define FORCE_FLUSH_TIMEOUT_MS 1000
 
 static tbm_surface_h
 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
@@ -2656,41 +2667,51 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
 
        tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
                                wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
-       TPL_OBJECT_LOCK(surface);
-
 
        if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
+               tpl_result_t wait_result;
+               tpl_bool_t is_empty = TPL_FALSE;
                TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
                                 wl_egl_surface->tbm_queue, surface);
 
-               tpl_gthread_pause_in_idle(wl_egl_display->thread);
-               /* Locking wl_event_mutex is a secondary means of preparing for
-                * the failure of tpl_gthread_pause_in_idle().
-                * If tpl_gthread_pause_in_idle()is successful,
-                * locking wl_event_mutex does not affect. */
-               tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
-               if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
-                       TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
-                                       wl_egl_surface->tbm_queue, surface);
-                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
-                       tpl_gthread_continue(wl_egl_display->thread);
+               tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+               tpl_gsource_send_message(wl_egl_surface->surf_source,
+                                                                FORCE_FLUSH);
+               do {
+                       wait_result = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
+                                                                                          &wl_egl_surface->surf_mutex,
+                                                                                          FORCE_FLUSH_TIMEOUT_MS);
+                       if (wait_result == TPL_ERROR_TIME_OUT) break;
+
+                       tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
+                       is_empty = __tpl_list_is_empty(wl_egl_surface->buffers);
+                       tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
+               } while (!is_empty);
+
+               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+               if (wait_result == TPL_ERROR_TIME_OUT) {
+                       TPL_ERR("Failed to queue force flush. wl_egl_surface(%p) tbm_queue(%p)",
+                                       wl_egl_surface, wl_egl_surface->tbm_queue);
+                       TPL_OBJECT_LOCK(surface);
                        return NULL;
-               } else {
-                       tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
                }
 
                wl_egl_surface->vblank_done = TPL_TRUE;
 
-               tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
-               tpl_gthread_continue(wl_egl_display->thread);
+               if (tbm_surface_queue_can_dequeue(wl_egl_surface->tbm_queue, 0))
+                       tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
        }
 
        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
                TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
                                wl_egl_surface->tbm_queue, surface);
+               TPL_OBJECT_LOCK(surface);
                return NULL;
        }
 
+       TPL_OBJECT_LOCK(surface);
+
        /* After the can dequeue state, lock the wl_event_mutex to prevent other
         * events from being processed in wayland_egl_thread
         * during below dequeue procedure. */