wl_egl: Apply pause and wait_idle at buffer clear 13/284513/1
author Joonbum Ko <joonbum.ko@samsung.com>
Thu, 17 Nov 2022 08:33:11 +0000 (17:33 +0900)
committer Joonbum Ko <joonbum.ko@samsung.com>
Fri, 18 Nov 2022 08:41:02 +0000 (17:41 +0900)
 AS-IS
  The wl_event_mutex lock was used to pause the thread.
  Using wl_event_mutex is not a good way to pause the thread,
 because locking wl_event_mutex can affect other wayland threads.

 TO-BE
  Using tpl_gthread_pause_in_idle is a better way than before,
  because pausing the thread only at idle allows it to finish handling its other tasks first.

Change-Id: I47a1b82a91ede648ceb8a8a1e7967fa56950ba00
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
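
Below is a minimal sketch of the "pause in idle" pattern this change relies on,
assuming a GLib main loop running on the wl_egl thread. The helper names
(thread_pause_in_idle, thread_continue) and the pause_state struct are
hypothetical stand-ins for illustration only; the real
tpl_gthread_pause_in_idle()/tpl_gthread_continue() in libtpl-egl may be
implemented differently. The key point is that the worker thread only blocks
once it reaches an idle callback, so it is never stopped in the middle of
dispatching a wayland event.

    #include <glib.h>

    typedef struct {
        GMutex   mutex;
        GCond    cond;
        gboolean paused;  /* TRUE while the worker is parked in the idle callback */
        gboolean resume;  /* set by the controlling thread to release the worker  */
    } pause_state;

    /* Runs on the worker thread, but only when its main loop has nothing
     * else left to do (idle priority). */
    static gboolean
    __pause_in_idle_cb(gpointer data)
    {
        pause_state *state = data;

        g_mutex_lock(&state->mutex);
        state->paused = TRUE;
        g_cond_broadcast(&state->cond);      /* tell the caller we are parked */
        while (!state->resume)
            g_cond_wait(&state->cond, &state->mutex);
        state->paused = FALSE;
        state->resume = FALSE;
        g_mutex_unlock(&state->mutex);

        return G_SOURCE_REMOVE;
    }

    /* Called from the controlling thread: queue an idle source on the worker's
     * context and wait until the worker has actually parked itself. */
    static void
    thread_pause_in_idle(GMainContext *worker_ctx, pause_state *state)
    {
        GSource *idle = g_idle_source_new();

        g_source_set_callback(idle, __pause_in_idle_cb, state, NULL);
        g_source_attach(idle, worker_ctx);
        g_source_unref(idle);

        g_mutex_lock(&state->mutex);
        while (!state->paused)
            g_cond_wait(&state->cond, &state->mutex);
        g_mutex_unlock(&state->mutex);
    }

    /* Called from the controlling thread: let the parked worker run again. */
    static void
    thread_continue(pause_state *state)
    {
        g_mutex_lock(&state->mutex);
        state->resume = TRUE;
        g_cond_broadcast(&state->cond);
        g_mutex_unlock(&state->mutex);
    }

In this shape, pause_state needs g_mutex_init()/g_cond_init() before first use,
and once thread_pause_in_idle() returns the controlling thread can walk shared
state such as wl_egl_surface->buffers without taking wl_event_mutex, because
the worker is guaranteed not to be mid-event.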
src/tpl_wl_egl_thread.c

index dc0e55a..a00116a 100755 (executable)
@@ -2089,24 +2089,21 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
        buffer_status_t status                  = RELEASED;
        int idx                                 = 0;
 
+       tpl_gthread_pause_in_idle(wl_egl_display->thread);
+
        TPL_INFO("[BUFFER_CLEAR]", "BEGIN | wl_egl_surface(%p)", wl_egl_surface);
+
        while (wl_egl_surface->buffer_cnt) {
-               tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
-               tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
                wl_egl_buffer = wl_egl_surface->buffers[idx];
 
                if (wl_egl_buffer) {
                        wl_egl_surface->buffers[idx] = NULL;
                        wl_egl_surface->buffer_cnt--;
                } else {
-                       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
-                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
                        idx++;
                        continue;
                }
 
-               tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
-
                tpl_gmutex_lock(&wl_egl_buffer->mutex);
 
                status = wl_egl_buffer->status;
@@ -2120,13 +2117,11 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
                        tpl_result_t wait_result = TPL_ERROR_NONE;
 
                        while (status < COMMITTED && wait_result != TPL_ERROR_TIME_OUT) {
-                               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
-                               /* The lock/unlock order of buffer->mutex and display->wl_event_mutex
-                                * is important. display->mutex must surround buffer->mutex */
+                               tpl_gthread_continue(wl_egl_display->thread);
                                wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond,
-                                                                                                  &wl_egl_display->wl_event_mutex,
+                                                                                                  &wl_egl_buffer->mutex,
                                                                                                   500); /* 500ms */
-                               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+                               tpl_gthread_pause_in_idle(wl_egl_display->thread);
                                status = wl_egl_buffer->status; /* update status */
 
                                if (wait_result == TPL_ERROR_TIME_OUT) {
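
While the thread is parked this way it cannot dispatch the wayland/tbm events
that advance wl_egl_buffer->status, so the wait loop above has to let it run
for the duration of the timed wait. Roughly (a sketch of the post-patch loop,
not a verbatim copy; the timeout handling continues as shown in the hunk):

    while (status < COMMITTED && wait_result != TPL_ERROR_TIME_OUT) {
        /* resume the worker so it can deliver the event that
         * moves the buffer to COMMITTED */
        tpl_gthread_continue(wl_egl_display->thread);
        wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond,
                                           &wl_egl_buffer->mutex,
                                           500); /* 500ms */
        /* park the worker again before touching the buffer list */
        tpl_gthread_pause_in_idle(wl_egl_display->thread);
        status = wl_egl_buffer->status; /* update status */
    }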
@@ -2168,11 +2163,11 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
                if (need_to_release || need_to_cancel)
                        tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
 
-               tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
-
                idx++;
        }
        TPL_INFO("[BUFFER_CLEAR]", "END | wl_egl_surface(%p)", wl_egl_surface);
+
+       tpl_gthread_continue(wl_egl_display->thread);
 }
 
 static void
@@ -2197,6 +2192,8 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
                         wl_egl_surface,
                         wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
 
+       tpl_gthread_wait_idle(wl_egl_display->thread);
+
        _tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
 
        if (wl_egl_surface->surf_source) {
@@ -2344,14 +2341,18 @@ __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
 tpl_result_t
 _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
 {
+       tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
        tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
 
+       tpl_gthread_pause_in_idle(wl_egl_display->thread);
+
        _print_buffer_lists(wl_egl_surface);
 
        if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
                != TBM_SURFACE_QUEUE_ERROR_NONE) {
                TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
                                wl_egl_surface->tbm_queue, tsq_err);
+               tpl_gthread_continue(wl_egl_display->thread);
                return TPL_ERROR_INVALID_OPERATION;
        }
 
@@ -2360,13 +2361,9 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
                tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
                for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
                        buffer_status_t status;
-                       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
                        wl_egl_buffer = wl_egl_surface->buffers[i];
-                       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
                        if (wl_egl_buffer) {
-                               tpl_gmutex_lock(&wl_egl_buffer->mutex);
                                status = wl_egl_buffer->status;
-                               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
                        } else {
                                continue;
                        }
@@ -2388,6 +2385,8 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
 
        _print_buffer_lists(wl_egl_surface);
 
+       tpl_gthread_continue(wl_egl_display->thread);
+
        return TPL_ERROR_NONE;
 }
 
@@ -2584,10 +2583,6 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
                                wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
        TPL_OBJECT_LOCK(surface);
 
-       /* After the can dequeue state, lock the wl_event_mutex to prevent other
-        * events from being processed in wayland_egl_thread
-        * during below dequeue procedure. */
-       tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
 
        if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
                TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
@@ -2595,7 +2590,6 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
                if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
                        TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
                                        wl_egl_surface->tbm_queue, surface);
-                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
                        return NULL;
                } else {
                        tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
@@ -2605,10 +2599,14 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
                TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
                                wl_egl_surface->tbm_queue, surface);
-               tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
                return NULL;
        }
 
+       /* After the can dequeue state, lock the wl_event_mutex to prevent other
+        * events from being processed in wayland_egl_thread
+        * during below dequeue procedure. */
+       tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+
        /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
         * below function [wayland_tbm_client_queue_check_activate()].
         * This function has to be called before tbm_surface_queue_dequeue()