Revert "wl_egl: Apply status_sync to buffers finalize logic" 13/320213/1
author: Joonbum Ko <joonbum.ko@samsung.com>
Mon, 11 Nov 2024 04:58:39 +0000 (13:58 +0900)
committer: Joonbum Ko <joonbum.ko@samsung.com>
Mon, 11 Nov 2024 05:01:28 +0000 (14:01 +0900)
This reverts commit 152d154f716f2a6176224d423b39c659b03d44a8.

Change-Id: I8bb1dda851b2c4f398465583d077c81fa6d44ed1

src/tpl_wl_egl_thread.c

index cf064a9db8c84f1c381775d5fa58fb82f35a1ba7..3eb76f47b35ae5521a5922c73b4190d3e93e3f2b 100755 (executable)
@@ -166,6 +166,7 @@ struct _tpl_wl_egl_surface {
        tpl_bool_t                    serial_updated;
        tpl_bool_t                    initialized_in_thread;
        tpl_bool_t                    frontbuffer_activated;
+       tpl_bool_t                    buffers_finalize_done;
        tpl_bool_t                    need_force_release;
        tpl_bool_t                    buffers_commit_done;
 
@@ -343,9 +344,9 @@ static void
 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
                                                  tpl_wl_egl_buffer_t *wl_egl_buffer);
 static void
-_thread_buffers_finalize(tpl_wl_egl_surface_t *wl_egl_surface);
-static void
 __cb_surface_vblank_free(void *data);
+static void
+_buffers_force_release(tpl_wl_egl_surface_t *wl_egl_surface);
 
 #define RELEASE_FROM_LAST_COMMIT 20
 #define DEQUEUE_FROM_LAST_RELEASE 20
@@ -428,12 +429,7 @@ _update_buffer_status(tpl_wl_egl_buffer_t *wl_egl_buffer, buffer_status status)
        wl_egl_buffer->status = status;
        wl_egl_buffer->changed_time = current_time;
 
-       if (wl_egl_surface->is_finalized) {
-               tpl_gmutex_unlock(&wl_egl_surface->status_sync.mutex);
-               tpl_gsource_send_message(wl_egl_surface->surf_source, BUFFERS_FINALIZE);
-       } else {
-               tpl_gmutex_unlock(&wl_egl_surface->status_sync.mutex);
-       }
+       tpl_gmutex_unlock(&wl_egl_surface->status_sync.mutex);
 }
 
 static void
@@ -1828,6 +1824,9 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
 #endif
 
        if (wl_egl_surface->tbm_queue) {
+               if (wl_egl_surface->need_force_release)
+                       _buffers_force_release(wl_egl_surface);
+
                TPL_INFO("[TBM_QUEUE_DESTROY]",
                                 "wl_egl_surface(%p) tbm_queue(%p)",
                                 wl_egl_surface, wl_egl_surface->tbm_queue);
@@ -1885,22 +1884,6 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
                tpl_gcond_signal(&wl_egl_surface->surf_cond);
        }
 
-       if (message & BUFFERS_FINALIZE) {
-               TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) buffers finalize message received!",
-                                 wl_egl_surface);
-               tpl_gmutex_lock(&wl_egl_surface->status_sync.mutex);
-               wl_egl_surface->is_finalized = TPL_FALSE;
-               tpl_gmutex_unlock(&wl_egl_surface->status_sync.mutex);
-
-               _thread_buffers_finalize(wl_egl_surface);
-
-               tpl_gmutex_lock(&wl_egl_surface->status_sync.mutex);
-               wl_egl_surface->is_finalized = TPL_TRUE;
-               tpl_gcond_signal(&wl_egl_surface->status_sync.cond);
-
-               tpl_gmutex_unlock(&wl_egl_surface->status_sync.mutex);
-       }
-
        wl_egl_surface->sent_message = NONE_MESSAGE;
 
        tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
@@ -2320,14 +2303,85 @@ _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
 }
 
 static void
-_thread_buffers_finalize(tpl_wl_egl_surface_t *wl_egl_surface)
+_buffers_force_release(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
+       tpl_bool_t need_to_release              = TPL_FALSE;
+       tpl_bool_t need_to_cancel               = TPL_FALSE;
+       tpl_list_node_t *node                   = NULL;
+
+       TPL_INFO("[BUFFER_FORCE_RELEASE_BEGIN]", "wl_egl_surface(%p)", wl_egl_surface);
+
+       tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
+       node = __tpl_list_get_front_node(wl_egl_surface->buffers);
+       do {
+               if (!node) break;
+
+               tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_node_get_data(node);
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+               buffer_status status = wl_egl_buffer->status;
+
+               if (status == RELEASED) {
+                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+                       continue;
+               }
+
+               need_to_release = (status >= ACQUIRED && status <= COMMITTED);
+               need_to_cancel = (status == DEQUEUED);
+
+               if (need_to_release) {
+                       tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+                                                                                               wl_egl_buffer->tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+                                               wl_egl_buffer->tbm_surface, tsq_err);
+               }
+
+               if (need_to_cancel) {
+                       tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+                                                                                                          wl_egl_buffer->tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("Failed to cancel dequeue tbm_surface(%p) tsq_err(%d)",
+                                               wl_egl_buffer->tbm_surface, tsq_err);
+               }
+
+               _update_buffer_status(wl_egl_buffer, RELEASED);
+
+               TPL_INFO("[FORCE_RELEASE]", "wl_egl_buffer(%p) status(%s -> %s)",
+                                wl_egl_buffer,
+                                buffer_status_info[status].status_str,
+                                buffer_status_info[RELEASED].status_str);
+
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+               if (need_to_release || need_to_cancel)
+                       tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+
+       } while ((node = __tpl_list_node_next(node)));
+       tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
+
+       wl_egl_surface->need_force_release = TPL_FALSE;
+       TPL_INFO("[BUFFER_FORCE_RELEASE_END]", "wl_egl_surface(%p)", wl_egl_surface);
+}
+
+static int
+__idle_cb_buffers_finalize(void *data)
 {
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_FALSE);
+
        int pending_cnt = 0;
-       tbm_surface_queue_error_e tsq_err;
-       tpl_bool_t need_to_release;
-       tpl_bool_t need_to_cancel;
-       tpl_list_node_t *node;
-       tpl_list_node_t *next;
+       tpl_list_node_t *node = NULL;
+       tpl_list_node_t *next = NULL;
+       tpl_bool_t ret = TPL_TRUE;
+
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+
+       if (wl_egl_surface->buffers_finalize_done) {
+               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+               return TPL_FALSE;
+       }
 
        tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
        node = __tpl_list_get_front_node(wl_egl_surface->buffers);
@@ -2342,12 +2396,29 @@ _thread_buffers_finalize(tpl_wl_egl_surface_t *wl_egl_surface)
                tpl_gmutex_lock(&wl_egl_buffer->mutex);
                buffer_status status = wl_egl_buffer->status;
 
-               need_to_release = ((wl_egl_surface->need_force_release &&
-                                                   status >= ACQUIRED && status <= VBLANK_DONE) ||
-                                                  (status == COMMITTED));
-               need_to_cancel = (status == DEQUEUED);
+               if (status == CREATED ||
+                       status == RELEASED ||
+                       status == CANCELED) {
+                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+                       continue;
+               }
 
-               if (need_to_release) {
+               if (status > DEQUEUED && status < COMMITTED) {
+                       if (!wl_egl_buffer->release_pending) {
+                               TPL_INFO("[RELEASE_PENDING]", "wl_egl_surface(%p) wl_egl_buffer(%p) status(%s)",
+                                                wl_egl_surface, wl_egl_buffer, buffer_status_info[status].status_str);
+                               TPL_INFO("[RELEASE_PENDING]", "tbm_surface(%p) bo(%d)",
+                                                wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name);
+                               wl_egl_buffer->release_pending = TPL_TRUE;
+                       }
+
+                       pending_cnt++;
+                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+                       continue;
+               }
+
+               if (status == COMMITTED) {
+                       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
                        tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
 
                        tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
@@ -2356,62 +2427,47 @@ _thread_buffers_finalize(tpl_wl_egl_surface_t *wl_egl_surface)
                                TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
                                                wl_egl_buffer->tbm_surface, tsq_err);
 
-                       if (status == COMMITTED && wl_egl_display->wl_tbm_client && wl_egl_buffer->wl_buffer) {
+                       if (wl_egl_display->wl_tbm_client && wl_egl_buffer->wl_buffer) {
                                wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
                                                                                                  (void *)wl_egl_buffer->wl_buffer);
                                wl_egl_buffer->wl_buffer = NULL;
                                wl_display_flush(wl_egl_display->wl_display);
                        }
-               } else if (need_to_cancel) {
+
+               } else if (status == DEQUEUED) {
+                       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
                        tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
                                                                                                           wl_egl_buffer->tbm_surface);
                        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
                                TPL_ERR("Failed to cancel dequeue. tbm_surface(%p) tsq_err(%d)",
                                                wl_egl_buffer->tbm_surface, tsq_err);
-               } else if (status > DEQUEUED && status < COMMITTED) {
-                       if (!wl_egl_buffer->release_pending) {
-                               TPL_INFO("[RELEASE_PENDING]", "wl_egl_surface(%p) wl_egl_buffer(%p) status(%s)",
-                                                wl_egl_surface, wl_egl_buffer, buffer_status_info[status].status_str);
-                               TPL_INFO("[RELEASE_PENDING]", "tbm_surface(%p) bo(%d)",
-                                                wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name);
-                               wl_egl_buffer->release_pending = TPL_TRUE;
-                       }
-                       pending_cnt++;
                }
 
-               if (need_to_release || need_to_cancel) {
-                       if (wl_egl_surface->need_force_release) {
-                               TPL_INFO("[FORCE_RELEASE]", "wl_egl_buffer(%p) status(%s -> %s)",
-                                                wl_egl_buffer,
-                                                buffer_status_info[status].status_str,
-                                                buffer_status_info[RELEASED].status_str);
-                       } else {
-                               TPL_INFO("[RELEASE]", "wl_egl_buffer(%p) status(%s -> %s)",
-                                                wl_egl_buffer,
-                                                buffer_status_info[status].status_str,
-                                                buffer_status_info[RELEASED].status_str);
-                       }
+               TPL_INFO("[RELEASE]", "wl_egl_buffer(%p) status(%s -> %s)",
+                                wl_egl_buffer,
+                                buffer_status_info[status].status_str,
+                                buffer_status_info[RELEASED].status_str);
 
-                       _update_buffer_status(wl_egl_buffer, RELEASED);
+               _update_buffer_status(wl_egl_buffer, RELEASED);
 
-                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
 
-                       tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
-               } else {
-                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
-               }
+               tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
 
        } while ((node = next));
 
        tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 
-       if (pending_cnt == 0)
+       if (pending_cnt == 0) {
+               wl_egl_surface->buffers_finalize_done = TPL_TRUE;
+               tpl_gcond_signal(&wl_egl_surface->surf_cond);
                TPL_INFO("[BUFFERS_FINALIZE DONE]", "wl_egl_surface(%p)", wl_egl_surface);
-       else
-               TPL_INFO("[BUFFERS_FINALIZE INCOMPLETE]", "wl_egl_surface(%p), pending_cnt: (%d)",
-                                wl_egl_surface, pending_cnt);
+               ret = TPL_FALSE;
+       }
 
-       return;
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+       return ret;
 }
 
 #define BUFFER_CLEAR_WAITING_TIMEOUT_MS 1000
@@ -2420,6 +2476,7 @@ static void
 __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
 {
        tpl_wl_egl_display_t *wl_egl_display = NULL;
+       tpl_gid_t id;
 
        TPL_ASSERT(surface);
        TPL_ASSERT(surface->display);
@@ -2437,37 +2494,33 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
                         wl_egl_surface,
                         wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
 
-       _print_buffer_lists(wl_egl_surface);
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
 
-       tpl_gmutex_lock(&wl_egl_surface->status_sync.mutex);
-       wl_egl_surface->is_finalized = TPL_TRUE;
-       tpl_gsource_send_message(wl_egl_surface->surf_source, BUFFERS_FINALIZE);
-       tpl_gcond_wait(&wl_egl_surface->status_sync.cond, &wl_egl_surface->status_sync.mutex);
+       _print_buffer_lists(wl_egl_surface);
 
-       if (!wl_egl_surface->status_sync.condition) {
-               tpl_gcond_timed_wait(&wl_egl_surface->status_sync.cond,
-                                                        &wl_egl_surface->status_sync.mutex,
+       wl_egl_surface->need_force_release = TPL_FALSE;
+       wl_egl_surface->buffers_finalize_done = TPL_FALSE;
+       id = tpl_gthread_add_idle(wl_egl_display->thread,
+                                                         __idle_cb_buffers_finalize, wl_egl_surface);
+       if (!id) {
+               TPL_WARN("Failed to attach idle gsource. wl_egl_surface(%p)",
+                                wl_egl_surface);
+               wl_egl_surface->need_force_release = TPL_TRUE;
+       } else {
+               tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
+                                                        &wl_egl_surface->surf_mutex,
                                                         BUFFER_CLEAR_WAITING_TIMEOUT_MS)
                {
-                       if (!wl_egl_surface->status_sync.condition)
+                       if (wl_egl_surface->buffers_finalize_done)
                                break;
-
                } else {
+                       tpl_gsource_remove(wl_egl_display->thread, id);
                        TPL_WARN("buffer clear timeout. wl_egl_surface(%p)", wl_egl_surface);
                        wl_egl_surface->need_force_release = TPL_TRUE;
-                       tpl_gsource_send_message(wl_egl_surface->surf_source, BUFFERS_FINALIZE);
-                       tpl_gcond_wait(&wl_egl_surface->status_sync.cond, &wl_egl_surface->status_sync.mutex);
-
-                       if(!wl_egl_surface->status_sync.condition) {
-                               TPL_ERR("Failed to release buffers. wl_egl_surface(%p)",
-                                               wl_egl_surface);
-                       }
                }
        }
-       wl_egl_surface->is_finalized = TPL_FALSE;
-       tpl_gmutex_unlock(&wl_egl_surface->status_sync.mutex);
+       wl_egl_surface->buffers_finalize_done = TPL_TRUE;
 
-       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
        if (wl_egl_surface->surf_source) {
                // Send destroy message to thread
                tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);