Clear all buffers before destroying surf_source. (Gerrit change: 95/254995/1)
author: Joonbum Ko <joonbum.ko@samsung.com>
Thu, 11 Mar 2021 02:11:28 +0000 (11:11 +0900)
committer: Joonbum Ko <joonbum.ko@samsung.com>
Thu, 11 Mar 2021 02:25:47 +0000 (11:25 +0900)
Change-Id: Ifc52b83af3eb193915090369f3e9985048a702fb
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_wl_egl_thread.c

index 2cbb72d..da85a82 100755 (executable)
@@ -1433,76 +1433,6 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
 
        tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
 
-
-       {
-               tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
-               tpl_wl_egl_buffer_t *wl_egl_buffer  = NULL;
-               int idx                             = 0;
-               tpl_bool_t need_to_release          = TPL_FALSE;
-               tpl_bool_t need_to_cancel           = TPL_FALSE;
-
-               while (wl_egl_surface->buffer_cnt) {
-                       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
-                       wl_egl_buffer  = wl_egl_surface->buffers[idx];
-                       if (wl_egl_buffer) {
-                               TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
-                                                 idx, wl_egl_buffer,
-                                                 wl_egl_buffer->tbm_surface,
-                                                 status_to_string[wl_egl_buffer->status]);
-
-                               wl_egl_surface->buffers[idx] = NULL;
-                               wl_egl_surface->buffer_cnt--;
-                       } else {
-                               tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
-                               idx++;
-                               continue;
-                       }
-                       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
-
-                       tpl_gmutex_lock(&wl_egl_buffer->mutex);
-
-                       need_to_release = (wl_egl_buffer->status == ACQUIRED ||
-                                                          wl_egl_buffer->status == WAITING_SIGNALED ||
-                                                          wl_egl_buffer->status == WAITING_VBLANK ||
-                                                          wl_egl_buffer->status == COMMITTED);
-
-                       need_to_cancel = wl_egl_buffer->status == DEQUEUED;
-
-                       if (wl_egl_buffer->status == WAITING_SIGNALED) {
-                               tpl_result_t wait_result = TPL_ERROR_NONE;
-                               wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
-                                                                                                 &wl_egl_buffer->mutex,
-                                                                                                 16);
-                               if (wait_result == TPL_ERROR_TIME_OUT)
-                                       TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
-                                                        wl_egl_buffer);
-                       }
-
-                       if (need_to_release) {
-                               tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
-                                                                                                       wl_egl_buffer->tbm_surface);
-                               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
-                                       TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
-                                                       wl_egl_buffer->tbm_surface, tsq_err);
-                       }
-
-                       if (need_to_cancel) {
-                               tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
-                                                                                                                  wl_egl_buffer->tbm_surface);
-                               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
-                                       TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
-                                                       wl_egl_buffer->tbm_surface, tsq_err);
-                       }
-
-                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
-
-                       if (need_to_release || need_to_cancel)
-                               tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
-
-                       idx++;
-               }
-       }
-
        if (wl_egl_surface->surface_sync) {
                TPL_INFO("[SURFACE_SYNC_DESTROY]",
                                 "wl_egl_surface(%p) surface_sync(%p)",
@@ -1901,6 +1831,89 @@ _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
 }
 
+/* Drain every wl_egl_buffer still tracked in wl_egl_surface->buffers[]
+ * before the surface's surf_source is destroyed.  Each remaining buffer
+ * is either released back to the tbm_queue (if it was already acquired)
+ * or its dequeue is cancelled (if it was dequeued but never enqueued),
+ * and the tbm_surface is unreferenced afterwards — presumably balancing
+ * a ref taken at dequeue time; confirm against the dequeue path. */
 static void
+_tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
+       tpl_wl_egl_display_t *wl_egl_display    = wl_egl_surface->wl_egl_display;
+       tpl_wl_egl_buffer_t *wl_egl_buffer      = NULL;
+       tpl_bool_t need_to_release              = TPL_FALSE;
+       tpl_bool_t need_to_cancel               = TPL_FALSE;
+       int idx                                 = 0;
+
+       /* Loop until every tracked buffer has been detached from the
+        * buffers[] array (buffer_cnt reaches 0).
+        * NOTE(review): idx is never bounded by the buffers[] capacity; if
+        * buffer_cnt stayed non-zero while all remaining slots were NULL,
+        * this would walk past the end of the array — confirm buffer_cnt
+        * is always kept consistent with the array contents. */
+       while (wl_egl_surface->buffer_cnt) {
+               /* Lock order: display wl_event_mutex first, then the
+                * surface's buffers_mutex. */
+               tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+               tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+               wl_egl_buffer = wl_egl_surface->buffers[idx];
+
+               if (wl_egl_buffer) {
+                       TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
+                                         idx, wl_egl_buffer,
+                                         wl_egl_buffer->tbm_surface,
+                                         status_to_string[wl_egl_buffer->status]);
+                       /* Detach the buffer from the surface's tracking
+                        * array before operating on it. */
+                       wl_egl_surface->buffers[idx] = NULL;
+                       wl_egl_surface->buffer_cnt--;
+               } else {
+                       /* Empty slot: drop both locks and move on. */
+                       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+                       idx++;
+                       continue;
+               }
+
+               tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
+               /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
+               /* It has been acquired but has not yet been released, so this
+                * buffer must be released. */
+               need_to_release = (wl_egl_buffer->status == ACQUIRED ||
+                                                  wl_egl_buffer->status == WAITING_SIGNALED ||
+                                                  wl_egl_buffer->status == WAITING_VBLANK ||
+                                                  wl_egl_buffer->status == COMMITTED);
+               /* After dequeue, it has not been enqueued yet
+                * so cancel_dequeue must be performed. */
+               need_to_cancel = wl_egl_buffer->status == DEQUEUED;
+
+               if (wl_egl_buffer->status == WAITING_SIGNALED) {
+                       tpl_result_t wait_result = TPL_ERROR_NONE;
+                       /* Drop the display mutex around the timed wait —
+                        * presumably so the thread signalling this buffer's
+                        * cond can make progress; confirm against the
+                        * signalling path.  wl_egl_buffer->mutex stays held
+                        * and is released/reacquired by the cond wait. */
+                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+                       wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
+                                                                                         &wl_egl_buffer->mutex,
+                                                                                         16); /* 16ms */
+                       tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+                       if (wait_result == TPL_ERROR_TIME_OUT)
+                               TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
+                                                wl_egl_buffer);
+               }
+
+               if (need_to_release) {
+                       tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+                                                                                               wl_egl_buffer->tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+                                               wl_egl_buffer->tbm_surface, tsq_err);
+               }
+
+               if (need_to_cancel) {
+                       tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+                                                                                                          wl_egl_buffer->tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
+                                               wl_egl_buffer->tbm_surface, tsq_err);
+               }
+
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+               /* Drop the reference on the tbm_surface only after a
+                * successful detach path (released or cancelled). */
+               if (need_to_release || need_to_cancel)
+                       tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+
+               tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+
+               idx++;
+       }
+}
+
+static void
 __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
 {
        tpl_wl_egl_surface_t *wl_egl_surface = NULL;
@@ -1922,6 +1935,8 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
                         wl_egl_surface,
                         wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
 
+       _tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
+
        if (wl_egl_surface->surf_source)
                tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
        wl_egl_surface->surf_source = NULL;