Change the type of last_enq_buffer to tbm_surface_h 61/269561/1
author     Joonbum Ko <joonbum.ko@samsung.com>
Tue, 18 Jan 2022 06:17:49 +0000 (15:17 +0900)
committer  Joonbum Ko <joonbum.ko@samsung.com>
Tue, 18 Jan 2022 07:51:51 +0000 (16:51 +0900)
 - to allow checking the validity of the last_enq_buffer handle.

Change-Id: Ib92b28cd3bc6bfa553fb5de57afd9fc8cfcf0cdc
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_wl_egl_thread.c

index 18652be..44d8f92 100755 (executable)
@@ -116,7 +116,7 @@ struct _tpl_wl_egl_surface {
        tpl_wl_egl_buffer_t          *buffers[BUFFER_ARRAY_SIZE];
        int                           buffer_cnt; /* the number of using wl_egl_buffers */
        tpl_gmutex                    buffers_mutex;
-       tpl_wl_egl_buffer_t          *last_enq_buffer;
+       tbm_surface_h                 last_enq_buffer;
 
        tpl_list_t                   *presentation_feedbacks; /* for tracing presentation feedbacks */
 
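The struct change above is the heart of the patch: the surface now remembers the last enqueued buffer as a tbm_surface_h instead of as a tpl_wl_egl_buffer_t pointer, so the stored handle can be validated with tbm_surface_internal_is_valid() before it is mapped back to its wl_egl_buffer. A minimal sketch of that lookup (the helper name resolve_last_enqueued_buffer() is illustrative only and assumes the file's internal headers; tbm_surface_internal_is_valid() and _get_wl_egl_buffer() are the calls the patch itself uses):

    /* Illustrative sketch, not part of the patch: the helper name is made up,
     * and the tpl_* types come from src/tpl_wl_egl_thread.c. */
    static tpl_wl_egl_buffer_t *
    resolve_last_enqueued_buffer(tpl_wl_egl_surface_t *wl_egl_surface)
    {
            /* Only a tbm_surface_h is stored, so validate the handle first
             * instead of dereferencing a possibly stale buffer pointer. */
            if (!tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer))
                    return NULL;

            /* Map the validated handle back to its wl_egl_buffer. */
            return _get_wl_egl_buffer(wl_egl_surface->last_enq_buffer);
    }
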
@@ -2443,28 +2443,33 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
 
        TPL_OBJECT_UNLOCK(surface);
        tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-       if (wl_egl_surface->reset == TPL_TRUE && wl_egl_surface->last_enq_buffer) {
-               tpl_wl_egl_buffer_t *last_enq_buffer = wl_egl_surface->last_enq_buffer;
-
-               tpl_gmutex_lock(&last_enq_buffer->mutex);
-               if (last_enq_buffer->status > ENQUEUED &&
-                       last_enq_buffer->status < COMMITTED) {
-                       tpl_result_t wait_result;
-                       TPL_INFO("[DEQ_AFTER_RESET]",
-                                        "waiting for previous buffer(%p) commit", last_enq_buffer);
-                       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
-                       wait_result = tpl_cond_timed_wait(&last_enq_buffer->cond,
-                                                                                         &last_enq_buffer->mutex,
-                                                                                         200); /* 200ms */
-                       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-                       if (wait_result == TPL_ERROR_TIME_OUT) {
-                               TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
-                                                last_enq_buffer);
-                               wl_egl_surface->last_enq_buffer = NULL;
+       if (wl_egl_surface->reset == TPL_TRUE &&
+               tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) {
+               tpl_wl_egl_buffer_t *enqueued_buffer =
+                       _get_wl_egl_buffer(wl_egl_surface->last_enq_buffer);
+
+               if (enqueued_buffer) {
+                       tpl_gmutex_lock(&enqueued_buffer->mutex);
+                       if (enqueued_buffer->status >= ENQUEUED &&
+                               enqueued_buffer->status < COMMITTED) {
+                               tpl_result_t wait_result;
+                               TPL_INFO("[DEQ_AFTER_RESET]",
+                                                "waiting for previous wl_egl_buffer(%p) commit",
+                                                enqueued_buffer);
+                               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+                               wait_result = tpl_cond_timed_wait(&enqueued_buffer->cond,
+                                                                                                 &enqueued_buffer->mutex,
+                                                                                                 200); /* 200ms */
+                               tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+                               if (wait_result == TPL_ERROR_TIME_OUT) {
+                                       TPL_WARN("timeout occurred while waiting to be signaled. wl_egl_buffer(%p)",
+                                                        enqueued_buffer);
+                               }
                        }
+                       tpl_gmutex_unlock(&enqueued_buffer->mutex);
                }
-               tpl_gmutex_unlock(&last_enq_buffer->mutex);
 
+               wl_egl_surface->last_enq_buffer = NULL;
        }
        tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
 
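The wait added above follows the usual bounded condition-variable pattern: hold the buffer's mutex, wait up to 200 ms for the buffer to reach COMMITTED, and give up with a warning on timeout. A self-contained sketch of the same pattern, using plain pthreads as a stand-in for the project's tpl_gmutex/tpl_cond_timed_wait wrappers (all names below are illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    /* Wait up to timeout_ms for *committed to become true. The caller must
     * hold 'mutex', just as the patch holds enqueued_buffer->mutex around
     * tpl_cond_timed_wait(). Returns false on timeout. */
    static bool
    wait_committed(pthread_cond_t *cond, pthread_mutex_t *mutex,
                   const bool *committed, long timeout_ms)
    {
            struct timespec deadline;
            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec  += timeout_ms / 1000;
            deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
            if (deadline.tv_nsec >= 1000000000L) {
                    deadline.tv_sec  += 1;
                    deadline.tv_nsec -= 1000000000L;
            }

            while (!*committed) {
                    /* pthread_cond_timedwait() releases the mutex while
                     * sleeping and re-acquires it before returning. */
                    int ret = pthread_cond_timedwait(cond, mutex, &deadline);
                    if (ret == ETIMEDOUT)
                            return false;   /* analogous to TPL_ERROR_TIME_OUT */
            }
            return true;
    }

Note that the patch also drops surf_mutex before the wait and re-acquires it afterwards, so other operations on the surface are not blocked for the full 200 ms, and last_enq_buffer is now cleared unconditionally once this path finishes, whether or not the wait timed out.
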
@@ -2757,7 +2762,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
        }
 
        tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-       wl_egl_surface->last_enq_buffer = wl_egl_buffer;
+       wl_egl_surface->last_enq_buffer = tbm_surface;
        tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
 
        tbm_surface_internal_unref(tbm_surface);
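
On the enqueue side the bookkeeping is a one-line change: the surface records the tbm_surface that was just enqueued rather than its wl_egl_buffer wrapper, which is what makes the dequeue-side validity check possible.
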
@@ -3422,8 +3427,6 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
 
        tpl_gmutex_unlock(&wl_egl_buffer->mutex);
 
-       wl_egl_surface->last_enq_buffer = NULL;
-
        TPL_LOG_T("WL_EGL",
                          "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
                          wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,
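
Reading this last hunk together with the dequeue hunk, clearing last_enq_buffer in _thread_wl_surface_commit() appears to be unnecessary after this patch: the dequeue path now resets the handle itself once its wait finishes (or times out), and because the field is a tbm_surface_h guarded by tbm_surface_internal_is_valid(), a handle left behind by an already-committed buffer can no longer be dereferenced as a stale pointer.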