Change to use buffers array instead of tpl_list. 78/254778/1
authorJoonbum Ko <joonbum.ko@samsung.com>
Mon, 1 Feb 2021 03:25:02 +0000 (12:25 +0900)
committerJoonbum Ko <joonbum.ko@samsung.com>
Tue, 9 Mar 2021 08:50:57 +0000 (17:50 +0900)
 - There is no need to maintain several separate tpl_lists
  divided according to buffer usage.
 - A single wl_egl_surface->buffers array is created instead,
  and each buffer tracks its own buffer_status.

Change-Id: I4d148fcbb2e13fab9d0904b2ab1b61604ee5895a
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_wl_egl.c

index 21f8073..320827d 100644 (file)
@@ -32,6 +32,7 @@ static int wl_egl_buffer_key;
 
 /* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
 #define CLIENT_QUEUE_SIZE 3
+#define BUFFER_ARRAY_SIZE (CLIENT_QUEUE_SIZE * 2)
 
 typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
 typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
@@ -88,10 +89,11 @@ struct _tpl_wl_egl_surface {
        tpl_wl_egl_display_t         *wl_egl_display;
        tpl_surface_t                *tpl_surface;
 
-       /* the lists for buffer tracing */
-       tpl_list_t                   *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */
-       tpl_list_t                   *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */
-       tpl_list_t                   *fence_waiting_buffers; /* Trace buffers from ENQUEUE to fence signaled */
+       /* wl_egl_buffer array for buffer tracing */
+       tpl_wl_egl_buffer_t          *buffers[BUFFER_ARRAY_SIZE];
+       int                           buffer_cnt; /* the number of using wl_egl_buffers */
+       tpl_gmutex                    buffers_mutex;
+
        tpl_list_t                   *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
        tpl_list_t                   *presentation_feedbacks; /* for tracing presentation feedbacks */
 
@@ -118,6 +120,16 @@ struct _tpl_wl_egl_surface {
        tpl_bool_t                    set_serial_is_used;
 };
 
+typedef enum buffer_status {
+       RELEASED = 0,
+       DEQUEUED,
+       ENQUEUED,
+       ACQUIRED,
+       WAITING_SIGNALED,
+       WAITING_VBLANK,
+       COMMITTED,
+} buffer_status_t;
+
 struct _tpl_wl_egl_buffer {
        tbm_surface_h                 tbm_surface;
 
@@ -125,6 +137,9 @@ struct _tpl_wl_egl_buffer {
        int                           dx, dy; /* position to attach to wl_surface */
        int                           width, height; /* size to attach to wl_surface */
 
+       buffer_status_t               status; /* for tracing buffer status */
+       int                           idx; /* position index in buffers array of wl_egl_surface */
+
        /* for damage region */
        int                           num_rects;
        int                          *rects;
@@ -142,9 +157,6 @@ struct _tpl_wl_egl_buffer {
        /* for checking need_to_commit (frontbuffer mode) */
        tpl_bool_t                    need_to_commit;
 
-       /* for checking need to release */
-       tpl_bool_t                    need_to_release;
-
        /* for checking draw done */
        tpl_bool_t                    draw_done;
 
@@ -175,12 +187,12 @@ struct _tpl_wl_egl_buffer {
 
        tpl_gsource                  *waiting_source;
 
+       tpl_gmutex                    mutex;
+       tpl_gcond                     cond;
+
        tpl_wl_egl_surface_t         *wl_egl_surface;
 };
 
-
-static void
-__cb_buffer_remove_from_list(void *data);
 static int
 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
 static void
@@ -422,7 +434,7 @@ _thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display)
        }
 
        wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display);
-       if (wl_egl_display->ev_queue) {
+       if (!wl_egl_display->ev_queue) {
                TPL_ERR("Failed to create wl_queue wl_display(%p)",
                                wl_egl_display->wl_display);
                result = TPL_ERROR_INVALID_OPERATION;
@@ -1352,52 +1364,65 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
 
        tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
 
-       if (wl_egl_surface->in_use_buffers) {
-               __tpl_list_free(wl_egl_surface->in_use_buffers,
-                                               (tpl_free_func_t)__cb_buffer_remove_from_list);
-               wl_egl_surface->in_use_buffers = NULL;
-       }
 
-       if (wl_egl_surface->committed_buffers) {
-               while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) {
-                       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
-                       tbm_surface_h tbm_surface =
-                               __tpl_list_pop_front(wl_egl_surface->committed_buffers,
-                                       (tpl_free_func_t)__cb_buffer_remove_from_list);
+       {
+               tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
+               tpl_wl_egl_buffer_t *wl_egl_buffer  = NULL;
+               int idx                             = 0;
+               tpl_bool_t need_to_release          = TPL_FALSE;
+               tpl_bool_t need_to_cancel           = TPL_FALSE;
+
+               while (wl_egl_surface->buffer_cnt) {
+                       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+                       wl_egl_buffer  = wl_egl_surface->buffers[idx];
+                       if (wl_egl_buffer) {
+                               TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%d)",
+                                                 idx, wl_egl_buffer,
+                                                 wl_egl_buffer->tbm_surface, wl_egl_buffer->status);
+
+                               wl_egl_surface->buffers[idx] = NULL;
+                               wl_egl_surface->buffer_cnt--;
+                       } else {
+                               tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+                               idx++;
+                               continue;
+                       }
+                       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
 
-                               TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
-                                                               _get_tbm_surface_bo_name(tbm_surface));
-                               tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
-                               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
-                                       TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
-                                                       tbm_surface, tsq_err);
-               }
-               __tpl_list_free(wl_egl_surface->committed_buffers, NULL);
-               wl_egl_surface->committed_buffers = NULL;
-       }
+                       tpl_gmutex_lock(&wl_egl_buffer->mutex);
 
-       if (wl_egl_surface->vblank_waiting_buffers) {
-               while (!__tpl_list_is_empty(wl_egl_surface->vblank_waiting_buffers)) {
-                       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
-                       tbm_surface_h tbm_surface =
-                               __tpl_list_pop_front(wl_egl_surface->vblank_waiting_buffers,
-                                       (tpl_free_func_t)__cb_buffer_remove_from_list);
+                       need_to_release = (wl_egl_buffer->status == ACQUIRED ||
+                                                          wl_egl_buffer->status == WAITING_SIGNALED ||
+                                                          wl_egl_buffer->status == WAITING_VBLANK ||
+                                                          wl_egl_buffer->status == COMMITTED);
+
+                       need_to_cancel = wl_egl_buffer->status == DEQUEUED;
 
-                               tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
+                       if (wl_egl_buffer->status == WAITING_SIGNALED)
+                               tpl_gcond_wait(&wl_egl_buffer->cond, &wl_egl_buffer->mutex);
+
+                       if (need_to_release) {
+                               tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+                                                                                                       wl_egl_buffer->tbm_surface);
                                if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
                                        TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
-                                                       tbm_surface, tsq_err);
-               }
-               __tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL);
-               wl_egl_surface->vblank_waiting_buffers = NULL;
-       }
+                                                       wl_egl_buffer->tbm_surface, tsq_err);
+                       }
 
-       if (wl_egl_surface->fence_waiting_buffers) {
-               while (!__tpl_list_is_empty(wl_egl_surface->fence_waiting_buffers)) {
-                       tbm_surface_h tbm_surface =
-                               __tpl_list_pop_front(wl_egl_surface->fence_waiting_buffers,
-                                                                        NULL);
-                       /* TODO */
+                       if (need_to_cancel) {
+                               tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+                                                                                                                  wl_egl_buffer->tbm_surface);
+                               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                                       TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
+                                                       wl_egl_buffer->tbm_surface, tsq_err);
+                       }
+
+                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+                       if (need_to_release || need_to_cancel)
+                               tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+
+                       idx++;
                }
        }
 
@@ -1559,6 +1584,13 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface)
        wl_egl_surface->presentation_sync.fd   = -1;
 
        {
+               int i = 0;
+               for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+                       wl_egl_surface->buffers[i]     = NULL;
+               wl_egl_surface->buffer_cnt         = 0;
+       }
+
+       {
                struct tizen_private *tizen_private = NULL;
 
                if (wl_egl_window->driver_private)
@@ -1586,6 +1618,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface)
        tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
        tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
 
+       tpl_gmutex_init(&wl_egl_surface->buffers_mutex);
+
        tpl_gmutex_init(&wl_egl_surface->surf_mutex);
        tpl_gcond_init(&wl_egl_surface->surf_cond);
 
@@ -1785,9 +1819,6 @@ _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
                }
        }
 
-       wl_egl_surface->committed_buffers      = __tpl_list_alloc();
-       wl_egl_surface->in_use_buffers         = __tpl_list_alloc();
-       wl_egl_surface->fence_waiting_buffers  = __tpl_list_alloc();
        wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
        wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
 }
@@ -1952,21 +1983,24 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
                return TPL_ERROR_INVALID_OPERATION;
        }
 
-       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-       if (wl_egl_surface->committed_buffers) {
-               while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) {
-                       tbm_surface_h tbm_surface =
-                               __tpl_list_pop_front(wl_egl_surface->committed_buffers,
-                                                                        (tpl_free_func_t)__cb_buffer_remove_from_list);
-                       TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
-                                                       _get_tbm_surface_bo_name(tbm_surface));
-                       tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
-                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
-                               TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
-                                               tbm_surface, tsq_err);
+       {
+               int i;
+               tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+               for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
+                       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+                       wl_egl_buffer = wl_egl_surface->buffers[i];
+                       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+                       if (wl_egl_buffer && wl_egl_buffer->status == COMMITTED) {
+                               wl_egl_buffer->status = RELEASED;
+                               tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+                                                                                                       wl_egl_buffer->tbm_surface);
+                               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                                       TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+                                                       wl_egl_buffer->tbm_surface, tsq_err);
+                               tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+                       }
                }
        }
-       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
 
        TPL_INFO("[FORCE_FLUSH]",
                         "wl_egl_surface(%p) tbm_queue(%p)",
@@ -2047,11 +2081,28 @@ _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
                wl_egl_buffer->tbm_surface              = tbm_surface;
                wl_egl_buffer->wl_egl_surface           = wl_egl_surface;
 
+               wl_egl_buffer->status                   = RELEASED;
+
                wl_egl_buffer->dx                       = wl_egl_window->dx;
                wl_egl_buffer->dy                       = wl_egl_window->dy;
                wl_egl_buffer->width                    = tbm_surface_get_width(tbm_surface);
                wl_egl_buffer->height                   = tbm_surface_get_height(tbm_surface);
 
+               tpl_gmutex_init(&wl_egl_buffer->mutex);
+               tpl_gcond_init(&wl_egl_buffer->cond);
+
+               tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+               {
+                       int i;
+                       for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+                               if (wl_egl_surface->buffers[i] == NULL) break;
+
+                       wl_egl_surface->buffer_cnt++;
+                       wl_egl_surface->buffers[i]          = wl_egl_buffer;
+                       wl_egl_buffer->idx                  = i;
+               }
+               tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
                TPL_INFO("[WL_EGL_BUFFER_CREATE]",
                                 "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
                                 wl_egl_surface, wl_egl_buffer, tbm_surface,
@@ -2175,6 +2226,9 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
        wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
        TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
 
+       tpl_gmutex_lock(&wl_egl_buffer->mutex);
+       wl_egl_buffer->status = DEQUEUED;
+
        /* If wl_egl_buffer->release_fence_fd is -1,
         * the tbm_surface can be used immediately.
         * If not, user(EGL) have to wait until signaled. */
@@ -2194,6 +2248,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
        TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
                          wl_egl_buffer, tbm_surface, bo_name, release_fence ? *release_fence : -1);
 
+       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
        tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
 
        return tbm_surface;
@@ -2208,6 +2263,7 @@ __tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
 
        tpl_wl_egl_surface_t *wl_egl_surface    =
                (tpl_wl_egl_surface_t *)surface->backend.data;
+       tpl_wl_egl_buffer_t *wl_egl_buffer      = NULL;
        tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
 
        if (!tbm_surface_internal_is_valid(tbm_surface)) {
@@ -2215,11 +2271,12 @@ __tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
                return TPL_ERROR_INVALID_PARAMETER;
        }
 
-       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-       /* Stop tracking of this canceled tbm_surface */
-       __tpl_list_remove_data(wl_egl_surface->in_use_buffers,
-                                                  (void *)tbm_surface, TPL_FIRST, NULL);
-       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+       wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+       if (wl_egl_buffer) {
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+               wl_egl_buffer->status = RELEASED;
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+       }
 
        tbm_surface_internal_unref(tbm_surface);
 
@@ -2270,6 +2327,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
                          wl_egl_surface, tbm_surface, bo_name, acquire_fence);
 
        wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+       tpl_gmutex_lock(&wl_egl_buffer->mutex);
 
        /* If there are received region information, save it to wl_egl_buffer */
        if (num_rects && rects) {
@@ -2284,6 +2342,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
 
                if (!wl_egl_buffer->rects) {
                        TPL_ERR("Failed to allocate memory fo damage rects info.");
+                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
                        return TPL_ERROR_OUT_OF_MEMORY;
                }
 
@@ -2295,6 +2354,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
                TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
                                 ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
                TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
                return TPL_ERROR_NONE;
        }
 
@@ -2318,8 +2378,11 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
 
        if (wl_egl_buffer->acquire_fence_fd != -1)
                close(wl_egl_buffer->acquire_fence_fd);
-       
+
        wl_egl_buffer->acquire_fence_fd = acquire_fence;
+       wl_egl_buffer->status = ENQUEUED;
+
+       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
 
        tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
                                                                                tbm_surface);
@@ -2351,9 +2414,12 @@ __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message)
        TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
                                        wl_egl_buffer->acquire_fence_fd);
 
+       tpl_gmutex_lock(&wl_egl_buffer->mutex);
+       tpl_gcond_signal(&wl_egl_buffer->cond);
+       wl_egl_buffer->status = WAITING_VBLANK;
+       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
        tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-       __tpl_list_remove_data(wl_egl_surface->fence_waiting_buffers,
-                                                  (void *)tbm_surface, TPL_FIRST, NULL);
 
        if (wl_egl_surface->vblank_done)
                _thread_wl_surface_commit(wl_egl_surface, tbm_surface);
@@ -2412,6 +2478,10 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
                TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
                                                                           "wl_egl_buffer sould be not NULL");
 
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
+               wl_egl_buffer->status = ACQUIRED;
+
                if (wl_egl_buffer->wl_buffer == NULL) {
                        tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
                        wl_egl_buffer->wl_buffer =
@@ -2437,8 +2507,7 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
                                        tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
                                                                           wl_egl_buffer->acquire_fence_fd, &buffer_funcs,
                                                                           SOURCE_TYPE_DISPOSABLE);
-
-                               __tpl_list_push_back(wl_egl_surface->fence_waiting_buffers, tbm_surface);
+                               wl_egl_buffer->status = WAITING_SIGNALED;
 
                                TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
                                                                  wl_egl_buffer->acquire_fence_fd);
@@ -2451,11 +2520,14 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
                        if (wl_egl_surface->vblank_done)
                                ready_to_commit = TPL_TRUE;
                        else {
+                               wl_egl_buffer->status = WAITING_VBLANK;
                                __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, tbm_surface);
                                ready_to_commit = TPL_FALSE;
                        }
                }
 
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
                if (ready_to_commit)
                        _thread_wl_surface_commit(wl_egl_surface, tbm_surface);
        }
@@ -2496,28 +2568,22 @@ __cb_buffer_fenced_release(void *data,
        tpl_wl_egl_buffer_t *wl_egl_buffer  = (tpl_wl_egl_buffer_t *)data;
        tbm_surface_h tbm_surface           = NULL;
 
-       if (wl_egl_buffer)
-               tbm_surface = wl_egl_buffer->tbm_surface;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
+
+       tbm_surface = wl_egl_buffer->tbm_surface;
 
        if (tbm_surface_internal_is_valid(tbm_surface)) {
-               if (wl_egl_buffer->need_to_release) {
+               if (wl_egl_buffer->status == COMMITTED) {
                        tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
                        tbm_surface_queue_error_e tsq_err;
 
-                       if (wl_egl_surface->committed_buffers) {
-                               tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-                               __tpl_list_remove_data(wl_egl_surface->committed_buffers,
-                                                                          (void *)tbm_surface,
-                                                                          TPL_FIRST, NULL);
-                               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
-                       }
-
-                       wl_egl_buffer->need_to_release = TPL_FALSE;
+                       tpl_gmutex_lock(&wl_egl_buffer->mutex);
 
                        zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
                        wl_egl_buffer->buffer_release = NULL;
 
                        wl_egl_buffer->release_fence_fd = fence;
+                       wl_egl_buffer->status = RELEASED;
 
                        TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
                                           _get_tbm_surface_bo_name(tbm_surface),
@@ -2531,6 +2597,8 @@ __cb_buffer_fenced_release(void *data,
                                          _get_tbm_surface_bo_name(tbm_surface),
                                          fence);
 
+                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
                        tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
                                                                                                tbm_surface);
                        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
@@ -2550,28 +2618,22 @@ __cb_buffer_immediate_release(void *data,
        tpl_wl_egl_buffer_t *wl_egl_buffer  = (tpl_wl_egl_buffer_t *)data;
        tbm_surface_h tbm_surface           = NULL;
 
-       if (wl_egl_buffer)
-               tbm_surface = wl_egl_buffer->tbm_surface;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
+
+       tbm_surface = wl_egl_buffer->tbm_surface;
 
        if (tbm_surface_internal_is_valid(tbm_surface)) {
-               if (wl_egl_buffer->need_to_release) {
+               if (wl_egl_buffer->status == COMMITTED) {
                        tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
                        tbm_surface_queue_error_e tsq_err;
 
-                       if (wl_egl_surface->committed_buffers) {
-                               tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-                               __tpl_list_remove_data(wl_egl_surface->committed_buffers,
-                                                                          (void *)tbm_surface,
-                                                                          TPL_FIRST, NULL);
-                               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
-                       }
-
-                       wl_egl_buffer->need_to_release = TPL_FALSE;
+                       tpl_gmutex_lock(&wl_egl_buffer->mutex);
 
                        zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
                        wl_egl_buffer->buffer_release = NULL;
 
                        wl_egl_buffer->release_fence_fd = -1;
+                       wl_egl_buffer->status = RELEASED;
 
                        TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
                                           _get_tbm_surface_bo_name(tbm_surface));
@@ -2583,6 +2645,8 @@ __cb_buffer_immediate_release(void *data,
                                          wl_egl_buffer->wl_buffer, tbm_surface,
                                          _get_tbm_surface_bo_name(tbm_surface));
 
+                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
                        tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
                                                                                                tbm_surface);
                        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
@@ -2606,28 +2670,24 @@ __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
        tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
        tbm_surface_h tbm_surface = NULL;
 
-       if (wl_egl_buffer)
-               tbm_surface = wl_egl_buffer->tbm_surface;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer)
+
+       tbm_surface = wl_egl_buffer->tbm_surface;
 
        if (tbm_surface_internal_is_valid(tbm_surface)) {
-               if (wl_egl_buffer->need_to_release) {
-                       tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
-                       tbm_surface_queue_error_e tsq_err;
+               tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+               tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
+               if (wl_egl_buffer->status == COMMITTED) {
 
                        tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
                                                                                                tbm_surface);
                        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
                                TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
 
-                       if (wl_egl_surface->committed_buffers) {
-                               tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-                               __tpl_list_remove_data(wl_egl_surface->committed_buffers,
-                                                                          (void *)tbm_surface,
-                                                                          TPL_FIRST, NULL);
-                               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
-                       }
-
-                       wl_egl_buffer->need_to_release = TPL_FALSE;
+                       wl_egl_buffer->status = RELEASED;
 
                        TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
                        TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
@@ -2636,9 +2696,12 @@ __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
                        TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
                                          wl_egl_buffer->wl_buffer, tbm_surface,
                                          _get_tbm_surface_bo_name(tbm_surface));
+               }
 
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+               if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
                        tbm_surface_internal_unref(tbm_surface);
-               }
        } else {
                TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
        }
@@ -2890,8 +2953,6 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
                                                (void *)wl_egl_buffer->wl_buffer,
                                                wl_egl_buffer->serial);
 
-       wl_egl_buffer->need_to_release = TPL_TRUE;
-
        if (wl_egl_display->use_explicit_sync &&
                wl_egl_surface->surface_sync) {
 
@@ -2920,7 +2981,8 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
        TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
                                          _get_tbm_surface_bo_name(tbm_surface));
 
-       wl_egl_buffer->need_to_commit = TPL_FALSE;
+       wl_egl_buffer->need_to_commit   = TPL_FALSE;
+       wl_egl_buffer->status           = COMMITTED;
 
        TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)",
                          wl_egl_buffer->wl_buffer, tbm_surface,
@@ -2930,10 +2992,6 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
                _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
                TPL_ERR("Failed to set wait vblank.");
 
-       if (wl_egl_surface->committed_buffers) {
-               __tpl_list_push_back(wl_egl_surface->committed_buffers, tbm_surface);
-       }
-
        tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
 
        if (wl_egl_buffer->commit_sync_fd != -1) {
@@ -3024,6 +3082,15 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
        TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
                         wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
 
+       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+       if (wl_egl_buffer->idx > 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) {
+               wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL;
+               wl_egl_surface->buffer_cnt--;
+
+               wl_egl_buffer->idx = -1;
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
        wl_display_flush(wl_egl_display->wl_display);
 
        if (wl_egl_buffer->wl_buffer)
@@ -3063,15 +3130,6 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
        free(wl_egl_buffer);
 }
 
-static void
-__cb_buffer_remove_from_list(void *data)
-{
-       tbm_surface_h tbm_surface = (tbm_surface_h)data;
-
-       if (tbm_surface && tbm_surface_internal_is_valid(tbm_surface))
-               tbm_surface_internal_unref(tbm_surface);
-}
-
 static int
 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
 {
@@ -3081,59 +3139,20 @@ _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
 static void
 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
 {
-       int count = 0;
-       int idx = 0;
-       tpl_list_node_t *node = NULL;
-       tbm_surface_h tbm_surface = NULL;
-
-       /* vblank waiting list */
-       count = __tpl_list_get_count(wl_egl_surface->vblank_waiting_buffers);
-       TPL_DEBUG("VBLANK WAITING BUFFERS | wl_egl_surface(%p) list(%p) count(%d)",
-                         wl_egl_surface, wl_egl_surface->vblank_waiting_buffers, count);
-
-       while ((!node &&
-                  (node = __tpl_list_get_front_node(wl_egl_surface->vblank_waiting_buffers))) ||
-                  (node && (node = __tpl_list_node_next(node)))) {
-               tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
-               TPL_DEBUG("VBLANK WAITING BUFFERS | %d | tbm_surface(%p) bo(%d)",
-                                 idx, tbm_surface,
-                                 _get_tbm_surface_bo_name(tbm_surface));
-               idx++;
-       }
-
-       idx = 0;
-       node = NULL;
-
-       /* in use buffers list */
-       count = __tpl_list_get_count(wl_egl_surface->in_use_buffers);
-       TPL_DEBUG("DEQUEUED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)",
-                         wl_egl_surface, wl_egl_surface->in_use_buffers, count);
-
-       while ((!node &&
-                  (node = __tpl_list_get_front_node(wl_egl_surface->in_use_buffers))) ||
-                  (node && (node = __tpl_list_node_next(node)))) {
-               tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
-               TPL_DEBUG("DEQUEUED BUFFERS | %d | tbm_surface(%p) bo(%d)",
-                                 idx, tbm_surface,
-                                 _get_tbm_surface_bo_name(tbm_surface));
-               idx++;
-       }
-
-       idx = 0;
-       node = NULL;
-
-       /* committed buffers list */
-       count = __tpl_list_get_count(wl_egl_surface->committed_buffers);
-       TPL_DEBUG("COMMITTED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)",
-                         wl_egl_surface, wl_egl_surface->committed_buffers, count);
-
-       while ((!node &&
-                  (node = __tpl_list_get_front_node(wl_egl_surface->committed_buffers))) ||
-                  (node && (node = __tpl_list_node_next(node)))) {
-               tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
-               TPL_DEBUG("COMMITTED BUFFERS | %d | tbm_surface(%p) bo(%d)",
-                                 idx, tbm_surface,
-                                 _get_tbm_surface_bo_name(tbm_surface));
-               idx++;
+       int idx                     = 0; /* slot index into wl_egl_surface->buffers */
+
+       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); /* guard buffers[] against concurrent free/attach */
+       TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)",
+                        wl_egl_surface, wl_egl_surface->buffer_cnt);
+       for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { /* array is sparse; scan every slot */
+               tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
+               if (wl_egl_buffer) { /* NULL slot == unused; print only live entries */
+                       TPL_INFO("[INFO]",
+                                        "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%d)",
+                                        idx, wl_egl_buffer, wl_egl_buffer->tbm_surface,
+                                        _get_tbm_surface_bo_name(wl_egl_buffer->tbm_surface),
+                                        wl_egl_buffer->status);
+               }
        }
+       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
 }