Fix buffer_clear logic for explicit fence sync. 91/255091/1
author: Joonbum Ko <joonbum.ko@samsung.com>
Fri, 12 Mar 2021 01:42:33 +0000 (10:42 +0900)
committer: Joonbum Ko <joonbum.ko@samsung.com>
Fri, 12 Mar 2021 01:42:33 +0000 (10:42 +0900)
Change-Id: Idb76fa9179605c03b29c8dd36d9276f121d7753d
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_wl_egl_thread.c

index 68cdbef..cc609a0 100755 (executable)
@@ -1875,17 +1875,28 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
                 * so cancel_dequeue must be performed. */
                need_to_cancel = wl_egl_buffer->status == DEQUEUED;
 
-               if (wl_egl_buffer->status >= ENQUEUED &&
-                       wl_egl_buffer->status < WAITING_VBLANK) {
+               if (wl_egl_buffer->status >= ENQUEUED) {
+                       tpl_bool_t need_to_wait  = TPL_FALSE;
                        tpl_result_t wait_result = TPL_ERROR_NONE;
-                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
-                       wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
-                                                                                         &wl_egl_buffer->mutex,
-                                                                                         16); /* 16ms */
-                       tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
-                       if (wait_result == TPL_ERROR_TIME_OUT)
-                               TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
-                                                wl_egl_buffer);
+
+                       if (!wl_egl_display->use_explicit_sync &&
+                               wl_egl_buffer->status < WAITING_VBLANK)
+                               need_to_wait = TPL_TRUE;
+
+                       if (wl_egl_display->use_explicit_sync &&
+                               wl_egl_buffer->status < COMMITTED)
+                               need_to_wait = TPL_TRUE;
+
+                       if (need_to_wait) {
+                               tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+                               wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
+                                                                                                 &wl_egl_buffer->mutex,
+                                                                                                 16); /* 16ms */
+                               tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+                               if (wait_result == TPL_ERROR_TIME_OUT)
+                                       TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
+                                                        wl_egl_buffer);
+                       }
                }
 
                if (need_to_release) {
@@ -1904,6 +1915,8 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
                                                wl_egl_buffer->tbm_surface, tsq_err);
                }
 
+               wl_egl_buffer->status = RELEASED;
+
                tpl_gmutex_unlock(&wl_egl_buffer->mutex);
 
                if (need_to_release || need_to_cancel)
@@ -2702,12 +2715,12 @@ __cb_buffer_fenced_release(void *data,
        tbm_surface = wl_egl_buffer->tbm_surface;
 
        if (tbm_surface_internal_is_valid(tbm_surface)) {
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
                if (wl_egl_buffer->status == COMMITTED) {
                        tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
                        tbm_surface_queue_error_e tsq_err;
 
-                       tpl_gmutex_lock(&wl_egl_buffer->mutex);
-
                        zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
                        wl_egl_buffer->buffer_release = NULL;
 
@@ -2726,8 +2739,6 @@ __cb_buffer_fenced_release(void *data,
                                          _get_tbm_surface_bo_name(tbm_surface),
                                          fence);
 
-                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
-
                        tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
                                                                                                tbm_surface);
                        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
@@ -2735,6 +2746,9 @@ __cb_buffer_fenced_release(void *data,
 
                        tbm_surface_internal_unref(tbm_surface);
                }
+
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
        } else {
                TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
        }
@@ -2752,12 +2766,12 @@ __cb_buffer_immediate_release(void *data,
        tbm_surface = wl_egl_buffer->tbm_surface;
 
        if (tbm_surface_internal_is_valid(tbm_surface)) {
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
                if (wl_egl_buffer->status == COMMITTED) {
                        tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
                        tbm_surface_queue_error_e tsq_err;
 
-                       tpl_gmutex_lock(&wl_egl_buffer->mutex);
-
                        zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
                        wl_egl_buffer->buffer_release = NULL;
 
@@ -2774,8 +2788,6 @@ __cb_buffer_immediate_release(void *data,
                                          wl_egl_buffer->wl_buffer, tbm_surface,
                                          _get_tbm_surface_bo_name(tbm_surface));
 
-                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
-
                        tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
                                                                                                tbm_surface);
                        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
@@ -2783,6 +2795,9 @@ __cb_buffer_immediate_release(void *data,
 
                        tbm_surface_internal_unref(tbm_surface);
                }
+
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
        } else {
                TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
        }
@@ -3130,9 +3145,15 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
        TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
                                          wl_egl_buffer->bo_name);
 
+       tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
        wl_egl_buffer->need_to_commit   = TPL_FALSE;
        wl_egl_buffer->status           = COMMITTED;
 
+       tpl_gcond_signal(&wl_egl_buffer->cond);
+
+       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
        TPL_LOG_T("WL_EGL",
                          "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
                          wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,