Enhanced protection for fence-waiting buffers. 49/258549/1
author Joonbum Ko <joonbum.ko@samsung.com>
Tue, 18 May 2021 06:38:21 +0000 (15:38 +0900)
committer Joonbum Ko <joonbum.ko@samsung.com>
Thu, 20 May 2021 05:37:01 +0000 (14:37 +0900)
 - Increased the fence wait timeout from 16ms to 50ms.
 - Modified to guarantee, via surf_mutex, that the waiting buffer
   completes its operation before the clear continues (see the
   sketch below).

Change-Id: If0fd6de234f5f79369dee23d9cddda9cd961f882
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_wl_egl_thread.c

index deba3b6..91d6e65 100755 (executable)
@@ -1959,17 +1959,19 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
                                tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
                                wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
                                                                                                  &wl_egl_buffer->mutex,
-                                                                                                 16); /* 16ms */
+                                                                                                 50); /* 50ms */
                                tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
 
-                               status = wl_egl_buffer->status;
-
                                if (wait_result == TPL_ERROR_TIME_OUT)
                                        TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
                                                         wl_egl_buffer);
                        }
                }
 
+               tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+
+               status = wl_egl_buffer->status; /* update status */
+
                /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
                /* It has been acquired but has not yet been released, so this
                 * buffer must be released. */
@@ -1995,6 +1997,8 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
                                                wl_egl_buffer->tbm_surface, tsq_err);
                }
 
+               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
                wl_egl_buffer->status = RELEASED;
 
                tpl_gmutex_unlock(&wl_egl_buffer->mutex);
@@ -2030,9 +2034,7 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
                         wl_egl_surface,
                         wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
 
-       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
        _tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
-       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
 
        if (wl_egl_surface->surf_source)
                tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
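The second hunk drops the surf_mutex lock/unlock around the call in
__tpl_wl_egl_surface_fini(). A plausible reading, sketched below with the
illustrative names from the example above, is that the lock now lives inside
the clear function, so keeping it in the caller as well would double-lock a
presumably non-recursive mutex.

/* Sketch of the resulting fini path, continuing the example above. */
static void surface_fini(struct surface *surf)
{
	/* buffer_clear() now takes surf->surf_mutex internally, so the
	 * caller must not hold it here. */
	buffer_clear(surf);

	/* ... destroy the surface's event sources, queues, etc. ... */
}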