From 3551e544287916a3763251a6051da72f88d7d5ee Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Wed, 22 Dec 2021 14:45:09 +0900
Subject: [PATCH] Modified the new buffer allocation in the RESET situation

As-Is :
 - If tbm_surface_queue_reset occurs more frequently than the
   VBLANK interval (16ms), a new tbm_surface keeps being
   allocated on every reset.

To-Be :
 - If a buffer was dequeued before the RESET occurred, the new
   buffer is allocated only after waiting for that dequeued
   buffer to be committed.

Change-Id: Id256e15e1125e06b362d5b90e7ead7718b6343ad
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_egl_thread.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index f15040a..81f8253 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -116,6 +116,7 @@ struct _tpl_wl_egl_surface {
 	tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE];
 	int buffer_cnt; /* the number of using wl_egl_buffers */
 	tpl_gmutex buffers_mutex;
+	tpl_wl_egl_buffer_t *last_deq_buffer;
 
 	tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
 
@@ -1701,6 +1702,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface)
 		wl_egl_surface->buffer_cnt = 0;
 	}
 
+	wl_egl_surface->last_deq_buffer = NULL;
+
 	{
 		struct tizen_private *tizen_private = NULL;
 
@@ -2134,6 +2137,8 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
 		wl_egl_surface->wl_egl_window = NULL;
 	}
 
+	wl_egl_surface->last_deq_buffer = NULL;
+
 	wl_egl_surface->wl_surface = NULL;
 	wl_egl_surface->wl_egl_display = NULL;
 	wl_egl_surface->tpl_surface = NULL;
@@ -2419,6 +2424,25 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
 	tbm_surface_h tbm_surface = NULL;
 
 	TPL_OBJECT_UNLOCK(surface);
+	if (wl_egl_surface->reset == TPL_TRUE && wl_egl_surface->last_deq_buffer) {
+		tpl_wl_egl_buffer_t *last_deq_buffer = wl_egl_surface->last_deq_buffer;
+
+		tpl_gmutex_lock(&last_deq_buffer->mutex);
+		if (last_deq_buffer->status > RELEASED &&
+			last_deq_buffer->status < COMMITTED) {
+			tpl_result_t wait_result;
+			wait_result = tpl_cond_timed_wait(&last_deq_buffer->cond,
+											  &last_deq_buffer->mutex,
+											  200); /* 200ms */
+
+			if (wait_result == TPL_ERROR_TIME_OUT)
+				TPL_WARN("timeout occurred while waiting to be signaled. wl_egl_buffer(%p)",
+						 last_deq_buffer);
+		}
+		tpl_gmutex_unlock(&last_deq_buffer->mutex);
+
+		wl_egl_surface->last_deq_buffer = NULL;
+	}
 	tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
 				wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
 	TPL_OBJECT_LOCK(surface);
@@ -2510,6 +2534,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
 
 	tpl_gmutex_lock(&wl_egl_buffer->mutex);
 	wl_egl_buffer->status = DEQUEUED;
+	wl_egl_surface->last_deq_buffer = wl_egl_buffer;
 
 	/* If wl_egl_buffer->release_fence_fd is -1,
 	 * the tbm_surface can be used immediately.
@@ -2571,6 +2596,9 @@ __tpl_wl_egl_surface_cancel_buffer(tpl_surface_t *surface,
 		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
 	}
 
+	if (wl_egl_buffer == wl_egl_surface->last_deq_buffer)
+		wl_egl_surface->last_deq_buffer = NULL;
+
 	tbm_surface_internal_unref(tbm_surface);
 
 	tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
@@ -3510,6 +3538,7 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
 
 	wl_egl_buffer->tbm_surface = NULL;
 	wl_egl_buffer->bo_name = -1;
+	wl_egl_buffer->status = RELEASED;
 
 	free(wl_egl_buffer);
 }
-- 
2.7.4
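
For reference, below is a standalone sketch of the wait-for-commit pattern
that the hunk in __tpl_wl_egl_surface_dequeue_buffer() introduces. The
tpl_gmutex/tpl_gcond/tpl_cond_timed_wait primitives are internal to
libtpl-egl, so the sketch models them with plain pthreads; buffer_t,
wait_for_commit(), mark_committed() and commit_thread() are illustrative
names, not library API. One deliberate difference: the patch issues a single
timed wait, while the sketch loops on the status predicate so a spurious
wakeup re-checks the condition instead of returning early.

/* Build with: cc sketch.c -lpthread */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

typedef enum { RELEASED, DEQUEUED, ENQUEUED, COMMITTED } buffer_status_t;

typedef struct {
	pthread_mutex_t mutex;
	pthread_cond_t  cond;
	buffer_status_t status;
} buffer_t;

/* Wait up to timeout_ms for buf to leave the in-flight range
 * (> RELEASED and < COMMITTED), mirroring the patch's predicate.
 * Returns 0 on success, ETIMEDOUT if the deadline passes first. */
static int wait_for_commit(buffer_t *buf, long timeout_ms)
{
	struct timespec deadline;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec  += timeout_ms / 1000;
	deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&buf->mutex);
	while (buf->status > RELEASED && buf->status < COMMITTED && ret == 0)
		ret = pthread_cond_timedwait(&buf->cond, &buf->mutex, &deadline);
	pthread_mutex_unlock(&buf->mutex);

	return ret;
}

/* The committing thread updates the status and signals the waiter,
 * which is what the patch expects the commit path to do with
 * wl_egl_buffer->cond. */
static void mark_committed(buffer_t *buf)
{
	pthread_mutex_lock(&buf->mutex);
	buf->status = COMMITTED;
	pthread_cond_signal(&buf->cond);
	pthread_mutex_unlock(&buf->mutex);
}

static void *commit_thread(void *arg)
{
	struct timespec delay = { 0, 50 * 1000000L }; /* commit after ~50ms */
	nanosleep(&delay, NULL);
	mark_committed(arg);
	return NULL;
}

int main(void)
{
	buffer_t buf = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, DEQUEUED
	};
	pthread_t tid;

	pthread_create(&tid, NULL, commit_thread, &buf);

	/* 200ms budget, matching the hard-coded timeout in the patch. */
	if (wait_for_commit(&buf, 200) == ETIMEDOUT)
		fprintf(stderr, "timeout waiting for commit\n");
	else
		printf("buffer committed; safe to allocate the new buffer\n");

	pthread_join(&tid, NULL);
	return 0;
}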