From 39609145b0e6ac9c44de22b5f3bd14e40be5f69c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 9 Jun 2022 19:04:11 +0900 Subject: [PATCH 01/16] Fixed to prevent page fault via invalid address. - If the last_enq_buffer of wl_egl_surface has a wrong pointer address, page fault may occur inside the tbm_surface_internal_is_valid of libtbm. - To prevent this problem, it is modified to check in advance from the list of buffers of wl_egl_surface. Change-Id: I459b182e9ed435ce93a3a862251869fb9c7829ad Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 54 +++++++++++++++++++++++++------------------------ 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index e94fe16..d4efca0 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2445,35 +2445,37 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TPL_OBJECT_UNLOCK(surface); tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - if (wl_egl_surface->reset == TPL_TRUE && - tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) { - tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer; - tpl_wl_egl_buffer_t *enqueued_buffer = - _get_wl_egl_buffer(last_enq_buffer); - - if (enqueued_buffer) { - tbm_surface_internal_ref(last_enq_buffer); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - tpl_gmutex_lock(&enqueued_buffer->mutex); - while (enqueued_buffer->status >= ENQUEUED && - enqueued_buffer->status < COMMITTED) { - tpl_result_t wait_result; - TPL_INFO("[DEQ_AFTER_RESET]", - "waiting for previous wl_egl_buffer(%p) commit", - enqueued_buffer); - - wait_result = tpl_cond_timed_wait(&enqueued_buffer->cond, - &enqueued_buffer->mutex, - 200); /* 200ms */ - if (wait_result == TPL_ERROR_TIME_OUT) { - TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", + if (wl_egl_surface->reset == TPL_TRUE) { + if (_check_buffer_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer) && + tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) { + tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer; + tpl_wl_egl_buffer_t *enqueued_buffer = + _get_wl_egl_buffer(last_enq_buffer); + + if (enqueued_buffer) { + tbm_surface_internal_ref(last_enq_buffer); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + tpl_gmutex_lock(&enqueued_buffer->mutex); + while (enqueued_buffer->status >= ENQUEUED && + enqueued_buffer->status < COMMITTED) { + tpl_result_t wait_result; + TPL_INFO("[DEQ_AFTER_RESET]", + "waiting for previous wl_egl_buffer(%p) commit", enqueued_buffer); - break; + + wait_result = tpl_cond_timed_wait(&enqueued_buffer->cond, + &enqueued_buffer->mutex, + 200); /* 200ms */ + if (wait_result == TPL_ERROR_TIME_OUT) { + TPL_WARN("timeout occured waiting signaled. 
wl_egl_buffer(%p)", + enqueued_buffer); + break; + } } + tpl_gmutex_unlock(&enqueued_buffer->mutex); + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + tbm_surface_internal_unref(last_enq_buffer); } - tpl_gmutex_unlock(&enqueued_buffer->mutex); - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - tbm_surface_internal_unref(last_enq_buffer); } wl_egl_surface->last_enq_buffer = NULL; -- 2.7.4 From 7043c50993c996ba17df4f3cf21436c9d37a9a79 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 9 Jun 2022 19:13:37 +0900 Subject: [PATCH 02/16] Package version up to 1.9.7 Change-Id: I9a15a0ff3f4accd438af5fe8abd89d71fc1275fa Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index 0b98317..cb1653c 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 6 +%define TPL_VERSION_PATCH 7 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From 0724b49458ea5b17330fae3f3aeff67401641c81 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 28 Sep 2022 20:26:57 +0900 Subject: [PATCH 03/16] Delete g_cond_wait from tpl_gsource_destroy. MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit - g_cond_wait does not guarantee perfectly. Therefore, it is recommended that the caller of tpl_gsource_destroy should call the g_cond_wait to confirm if the destroy is actually complete. - https://docs.gtk.org/glib/method.Cond.wait.html Atomically releases mutex and waits until cond is signalled. When this function returns, mutex is locked again and owned by the calling thread. When using condition variables, it is possible that a spurious wakeup may occur (ie: g_cond_wait() returns even though g_cond_signal() was not called). It’s also possible that a stolen wakeup may occur. This is when g_cond_signal() is called, but another thread acquires mutex before this thread and modifies the state of the program in such a way that when g_cond_wait() is able to return, the expected condition is no longer met. For this reason, g_cond_wait() must always be used in a loop. See the documentation for GCond for a complete example. Change-Id: If3b98b4d79b205d9125558edb75f4b85ef6a3a99 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 2 -- src/tpl_utils_gthread.h | 5 +++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 3352a0d..20b6838 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -329,8 +329,6 @@ tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread) if (destroy_in_thread) { finalizer->intended_destroy = TPL_TRUE; tpl_gsource_send_message(finalizer, 1); - - g_cond_wait(&thread->thread_cond, &thread->thread_mutex); } else { __gsource_remove_and_destroy(finalizer); source->finalizer = NULL; diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index a1d4ce1..0237f40 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -65,6 +65,11 @@ tpl_gthread_destroy(tpl_gthread *thread); * * This creates a new tpl_gsource to be attached the thread loop. * + * IMPORTANT + * - If destroy_in_thread is TPL_TRUE, since this function does not use + * g_cond_wait(), caller should call tpl_gcond_wait() or tpl_gcond_timed_wait() + * to confirm gsource destroy completely. 
+ * * @param thread Pointer to tpl_gthread to attach new tpl_gsource. * @param data Pointer to some handle used by its user. * @param fd fd to poll. If the value is more than 0, the passed value will be polled. -- 2.7.4 From e6f7aa1cb1942797a572c2e705c5027a907aed38 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 27 Sep 2022 20:41:14 +0900 Subject: [PATCH 04/16] Modified the codes related to call tpl_gcond_wait. - g_cond_wait is sometimes awakened by unknown or stolen signal. In such cases, unexpected problems may arise. To prevent these problems, each tpl_gsource has tpl_gmutex and tpl_gcond, and modified to try tpl_gcond_wait() until gsource_finalized flag becomes true. - It may be modified with better way. Change-Id: I1360c0a3888186ba0309fe4d94c5be8e29c6f1b8 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 128 +++++++++++++++++++++++++----- src/tpl_wl_vk_thread.c | 207 ++++++++++++++++++++++++++++++++++++------------ 2 files changed, 266 insertions(+), 69 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index d4efca0..9fd1e8a 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -60,13 +60,21 @@ struct _tpl_wl_egl_display { tpl_bool_t use_explicit_sync; tpl_bool_t use_tss; tpl_bool_t prepared; - + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex disp_mutex; + tpl_gcond disp_cond; struct { tdm_client *tdm_client; tpl_gsource *tdm_source; int tdm_display_fd; tpl_bool_t tdm_initialized; tpl_list_t *surface_vblanks; + + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex tdm_mutex; + tpl_gcond tdm_cond; } tdm; #if TIZEN_FEATURE_ENABLE @@ -143,6 +151,10 @@ struct _tpl_wl_egl_surface { tpl_bool_t prerotation_capability; tpl_bool_t vblank_done; tpl_bool_t set_serial_is_used; + tpl_bool_t initialized_in_thread; + + /* To make sure that tpl_gsource has been successfully finalized. 
*/ + tpl_bool_t gsource_finalized; }; struct _surface_vblank { @@ -363,9 +375,12 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - TPL_LOG_T("WL_EGL", - "tdm_destroy| wl_egl_display(%p) tdm_client(%p) tpl_gsource(%p)", - wl_egl_display, wl_egl_display->tdm.tdm_client, gsource); + tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + + TPL_INFO("[TDM_CLIENT_FINI]", + "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_egl_display, wl_egl_display->tdm.tdm_client, + wl_egl_display->tdm.tdm_display_fd); if (wl_egl_display->tdm.tdm_client) { @@ -383,6 +398,10 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_egl_display->use_wait_vblank = TPL_FALSE; wl_egl_display->tdm.tdm_initialized = TPL_FALSE; + wl_egl_display->tdm.gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_egl_display->tdm.tdm_cond); + tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); } static tpl_gsource_functions tdm_funcs = { @@ -809,11 +828,17 @@ __thread_func_disp_finalize(tpl_gsource *gsource) tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + TPL_DEBUG("[FINALIZE] wl_egl_display(%p) tpl_gsource(%p)", + wl_egl_display, gsource); + if (wl_egl_display->wl_initialized) _thread_wl_display_fini(wl_egl_display); - TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)", - wl_egl_display, gsource); + wl_egl_display->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_egl_display->disp_cond); + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); return; } @@ -867,6 +892,7 @@ __tpl_wl_egl_display_init(tpl_display_t *display) wl_egl_display->use_tss = TPL_FALSE; wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled wl_egl_display->prepared = TPL_FALSE; + wl_egl_display->gsource_finalized = TPL_FALSE; #if TIZEN_FEATURE_ENABLE /* Wayland Interfaces */ @@ -886,6 +912,9 @@ __tpl_wl_egl_display_init(tpl_display_t *display) tpl_gmutex_init(&wl_egl_display->wl_event_mutex); + tpl_gmutex_init(&wl_egl_display->disp_mutex); + tpl_gcond_init(&wl_egl_display->disp_cond); + /* Create gthread */ wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", (tpl_gthread_func)_thread_init, @@ -908,10 +937,13 @@ __tpl_wl_egl_display_init(tpl_display_t *display) if (wl_egl_display->use_wait_vblank && wl_egl_display->tdm.tdm_initialized) { + tpl_gmutex_init(&wl_egl_display->tdm.tdm_mutex); + tpl_gcond_init(&wl_egl_display->tdm.tdm_cond); wl_egl_display->tdm.tdm_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_display, wl_egl_display->tdm.tdm_display_fd, &tdm_funcs, SOURCE_TYPE_NORMAL); + wl_egl_display->tdm.gsource_finalized = TPL_FALSE; if (!wl_egl_display->tdm.tdm_source) { TPL_ERR("Failed to create tdm_gsource\n"); goto free_display; @@ -936,15 +968,33 @@ __tpl_wl_egl_display_init(tpl_display_t *display) return TPL_ERROR_NONE; free_display: - if (wl_egl_display->thread) { - if (wl_egl_display->tdm.tdm_source) + if (wl_egl_display->tdm.tdm_source) { + tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + while (!wl_egl_display->tdm.gsource_finalized) { tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); - if (wl_egl_display->disp_source) + tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); + } + tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); + } + + if (wl_egl_display->disp_source) { + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + while (!wl_egl_display->gsource_finalized) { 
tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); + tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); + } + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); + } + if (wl_egl_display->thread) { tpl_gthread_destroy(wl_egl_display->thread); } + tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_egl_display->disp_cond); + tpl_gmutex_clear(&wl_egl_display->disp_mutex); + wl_egl_display->thread = NULL; free(wl_egl_display); @@ -968,20 +1018,43 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) wl_egl_display->wl_display); if (wl_egl_display->tdm.tdm_source && wl_egl_display->tdm.tdm_initialized) { - tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + while (!wl_egl_display->tdm.gsource_finalized) { + tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); + tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); + } wl_egl_display->tdm.tdm_source = NULL; + tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); } - if (wl_egl_display->disp_source) { + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + while (wl_egl_display->disp_source && !wl_egl_display->gsource_finalized) { tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); - wl_egl_display->disp_source = NULL; + tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); } + wl_egl_display->disp_source = NULL; + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); if (wl_egl_display->thread) { tpl_gthread_destroy(wl_egl_display->thread); wl_egl_display->thread = NULL; } + tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_egl_display->disp_cond); + tpl_gmutex_clear(&wl_egl_display->disp_mutex); + tpl_gmutex_clear(&wl_egl_display->wl_event_mutex); free(wl_egl_display); @@ -1495,8 +1568,6 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) { tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - TPL_INFO("[SURFACE_FINI]", "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", wl_egl_surface, wl_egl_surface->wl_egl_window, @@ -1572,8 +1643,6 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) __cb_surface_vblank_free); wl_egl_surface->vblank = NULL; } - - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } static tpl_bool_t @@ -1588,6 +1657,7 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) TPL_DEBUG("wl_egl_surface(%p) initialize message received!", wl_egl_surface); _thread_wl_egl_surface_init(wl_egl_surface); + wl_egl_surface->initialized_in_thread = TPL_TRUE; tpl_gcond_signal(&wl_egl_surface->surf_cond); } else if (message == ACQUIRABLE) { /* Acquirable */ TPL_DEBUG("wl_egl_surface(%p) acquirable message received!", @@ -1610,10 +1680,16 @@ __thread_func_surf_finalize(tpl_gsource *gsource) 
wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - _thread_wl_egl_surface_fini(wl_egl_surface); - + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)", wl_egl_surface, gsource); + + _thread_wl_egl_surface_fini(wl_egl_surface); + + wl_egl_surface->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_egl_surface->surf_cond); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } static tpl_gsource_functions surf_funcs = { @@ -1685,6 +1761,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) wl_egl_surface->vblank_done = TPL_TRUE; wl_egl_surface->use_render_done_fence = TPL_FALSE; wl_egl_surface->set_serial_is_used = TPL_FALSE; + wl_egl_surface->gsource_finalized = TPL_FALSE; + wl_egl_surface->initialized_in_thread = TPL_FALSE; wl_egl_surface->latest_transform = -1; wl_egl_surface->render_done_cnt = 0; @@ -1754,7 +1832,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) wl_egl_surface->sent_message = INIT_SURFACE; tpl_gsource_send_message(wl_egl_surface->surf_source, wl_egl_surface->sent_message); - tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + while (!wl_egl_surface->initialized_in_thread) + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); TPL_ASSERT(wl_egl_surface->tbm_queue); @@ -2113,9 +2192,18 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) _tpl_wl_egl_surface_buffer_clear(wl_egl_surface); - if (wl_egl_surface->surf_source) + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + while (wl_egl_surface->surf_source && !wl_egl_surface->gsource_finalized) { tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + } wl_egl_surface->surf_source = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); _print_buffer_lists(wl_egl_surface); diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 4d8fb2c..7e0e621 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -52,16 +52,27 @@ struct _tpl_wl_vk_display { int last_error; /* errno of the last wl_display error*/ tpl_bool_t wl_initialized; - tpl_bool_t tdm_initialized; - tdm_client *tdm_client; - tpl_gsource *tdm_source; - int tdm_display_fd; + struct { + tdm_client *tdm_client; + tpl_gsource *tdm_source; + int tdm_display_fd; + tpl_bool_t tdm_initialized; + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex tdm_mutex; + tpl_gcond tdm_cond; + } tdm; tpl_bool_t use_wait_vblank; tpl_bool_t use_explicit_sync; tpl_bool_t prepared; + /* To make sure that tpl_gsource has been successfully finalized. 
*/ + tpl_bool_t gsource_finalized; + tpl_gmutex disp_mutex; + tpl_gcond disp_cond; + /* device surface capabilities */ int min_buffer; int max_buffer; @@ -75,6 +86,9 @@ struct _tpl_wl_vk_swapchain { tpl_wl_vk_surface_t *wl_vk_surface; tbm_surface_queue_h tbm_queue; + tpl_result_t result; + + tpl_bool_t create_done; struct { int width; @@ -128,6 +142,10 @@ struct _tpl_wl_vk_surface { tpl_bool_t is_activated; tpl_bool_t reset; /* TRUE if queue reseted by external */ tpl_bool_t vblank_done; + tpl_bool_t initialized_in_thread; + + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; surf_message sent_message; @@ -250,7 +268,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) return TPL_FALSE; } - tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client); + tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client); /* If an error occurs in tdm_client_handle_events, it cannot be recovered. * When tdm_source is no longer available due to an unexpected situation, @@ -264,7 +282,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gsource_destroy(gsource, TPL_FALSE); - wl_vk_display->tdm_source = NULL; + wl_vk_display->tdm.tdm_source = NULL; return TPL_FALSE; } @@ -279,17 +297,24 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); - TPL_LOG_T("WL_VK", - "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)", - wl_vk_display, wl_vk_display->tdm_client, gsource); + tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex); + + TPL_INFO("[TDM_CLIENT_FINI]", + "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_vk_display, wl_vk_display->tdm.tdm_client, + wl_vk_display->tdm.tdm_display_fd); - if (wl_vk_display->tdm_client) { - tdm_client_destroy(wl_vk_display->tdm_client); - wl_vk_display->tdm_client = NULL; - wl_vk_display->tdm_display_fd = -1; + if (wl_vk_display->tdm.tdm_client) { + tdm_client_destroy(wl_vk_display->tdm.tdm_client); + wl_vk_display->tdm.tdm_client = NULL; + wl_vk_display->tdm.tdm_display_fd = -1; } - wl_vk_display->tdm_initialized = TPL_FALSE; + wl_vk_display->tdm.tdm_initialized = TPL_FALSE; + wl_vk_display->tdm.gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond); + tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex); } static tpl_gsource_functions tdm_funcs = { @@ -319,10 +344,10 @@ _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display) return TPL_ERROR_INVALID_OPERATION; } - wl_vk_display->tdm_display_fd = tdm_display_fd; - wl_vk_display->tdm_client = tdm_client; - wl_vk_display->tdm_source = NULL; - wl_vk_display->tdm_initialized = TPL_TRUE; + wl_vk_display->tdm.tdm_display_fd = tdm_display_fd; + wl_vk_display->tdm.tdm_client = tdm_client; + wl_vk_display->tdm.tdm_source = NULL; + wl_vk_display->tdm.tdm_initialized = TPL_TRUE; TPL_INFO("[TDM_CLIENT_INIT]", "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)", @@ -668,11 +693,17 @@ __thread_func_disp_finalize(tpl_gsource *gsource) tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + tpl_gmutex_lock(&wl_vk_display->disp_mutex); + TPL_DEBUG("[FINALIZE] wl_vk_display(%p) tpl_gsource(%p)", + wl_vk_display, gsource); + if (wl_vk_display->wl_initialized) _thread_wl_display_fini(wl_vk_display); - TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)", - wl_vk_display, gsource); + wl_vk_display->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_vk_display->disp_cond); + 
tpl_gmutex_unlock(&wl_vk_display->disp_mutex); return; } @@ -713,7 +744,7 @@ __tpl_wl_vk_display_init(tpl_display_t *display) display->backend.data = wl_vk_display; display->bufmgr_fd = -1; - wl_vk_display->tdm_initialized = TPL_FALSE; + wl_vk_display->tdm.tdm_initialized = TPL_FALSE; wl_vk_display->wl_initialized = TPL_FALSE; wl_vk_display->ev_queue = NULL; @@ -743,6 +774,9 @@ __tpl_wl_vk_display_init(tpl_display_t *display) tpl_gmutex_init(&wl_vk_display->wl_event_mutex); + tpl_gmutex_init(&wl_vk_display->disp_mutex); + tpl_gcond_init(&wl_vk_display->disp_cond); + /* Create gthread */ wl_vk_display->thread = tpl_gthread_create("wl_vk_thread", (tpl_gthread_func)_thread_init, @@ -763,11 +797,14 @@ __tpl_wl_vk_display_init(tpl_display_t *display) goto free_display; } - wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread, + tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex); + tpl_gcond_init(&wl_vk_display->tdm.tdm_cond); + + wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_display, - wl_vk_display->tdm_display_fd, + wl_vk_display->tdm.tdm_display_fd, &tdm_funcs, SOURCE_TYPE_NORMAL); - if (!wl_vk_display->tdm_source) { + if (!wl_vk_display->tdm.tdm_source) { TPL_ERR("Failed to create tdm_gsource\n"); goto free_display; } @@ -786,15 +823,33 @@ __tpl_wl_vk_display_init(tpl_display_t *display) return TPL_ERROR_NONE; free_display: - if (wl_vk_display->thread) { - if (wl_vk_display->tdm_source) - tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); - if (wl_vk_display->disp_source) + if (wl_vk_display->tdm.tdm_source) { + tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex); + while (!wl_vk_display->tdm.gsource_finalized) { + tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE); + tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex); + } + tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex); + } + + if (wl_vk_display->disp_source) { + tpl_gmutex_lock(&wl_vk_display->disp_mutex); + while (!wl_vk_display->gsource_finalized) { tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); + tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex); + } + tpl_gmutex_unlock(&wl_vk_display->disp_mutex); + } + if (wl_vk_display->thread) { tpl_gthread_destroy(wl_vk_display->thread); } + tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_vk_display->disp_cond); + tpl_gmutex_clear(&wl_vk_display->disp_mutex); + wl_vk_display->thread = NULL; free(wl_vk_display); @@ -817,21 +872,44 @@ __tpl_wl_vk_display_fini(tpl_display_t *display) wl_vk_display->thread, wl_vk_display->wl_display); - if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) { - tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); - wl_vk_display->tdm_source = NULL; + if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) { + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. 
+ * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex); + while (!wl_vk_display->tdm.gsource_finalized) { + tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE); + tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex); + } + wl_vk_display->tdm.tdm_source = NULL; + tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex); } - if (wl_vk_display->disp_source) { + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_vk_display->disp_mutex); + while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) { tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); - wl_vk_display->disp_source = NULL; + tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex); } + wl_vk_display->disp_source = NULL; + tpl_gmutex_unlock(&wl_vk_display->disp_mutex); if (wl_vk_display->thread) { tpl_gthread_destroy(wl_vk_display->thread); wl_vk_display->thread = NULL; } + tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_vk_display->disp_cond); + tpl_gmutex_clear(&wl_vk_display->disp_mutex); + tpl_gmutex_clear(&wl_vk_display->wl_event_mutex); free(wl_vk_display); @@ -1062,11 +1140,11 @@ _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface) /* tbm_surface_queue will be created at swapchain_create */ wl_vk_surface->vblank = _thread_create_tdm_client_vblank( - wl_vk_display->tdm_client); + wl_vk_display->tdm.tdm_client); if (wl_vk_surface->vblank) { TPL_INFO("[VBLANK_INIT]", "wl_vk_surface(%p) tdm_client(%p) vblank(%p)", - wl_vk_surface, wl_vk_display->tdm_client, + wl_vk_surface, wl_vk_display->tdm.tdm_client, wl_vk_surface->vblank); } @@ -1092,8 +1170,6 @@ _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface) static void _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface) { - tpl_gmutex_lock(&wl_vk_surface->surf_mutex); - TPL_INFO("[SURFACE_FINI]", "wl_vk_surface(%p) wl_surface(%p)", wl_vk_surface, wl_vk_surface->wl_surface); @@ -1120,8 +1196,6 @@ _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface) tdm_client_vblank_destroy(wl_vk_surface->vblank); wl_vk_surface->vblank = NULL; } - - tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } static tpl_bool_t @@ -1177,10 +1251,16 @@ __thread_func_surf_finalize(tpl_gsource *gsource) wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource); TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); - _thread_wl_vk_surface_fini(wl_vk_surface); - + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)", wl_vk_surface, gsource); + + _thread_wl_vk_surface_fini(wl_vk_surface); + + wl_vk_surface->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } static tpl_gsource_functions surf_funcs = { @@ -1237,6 +1317,7 @@ __tpl_wl_vk_surface_init(tpl_surface_t *surface) wl_vk_surface->reset = TPL_FALSE; wl_vk_surface->is_activated = TPL_FALSE; wl_vk_surface->vblank_done = TPL_TRUE; + wl_vk_surface->initialized_in_thread = TPL_FALSE; wl_vk_surface->render_done_cnt = 0; @@ -1266,7 +1347,8 @@ 
__tpl_wl_vk_surface_init(tpl_surface_t *surface) wl_vk_surface->sent_message = INIT_SURFACE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + while (!wl_vk_surface->initialized_in_thread) + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); TPL_INFO("[SURFACE_INIT]", @@ -1303,9 +1385,18 @@ __tpl_wl_vk_surface_fini(tpl_surface_t *surface) wl_vk_surface->swapchain = NULL; - if (wl_vk_surface->surf_source) + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) { tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE); - wl_vk_surface->surf_source = NULL; + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + } + wl_vk_surface->surf_source = NULL; + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); _print_buffer_lists(wl_vk_surface); @@ -1450,6 +1541,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("buffer count(%d) must be higher than (%d)", swapchain->properties.buffer_count, wl_vk_display->min_buffer); + swapchain->result = TPL_ERROR_INVALID_PARAMETER; return TPL_ERROR_INVALID_PARAMETER; } @@ -1457,12 +1549,14 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("buffer count(%d) must be lower than (%d)", swapchain->properties.buffer_count, wl_vk_display->max_buffer); + swapchain->result = TPL_ERROR_INVALID_PARAMETER; return TPL_ERROR_INVALID_PARAMETER; } if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) { TPL_ERR("Unsupported present_mode(%d)", swapchain->properties.present_mode); + swapchain->result = TPL_ERROR_INVALID_PARAMETER; return TPL_ERROR_INVALID_PARAMETER; } @@ -1504,6 +1598,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) wl_vk_surface->reset = TPL_FALSE; __tpl_util_atomic_inc(&swapchain->ref_cnt); + swapchain->create_done = TPL_TRUE; TPL_INFO("[SWAPCHAIN_REUSE]", "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)", @@ -1538,6 +1633,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) if (!tbm_queue) { TPL_ERR("Failed to create tbm_queue. 
wl_vk_surface(%p)", wl_vk_surface); + swapchain->result = TPL_ERROR_OUT_OF_MEMORY; return TPL_ERROR_OUT_OF_MEMORY; } @@ -1547,6 +1643,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", tbm_queue); tbm_surface_queue_destroy(tbm_queue); + swapchain->result = TPL_ERROR_INVALID_OPERATION; return TPL_ERROR_INVALID_OPERATION; } @@ -1557,6 +1654,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", tbm_queue); tbm_surface_queue_destroy(tbm_queue); + swapchain->result = TPL_ERROR_INVALID_OPERATION; return TPL_ERROR_INVALID_OPERATION; } @@ -1567,10 +1665,12 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", tbm_queue); tbm_surface_queue_destroy(tbm_queue); + swapchain->result = TPL_ERROR_INVALID_OPERATION; return TPL_ERROR_INVALID_OPERATION; } swapchain->tbm_queue = tbm_queue; + swapchain->create_done = TPL_TRUE; TPL_INFO("[TBM_QUEUE_CREATED]", "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)", @@ -1614,6 +1714,9 @@ __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface, swapchain->wl_vk_surface = wl_vk_surface; swapchain->properties.format = format; + swapchain->result = TPL_ERROR_NONE; + swapchain->create_done = TPL_FALSE; + wl_vk_surface->swapchain = swapchain; __tpl_util_atomic_set(&swapchain->ref_cnt, 1); @@ -1623,17 +1726,16 @@ __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface, wl_vk_surface->sent_message = CREATE_QUEUE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE) + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); TPL_CHECK_ON_FALSE_ASSERT_FAIL( swapchain->tbm_queue != NULL, "[CRITICAL FAIL] Failed to create tbm_surface_queue"); - wl_vk_surface->reset = TPL_FALSE; - return TPL_ERROR_NONE; } @@ -1678,6 +1780,12 @@ __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) return TPL_ERROR_INVALID_OPERATION; } + if (!swapchain->tbm_queue) { + TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.", + wl_vk_surface, wl_vk_surface->swapchain); + return TPL_ERROR_INVALID_OPERATION; + } + if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) { TPL_INFO("[DESTROY_SWAPCHAIN]", "wl_vk_surface(%p) swapchain(%p) still valid.", @@ -1709,7 +1817,8 @@ __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) wl_vk_surface->sent_message = DESTROY_QUEUE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + while (!swapchain->tbm_queue) + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); _print_buffer_lists(wl_vk_surface); @@ -2439,7 +2548,7 @@ _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface) if (wl_vk_surface->vblank == NULL) { wl_vk_surface->vblank = - _thread_create_tdm_client_vblank(wl_vk_display->tdm_client); + _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client); if (!wl_vk_surface->vblank) { TPL_WARN("Failed to create vblank. 
wl_vk_surface(%p)", wl_vk_surface); -- 2.7.4 From 62a630689eb0d4dd6974311f5aa024f1c89f54e7 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 29 Sep 2022 20:45:42 +0900 Subject: [PATCH 05/16] Changed the function name properly. tpl_cond_timed_wait -> tpl_gcond_timed_wait Change-Id: Iebe13b6aa7598d652cf3b5968dbc7b2bb96a44e0 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 2 +- src/tpl_utils_gthread.h | 2 +- src/tpl_wl_egl_thread.c | 4 ++-- src/tpl_wl_vk_thread.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 20b6838..20bab7f 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -431,7 +431,7 @@ tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex) } tpl_result_t -tpl_cond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, +tpl_gcond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, int64_t timeout_ms) { gint64 end_time = g_get_monotonic_time() + diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 0237f40..8185cab 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -191,7 +191,7 @@ tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex); * @return tpl_result_t TPL_ERROR_NONE or TPL_ERROR_TIME_OUT */ tpl_result_t -tpl_cond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, int64_t timeout_ms); +tpl_gcond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, int64_t timeout_ms); /** * wrapping g_cond_signal() diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 9fd1e8a..f6c0375 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2117,7 +2117,7 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) if (need_to_wait) { tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, + wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond, &wl_egl_buffer->mutex, 200); /* 200ms */ tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); @@ -2551,7 +2551,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, "waiting for previous wl_egl_buffer(%p) commit", enqueued_buffer); - wait_result = tpl_cond_timed_wait(&enqueued_buffer->cond, + wait_result = tpl_gcond_timed_wait(&enqueued_buffer->cond, &enqueued_buffer->mutex, 200); /* 200ms */ if (wait_result == TPL_ERROR_TIME_OUT) { diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 7e0e621..884e7c7 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -1049,7 +1049,7 @@ _tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface) if (need_to_wait) { tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); - wait_result = tpl_cond_timed_wait(&wl_vk_buffer->cond, + wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond, &wl_vk_buffer->mutex, 16); /* 16ms */ tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); -- 2.7.4 From 7dfe9d051ec17276e10a6f935b45c7c0d63f1770 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 29 Sep 2022 21:04:56 +0900 Subject: [PATCH 06/16] Delete wrong g_main_context_unref - GMainContext is also destroyed when GMainLoop is destroyed. Therefore, it is the wrong code to do context_unref before loop_unref. 
Change-Id: Ib57eab7ba4345565977102abb0df3268d7f42acb
Signed-off-by: Joonbum Ko
---
 src/tpl_utils_gthread.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c
index 20bab7f..f1dab0c 100644
--- a/src/tpl_utils_gthread.c
+++ b/src/tpl_utils_gthread.c
@@ -70,12 +70,14 @@ tpl_gthread_create(const char *thread_name,
 		return NULL;
 	}
 
+	// context's ref count was increased in g_main_loop_new
 	g_main_context_unref(context);
 
 	new_thread = calloc(1, sizeof(tpl_gthread));
 	if (!new_thread) {
 		TPL_ERR("Failed to allocate tpl_gthread");
-		g_main_context_unref(context);
+
+		// context is also destroyed when loop is destroyed.
 		g_main_loop_unref(loop);
 
 		return NULL;
-- 
2.7.4

From 2ef4882ca7d468582e58e90736c4762ce7313b0e Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Fri, 30 Sep 2022 16:23:50 +0900
Subject: [PATCH 07/16] Changed the order of lock/unlock at buffer_clear.

- The lock/unlock order of buffer->mutex and display->wl_event_mutex
  is important. display->mutex must surround buffer->mutex.
- Before applying this patch, a deadlock could occur in buffer_clear.

Change-Id: I90d9b36874def4e4c7f0bd36742b3bbd00faab44
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_egl_thread.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index f6c0375..4434972 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -2116,11 +2116,13 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
 			need_to_wait = (status < COMMITTED);
 
 			if (need_to_wait) {
-				tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+				/* The lock/unlock order of buffer->mutex and display->wl_event_mutex
+				 * is important. display->mutex must surround buffer->mutex */
+				tpl_gmutex_unlock(&wl_egl_buffer->mutex);
 				wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond,
-												   &wl_egl_buffer->mutex,
-												   200); /* 200ms */
-				tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+												   &wl_egl_display->wl_event_mutex,
+												   200); /* 200ms */
+				tpl_gmutex_lock(&wl_egl_buffer->mutex);
 
 				if (wait_result == TPL_ERROR_TIME_OUT)
 					TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
-- 
2.7.4

From 8325bf951c6608d237629e5eb9411dee6845f7af Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Fri, 30 Sep 2022 16:33:54 +0900
Subject: [PATCH 08/16] Modified to call tpl_gsource_destroy only once.

- tpl_gsource_destroy means 'send a destroy message to the thread',
  so it does not need to be called in a loop.
Change-Id: Ifd5706d738a2385396acaa3723138b2f564de4c2 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 4434972..487cf63 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -970,8 +970,9 @@ __tpl_wl_egl_display_init(tpl_display_t *display) free_display: if (wl_egl_display->tdm.tdm_source) { tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + // Send destroy mesage to thread + tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); while (!wl_egl_display->tdm.gsource_finalized) { - tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); } tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); @@ -979,8 +980,9 @@ free_display: if (wl_egl_display->disp_source) { tpl_gmutex_lock(&wl_egl_display->disp_mutex); + // Send destroy mesage to thread + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); while (!wl_egl_display->gsource_finalized) { - tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); } tpl_gmutex_unlock(&wl_egl_display->disp_mutex); @@ -1024,8 +1026,9 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) * caller should use tpl_gcond_wait() in the loop with checking finalized flag * */ tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + // Send destroy mesage to thread + tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); while (!wl_egl_display->tdm.gsource_finalized) { - tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); } wl_egl_display->tdm.tdm_source = NULL; @@ -1038,8 +1041,9 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) * caller should use tpl_gcond_wait() in the loop with checking finalized flag * */ tpl_gmutex_lock(&wl_egl_display->disp_mutex); + // Send destroy mesage to thread + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); while (wl_egl_display->disp_source && !wl_egl_display->gsource_finalized) { - tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); } wl_egl_display->disp_source = NULL; @@ -2200,8 +2204,9 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) * caller should use tpl_gcond_wait() in the loop with checking finalized flag * */ tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + // Send destroy mesage to thread + tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); while (wl_egl_surface->surf_source && !wl_egl_surface->gsource_finalized) { - tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); } wl_egl_surface->surf_source = NULL; -- 2.7.4 From 8f0f90756b5f0cd8ef7a6142bfce0269123e903c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 30 Sep 2022 16:35:33 +0900 Subject: [PATCH 09/16] Package version up to 1.9.8 Change-Id: I7a1bda7114b8cf98de0cd8f2f02dfb6b8d2c25fa Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index cb1653c..c4422ec 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 7 
+%define TPL_VERSION_PATCH 8
 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH}
 
 #TPL WINDOW SYSTEM DEFINITION
-- 
2.7.4

From 999b176b6427f1e09450bec792b081c636f4370c Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Wed, 5 Oct 2022 10:49:43 +0900
Subject: [PATCH 10/16] Remove unnecessary header

Change-Id: I810ec7c149bbb782d8e04167f66810171226d728
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_vk_thread.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c
index 884e7c7..11e35f3 100644
--- a/src/tpl_wl_vk_thread.c
+++ b/src/tpl_wl_vk_thread.c
@@ -24,7 +24,6 @@
 #endif
 
 #if TIZEN_FEATURE_ENABLE
-#include 
 #include 
 #endif
 
-- 
2.7.4

From 50c990169dcffad91255612a7c2929df3735be97 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Wed, 5 Oct 2022 10:57:23 +0900
Subject: [PATCH 11/16] Add null checking before calling tpl_gsource_destroy.

Change-Id: I431f21b80215abeafc5ab6daa45778f617cc661a
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_egl_thread.c | 52 ++++++++++++++++++++++++++-----------------------
 1 file changed, 28 insertions(+), 24 deletions(-)

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index 487cf63..fb9286f 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -1035,19 +1035,21 @@ __tpl_wl_egl_display_fini(tpl_display_t *display)
 		tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
 	}
 
-	/* This is a protection to prevent problems that arise in unexpected situations
-	 * that g_cond_wait cannot work normally.
- * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, - * caller should use tpl_gcond_wait() in the loop with checking finalized flag - * */ - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - // Send destroy mesage to thread - tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); - while (wl_egl_surface->surf_source && !wl_egl_surface->gsource_finalized) { - tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + if (wl_egl_surface->surf_source) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + // Send destroy mesage to thread + tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + while (!wl_egl_surface->gsource_finalized) { + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + } + wl_egl_surface->surf_source = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } - wl_egl_surface->surf_source = NULL; - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); _print_buffer_lists(wl_egl_surface); -- 2.7.4 From 0038526f2e4153b1b81b64b7b539998e5e62e5e9 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 5 Oct 2022 10:58:46 +0900 Subject: [PATCH 12/16] Package version up to 1.9.9 Change-Id: I6c7059dae6306e235be00ac9ec8bd1f7d3112136 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index c4422ec..fc4fa15 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 8 +%define TPL_VERSION_PATCH 9 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From 9f60156fde4f33dec22749773f4cb2399aacf12d Mon Sep 17 00:00:00 2001 From: "jinbong, Lee" Date: Wed, 19 Oct 2022 18:58:06 +0900 Subject: [PATCH 13/16] Remove unused int64, int32 hash key for fixing Svace warning Change-Id: I9bb85b566db975860e4243c39683f40e60601aed --- src/tpl_utils.h | 16 ++---------- src/tpl_utils_map.c | 74 ----------------------------------------------------- 2 files changed, 2 insertions(+), 88 deletions(-) diff --git a/src/tpl_utils.h b/src/tpl_utils.h index 21f2580..b7dfa1c 100644 --- a/src/tpl_utils.h +++ b/src/tpl_utils.h @@ -309,7 +309,7 @@ typedef struct _tpl_list_node tpl_list_node_t; typedef struct _tpl_list tpl_list_t; typedef struct tpl_util_map_entry tpl_util_map_entry_t; typedef struct tpl_util_map tpl_util_map_t; -typedef union tpl_util_key tpl_util_key_t; +typedef struct tpl_util_key tpl_util_key_t; typedef int (*tpl_util_hash_func_t)(const tpl_util_key_t key, int key_length); typedef int (*tpl_util_key_length_func_t)(const tpl_util_key_t key); @@ -324,9 +324,7 @@ enum _tpl_occurrence { TPL_ALL }; -union tpl_util_key { - uint32_t key32; - uint64_t key64; +struct tpl_util_key { void *ptr; /*pointer key or user defined key(string)*/ }; @@ -359,12 +357,6 @@ void tpl_util_map_init(tpl_util_map_t *map, int bucket_bits, tpl_util_key_compare_func_t key_compare_func, void *buckets); -void tpl_util_map_int32_init(tpl_util_map_t *map, int bucket_bits, - void *buckets); - -void tpl_util_map_int64_init(tpl_util_map_t *map, int bucket_bits, - void 
*buckets); - void tpl_util_map_pointer_init(tpl_util_map_t *map, int bucket_bits, void *buckets); @@ -375,10 +367,6 @@ tpl_util_map_create(int bucket_bits, tpl_util_hash_func_t hash_func, tpl_util_key_length_func_t key_length_func, tpl_util_key_compare_func_t key_compare_func); -tpl_util_map_t *tpl_util_map_int32_create(int bucket_bits); - -tpl_util_map_t *tpl_util_map_int64_create(int bucket_bits); - tpl_util_map_t *tpl_util_map_pointer_create(int bucket_bits); void tpl_util_map_destroy(tpl_util_map_t *map); diff --git a/src/tpl_utils_map.c b/src/tpl_utils_map.c index 0336bc4..dbfa74a 100644 --- a/src/tpl_utils_map.c +++ b/src/tpl_utils_map.c @@ -26,52 +26,6 @@ __get_bucket(tpl_util_map_t *map, const tpl_util_key_t key) } static int -__int64_hash(const tpl_util_key_t key, int key_length) -{ - uint64_t _key = key.key64; - - /* Hash functions from Thomas Wang https://gist.github.com/badboy/6267743 */ - _key = ~_key + (_key << 18); - _key ^= _key >> 31; - _key *= 21; - _key ^= _key >> 11; - _key += _key << 6; - _key ^= _key >> 22; - - return (int)_key;; -} - -static int -__int64_key_compare(const tpl_util_key_t key0, int key0_length, - const tpl_util_key_t key1, int key1_length) -{ - return (int)(key0.key64 - key1.key64); -} - -static int -__int32_hash(const tpl_util_key_t key, int key_length) -{ - uint32_t _key = (uint32_t)key.key32; - - /* Hash functions from Thomas Wang https://gist.github.com/badboy/6267743 */ - _key = ~_key + (_key << 15); - _key ^= _key >> 12; - _key += _key << 2; - _key ^= _key >> 4; - _key *= 2057; - _key ^= _key >> 16; - - return (int)_key; -} - -static int -__int32_key_compare(const tpl_util_key_t key0, int key0_length, - const tpl_util_key_t key1, int key1_length) -{ - return (int)(key0.key32 - key1.key32); -} - -static int __pointer_hash(const tpl_util_key_t key, int key_length) { #if INTPTR_MAX == INT32_MAX @@ -129,20 +83,6 @@ tpl_util_map_init(tpl_util_map_t *map, int bucket_bits, } void -tpl_util_map_int32_init(tpl_util_map_t *map, int bucket_bits, void *buckets) -{ - tpl_util_map_init(map, bucket_bits, __int32_hash, NULL, - __int32_key_compare, buckets); -} - -void -tpl_util_map_int64_init(tpl_util_map_t *map, int bucket_bits, void *buckets) -{ - tpl_util_map_init(map, bucket_bits, __int64_hash, NULL, - __int64_key_compare, buckets); -} - -void tpl_util_map_pointer_init(tpl_util_map_t *map, int bucket_bits, void *buckets) { tpl_util_map_init(map, bucket_bits, __pointer_hash, NULL, @@ -174,20 +114,6 @@ tpl_util_map_create(int bucket_bits, tpl_util_hash_func_t hash_func, } tpl_util_map_t * -tpl_util_map_int32_create(int bucket_bits) -{ - return tpl_util_map_create(bucket_bits, __int32_hash, NULL, - __int32_key_compare); -} - -tpl_util_map_t * -tpl_util_map_int64_create(int bucket_bits) -{ - return tpl_util_map_create(bucket_bits, __int64_hash, NULL, - __int64_key_compare); -} - -tpl_util_map_t * tpl_util_map_pointer_create(int bucket_bits) { return tpl_util_map_create(bucket_bits, __pointer_hash, NULL, -- 2.7.4 From ca6f3cf3826ede26e1669dbfe2868a13d4ffed6e Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 27 Oct 2022 20:04:52 +0900 Subject: [PATCH 14/16] wl_vk: Add missed flag setting when surface_init Change-Id: I45e7d3135a6780a1f08b10fe8913fc9ffe71af7f Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 11e35f3..fe0d337 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -1209,6 +1209,7 @@ __thread_func_surf_dispatch(tpl_gsource 
*gsource, uint64_t message) TPL_DEBUG("wl_vk_surface(%p) initialize message received!", wl_vk_surface); _thread_wl_vk_surface_init(wl_vk_surface); + wl_vk_surface->initialized_in_thread = TPL_TRUE; tpl_gcond_signal(&wl_vk_surface->surf_cond); } else if (message == CREATE_QUEUE) { /* Create tbm_surface_queue */ TPL_DEBUG("wl_vk_surface(%p) queue creation message received!", -- 2.7.4 From 3a28f16f753bf42ac47d2dd635e21894fa459c65 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 28 Oct 2022 00:02:58 +0900 Subject: [PATCH 15/16] wl_egl: Add defense code for fake signal in buffer_clear tpl_gcond_timed_wait is changed to be called within the while loop. It will repeat the process of checking status even if it wakes up due to Fake Signal. Change-Id: Ia66810da64ba2830c166f01dcb2f2f4615fc8e4f Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index fb9286f..67c14a1 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2116,19 +2116,17 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) status_to_string[status]); if (status >= ENQUEUED) { - tpl_bool_t need_to_wait = TPL_FALSE; tpl_result_t wait_result = TPL_ERROR_NONE; - need_to_wait = (status < COMMITTED); - - if (need_to_wait) { + while (status < COMMITTED && wait_result != TPL_ERROR_TIME_OUT) { + tpl_gmutex_unlock(&wl_egl_buffer->mutex); /* The lock/unlock order of buffer->mutex and display->wl_event_mutex * is important. display->mutex must surround buffer->mutex */ - tpl_gmutex_unlock(&wl_egl_buffer->mutex); wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond, &wl_egl_display->wl_event_mutex, 200); /* 200ms */ tpl_gmutex_lock(&wl_egl_buffer->mutex); + status = wl_egl_buffer->status; /* update status */ if (wait_result == TPL_ERROR_TIME_OUT) TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", @@ -2136,8 +2134,6 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) } } - status = wl_egl_buffer->status; /* update status */ - /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */ /* It has been acquired but has not yet been released, so this * buffer must be released. */ -- 2.7.4 From b440363836b4c75de49eb5d063e7ef52001ce4fa Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 28 Oct 2022 08:35:20 +0900 Subject: [PATCH 16/16] Package version up to 1.9.10 Change-Id: I43791c514fa7abd274b238d7cdfbc3489e8278eb Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index fc4fa15..3f173a4 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 9 +%define TPL_VERSION_PATCH 10 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4
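
The recurring fix in patches 03, 04, 08 and 11 is a single pattern: the finalize callback, running on the worker thread, sets a gsource_finalized flag and signals its condition variable while holding the mutex; the caller sends the destroy request once and then waits in a loop until the flag is set, so spurious or stolen wakeups cannot end the wait early. Below is a minimal sketch of that pattern in plain GLib rather than the tpl_* wrappers; the type and function names (finalize_sync, notify_finalized, wait_for_finalize, request_destroy) are illustrative only and do not appear in the patches.

#include <glib.h>

typedef struct {
	GMutex   mutex;
	GCond    cond;
	gboolean finalized;  /* set by the worker thread once cleanup is done */
} finalize_sync;

static void
finalize_sync_init(finalize_sync *fs)
{
	g_mutex_init(&fs->mutex);
	g_cond_init(&fs->cond);
	fs->finalized = FALSE;
}

/* Worker-thread side: called after the real cleanup has finished. */
static void
notify_finalized(finalize_sync *fs)
{
	g_mutex_lock(&fs->mutex);
	fs->finalized = TRUE;
	g_cond_signal(&fs->cond);
	g_mutex_unlock(&fs->mutex);
}

/* Caller side: send the destroy request once, then wait in a loop.
 * The loop makes spurious or stolen wakeups harmless, as the GLib
 * documentation for g_cond_wait() requires. */
static void
wait_for_finalize(finalize_sync *fs, void (*request_destroy)(void))
{
	g_mutex_lock(&fs->mutex);
	request_destroy();  /* analogous to tpl_gsource_destroy(source, TPL_TRUE) */
	while (!fs->finalized)
		g_cond_wait(&fs->cond, &fs->mutex);
	g_mutex_unlock(&fs->mutex);
}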