From cda120055ddaef4f8f8ad4766829c21b3e81a4a4 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 2 Dec 2022 10:52:44 +0900 Subject: [PATCH 01/16] Package version up to 1.9.14 Change-Id: Id42e31cefc898f35fb9d84a8f949513a77eac515 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index e3f7f86..cc8eeb6 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 13 +%define TPL_VERSION_PATCH 14 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From cb176af61a52c48c85cce74af397eaedcf65848d Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 2 Dec 2022 16:17:32 +0900 Subject: [PATCH 02/16] utils_gthread: Prevent deadlock caused by wait_idle Deadlock situation: Multi-threaded, with multiple surfaces per thread. T2: calls pause_in_idle()->wait_idle(), then cond_wait() until idle is signaled. T1: calls wait_idle() and waits to lock idle_mutex. T2: _thread_idle_cb unlocks idle_mutex, sets is_idle to TPL_TRUE and waits to lock pause_mutex. T1: changes is_idle to TPL_FALSE, then cond_wait(). T2: cannot exit from the while loop, still in cond_wait(). - is_idle should be set to TPL_FALSE only after cond_wait has finished. Change-Id: I44292e486a1b9f686ec28bc0ae10ddedf94a48e3 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 208d881..bdc1109 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -475,7 +475,7 @@ _thread_idle_cb(gpointer data) g_mutex_lock(&gthread->idle_mutex); gthread->is_idle = TPL_TRUE; - g_cond_signal(&gthread->idle_cond); + g_cond_broadcast(&gthread->idle_cond); g_mutex_unlock(&gthread->idle_mutex); /* If the caller thread of tpl_gthread_wait_idle locked the pause_mutex, * thread will be paused here until unlock */ g_mutex_lock(&gthread->pause_mutex); @@ -498,7 +498,10 @@ tpl_gthread_wait_idle(tpl_gthread *gthread) TPL_DEBUG("[WAIT IDLE] BEGIN"); g_mutex_lock(&gthread->idle_mutex); - gthread->is_idle = TPL_FALSE; + if (gthread->is_idle) { + g_mutex_unlock(&gthread->idle_mutex); + return; + } idle_source = g_idle_source_new(); if (idle_source == NULL) { @@ -517,8 +520,7 @@ tpl_gthread_wait_idle(tpl_gthread *gthread) /* 200ms timeout */ end_time = g_get_monotonic_time() + (200 * G_TIME_SPAN_MILLISECOND); - - while (!gthread->is_idle) { + do { ret = g_cond_wait_until(&gthread->idle_cond, &gthread->idle_mutex, end_time); @@ -526,7 +528,10 @@ tpl_gthread_wait_idle(tpl_gthread *gthread) TPL_ERR("wait_idle timeout!"); break; } - } + } while (!gthread->is_idle); + + gthread->is_idle = TPL_FALSE; + g_mutex_unlock(&gthread->idle_mutex); TPL_DEBUG("[WAIT IDLE] END"); -- 2.7.4 From 6d7abc10a4473863b2ad999554ddcbfd6a6c82ee Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 6 Dec 2022 17:51:24 +0900 Subject: [PATCH 03/16] wl_egl: Delete calling wait_idle in surface_fini - tpl_gthread_wait_idle will be called in tpl_gthread_pause_in_idle(), and tpl_gthread_pause_in_idle will be called in buffer_clear(). So calling wait_idle() here is a duplicated and meaningless operation. Change-Id: I50b2aa2e73f5a22be5860ab3e6be241def2d862f Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 6804f2c..e780787 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ 
-2179,8 +2179,6 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) wl_egl_surface, wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue); - tpl_gthread_wait_idle(wl_egl_display->thread); - _tpl_wl_egl_surface_buffer_clear(wl_egl_surface); if (wl_egl_surface->surf_source) { -- 2.7.4 From e7fff1a3fecb076806a7b9c45fdecf3a7f450a84 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 4 Jan 2023 16:29:57 +0900 Subject: [PATCH 04/16] utils_gthread: cancel pause when wait_idle failed - If the tpl_gthread_wait_idle fails with timeout, it should unlock pause_mutex immediately. Change-Id: If33cc3c0b617ca34bb63d110d927e0dd7c022526 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 56 ++++++++++++++++++++++++++++++++++++------------- src/tpl_utils_gthread.h | 8 +++++-- 2 files changed, 48 insertions(+), 16 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index bdc1109..68c2faf 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -481,33 +481,30 @@ _thread_idle_cb(gpointer data) /* If the caller thread of tpl_gthread_wait_idle locked the pause_mutex, * thread will be paused here until unlock */ g_mutex_lock(&gthread->pause_mutex); + gthread->paused = TPL_FALSE; g_mutex_unlock(&gthread->pause_mutex); return G_SOURCE_REMOVE; } -void +tpl_result_t tpl_gthread_wait_idle(tpl_gthread *gthread) { - TPL_CHECK_ON_NULL_RETURN(gthread); - GSource *idle_source = NULL; gint64 end_time; gboolean ret = TRUE; + tpl_result_t res = TPL_ERROR_NONE; TPL_DEBUG("[WAIT IDLE] BEGIN"); g_mutex_lock(&gthread->idle_mutex); - if (gthread->is_idle) { - g_mutex_unlock(&gthread->idle_mutex); - return; - } idle_source = g_idle_source_new(); if (idle_source == NULL) { TPL_WARN("Failed to create and attach idle source"); + res = TPL_ERROR_INVALID_OPERATION; g_mutex_unlock(&gthread->idle_mutex); - return; + return res; } g_source_set_priority(idle_source, G_PRIORITY_LOW); @@ -526,6 +523,7 @@ tpl_gthread_wait_idle(tpl_gthread *gthread) end_time); if (!ret) { TPL_ERR("wait_idle timeout!"); + res = TPL_ERROR_TIME_OUT; break; } } while (!gthread->is_idle); @@ -535,16 +533,47 @@ tpl_gthread_wait_idle(tpl_gthread *gthread) g_mutex_unlock(&gthread->idle_mutex); TPL_DEBUG("[WAIT IDLE] END"); + + return res; } -void +tpl_bool_t tpl_gthread_pause_in_idle(tpl_gthread *gthread) { - TPL_CHECK_ON_NULL_RETURN(gthread); + TPL_CHECK_ON_NULL_RETURN_VAL(gthread, TPL_FALSE); + + tpl_result_t res; + int cnt = 0; + + /* Assume three threads. (M, C, wl-egl-thread) * C thread : already locked pause_mutex and doing their own job. * M thread : call pause_in_idle and trying to lock pause_mutex. * wl-egl-thread : trying to lock pause_mutex in _thread_idle_cb. * * When C thread calls tpl_gthread_continue and unlock pause_mutex, * M thread may receive schedule and lock pause_mutex. * In that case, M thread should yield to wl-egl-thread, which is * paused in thread_idle_cb(). */ do { + g_mutex_lock(&gthread->pause_mutex); + if (gthread->paused) { + g_mutex_unlock(&gthread->pause_mutex); + sched_yield(); + } else { + break; + } + } while (++cnt <= 100); - g_mutex_lock(&gthread->pause_mutex); - tpl_gthread_wait_idle(gthread); - gthread->paused = TPL_TRUE; + res = tpl_gthread_wait_idle(gthread); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to wait idle. 
| res(%d)", res); + gthread->paused = TPL_FALSE; + g_mutex_unlock(>hread->pause_mutex); + } else { + gthread->paused = TPL_TRUE; + } + + return gthread->paused; } void @@ -553,6 +582,5 @@ tpl_gthread_continue(tpl_gthread *gthread) TPL_CHECK_ON_NULL_RETURN(gthread); if (!gthread->paused) return; - gthread->paused = TPL_FALSE; g_mutex_unlock(>hread->pause_mutex); } \ No newline at end of file diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 66552c4..c6ae5a4 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -205,16 +205,20 @@ tpl_gcond_signal(tpl_gcond *gcond); * attach idle source and waiting for idle callback * * @param gthread Pointer to tpl_gthread + * + * @return tpl_result_t result of wait_idle */ -void +tpl_result_t tpl_gthread_wait_idle(tpl_gthread *gthread); /** * pause thread when idle callback is called * * @param gthread Pointer to tpl_gthread + * + * @return TPL_TRUE if succeed to pause, TPL_FALSE otherwise. */ -void +tpl_bool_t tpl_gthread_pause_in_idle(tpl_gthread *gthread); /** -- 2.7.4 From f4ecb1ad76e6a1172da49e223cd6f6afe82b6eb5 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 4 Jan 2023 16:49:41 +0900 Subject: [PATCH 05/16] wl_egl_thread: prepare for the failure of pause_in_idle - The calling tpl_gthread_pause_in_idle() move from force_flush() to outside. - Add locking wl_event_mutex after trying tpl_gthread_pause_in_idle. - Locking wl_event_mutex is a secondary means of preparing for the failure of tpl_gthread_pause_in_idle(). If tpl_gthread_pause_in_idle()is successful, locking wl_event_mutex does not affect. Change-Id: I35132da013f67921c0f6deecc0909118461f3872 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index e780787..10ff5f5 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2330,11 +2330,8 @@ __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) tpl_result_t _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) { - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_gthread_pause_in_idle(wl_egl_display->thread); - _print_buffer_lists(wl_egl_surface); if (wl_egl_surface->vblank) { @@ -2350,7 +2347,6 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)", wl_egl_surface->tbm_queue, tsq_err); - tpl_gthread_continue(wl_egl_display->thread); return TPL_ERROR_INVALID_OPERATION; } @@ -2378,8 +2374,6 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) _print_buffer_lists(wl_egl_surface); - tpl_gthread_continue(wl_egl_display->thread); - return TPL_ERROR_NONE; } @@ -2550,13 +2544,25 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", wl_egl_surface->tbm_queue, surface); + + tpl_gthread_pause_in_idle(wl_egl_display->thread); + /* Locking wl_event_mutex is a secondary means of preparing for + * the failure of tpl_gthread_pause_in_idle(). + * If tpl_gthread_pause_in_idle()is successful, + * locking wl_event_mutex does not affect. */ + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) { TPL_ERR("Failed to timeout reset. 
tbm_queue(%p) surface(%p)", wl_egl_surface->tbm_queue, surface); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + tpl_gthread_continue(wl_egl_display->thread); return NULL; } else { tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; } + + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + tpl_gthread_continue(wl_egl_display->thread); } if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { -- 2.7.4 From 5b989cb1a0f883b59f0513628f0a344f249e26d6 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 12 Jan 2023 16:16:50 +0900 Subject: [PATCH 06/16] wl_egl: support for frontbuffer mode set by surface - Patch for supporting this API. wl_egl_window_tizen_set_frontbuffer_mode() AS-IS: - If the application, which wants to use frontbuffer rendering, sets the frontbuffer mode using setenv(), EGL driver checks whether frontbuffer mode is set using getenv() and if it true calls tpl_surface_set_frontbuffer_mode(). PROBLEMS: - The operation using setenv()/getenv() is not thread safe. - Using env value to set frontbuffer mode cannot manage on/off in runtime. - Using env value to set frontbuffer mode cannot set by surface. TO-BE: - Application would be better to use this API wl_egl_window_tizen_set_frontbuffer_mode() - This API makes supports the setting of frontbuffer mode to the desired surface(window). - This API gurantee thread safety using tpl_surface object lock. - Using this API can help application to turn on/off the frontbuffer mode in runtime. Change-Id: I608309869dcb9d0bd0ba42c7e54afee6da1b5e04 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 92 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 72 insertions(+), 20 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 10ff5f5..681b8ac 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1402,6 +1402,37 @@ __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) return commit_sync_fd; } +static void +__cb_client_window_set_frontbuffer_mode(struct wl_egl_window *wl_egl_window, + void *private, int set) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + tpl_surface_t *surface = wl_egl_surface->tpl_surface; + TPL_CHECK_ON_NULL_RETURN(surface); + + tpl_bool_t is_frontbuffer_mode = set ? TPL_TRUE : TPL_FALSE; + + TPL_OBJECT_LOCK(surface); + if (is_frontbuffer_mode == surface->is_frontbuffer_mode) { + TPL_OBJECT_UNLOCK(surface); + return; + } + + TPL_INFO("[FRONTBUFFER_MODE]", + "[%s] wl_egl_surface(%p) wl_egl_window(%p)", + is_frontbuffer_mode ? 
"ON" : "OFF", + wl_egl_surface, wl_egl_window); + + surface->is_frontbuffer_mode = is_frontbuffer_mode; + + TPL_OBJECT_UNLOCK(surface); +} + #if TIZEN_FEATURE_ENABLE static int __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private) @@ -1807,6 +1838,7 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) tizen_private->set_window_serial_callback = (void *) __cb_set_window_serial_callback; tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd; + tizen_private->set_frontbuffer_callback = (void *)__cb_client_window_set_frontbuffer_mode; #if TIZEN_FEATURE_ENABLE tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd; #else @@ -2597,14 +2629,32 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_surface->width = surface->width; wl_egl_surface->height = surface->height; - if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { - /* If surface->frontbuffer is already set in frontbuffer mode, - * it will return that frontbuffer if it is still activated, - * otherwise dequeue the new buffer after initializing - * surface->frontbuffer to NULL. */ - if (is_activated && !wl_egl_surface->reset) { - bo_name = _get_tbm_surface_bo_name(surface->frontbuffer); + /* If surface->frontbuffer is not null, the frontbuffer rendering mode will be + * maintained if the surface state meets the conditions below. + * 1. surface->is_frontbuffer_mode == TPL_TRUE + * - It may be changed to true or false by calling + * tpl_surface_set_frontbuffer_mode(will be deprecated) + * or + * wl_egl_window_tizen_set_frontbuffer_mode (recommanded) + * 2. is_activated == TPL_TRUE + * - To check wheter direct display is possible. + * 3. wl_egl_surface->reset == TPL_FALSE + * - tbm_queue reset should not have occured due to window resize. + * If surface is not satisfied with any of above conditions, + * frontbuffer rendering will be stopped and surface->frontbuffer becomes null. 
+ * */ + if (surface->frontbuffer) { + if (!surface->is_frontbuffer_mode || + !is_activated || + wl_egl_surface->reset) { + surface->frontbuffer = NULL; + wl_egl_surface->need_to_enqueue = TPL_TRUE; + TPL_INFO("[FRONTBUFFER RENDERING STOP]", + "wl_egl_surface(%p) wl_egl_window(%p)", + wl_egl_surface, wl_egl_surface->wl_egl_window); + } else { + bo_name = _get_tbm_surface_bo_name(surface->frontbuffer); TPL_LOG_T("WL_EGL", "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", surface->frontbuffer, bo_name); @@ -2613,12 +2663,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, bo_name); tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return surface->frontbuffer; - } else { - surface->frontbuffer = NULL; - wl_egl_surface->need_to_enqueue = TPL_TRUE; } - } else { - surface->frontbuffer = NULL; } tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue, @@ -2656,8 +2701,15 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, } } - if (surface->is_frontbuffer_mode && is_activated) + if (surface->is_frontbuffer_mode && is_activated) { + if (surface->frontbuffer == NULL) { + TPL_INFO("[FRONTBUFFER RENDERING START]", + "wl_egl_surface(%p) wl_egl_window(%p) bo(%d)", + wl_egl_surface, wl_egl_surface->wl_egl_window, + _get_tbm_surface_bo_name(tbm_surface)); + } surface->frontbuffer = tbm_surface; + } wl_egl_surface->reset = TPL_FALSE; @@ -2772,8 +2824,13 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!wl_egl_surface->need_to_enqueue || !wl_egl_buffer->need_to_commit) { - TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", - ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); + + if (acquire_fence != -1) { + close(acquire_fence); + acquire_fence = -1; + } + TPL_LOG_T("FRONTBUFFER_MODE", "[ENQ_SKIP] tbm_surface(%p) bo(%d) need not to enqueue", + tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); tpl_gmutex_unlock(&wl_egl_buffer->mutex); return TPL_ERROR_NONE; @@ -2790,11 +2847,6 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, */ if (surface->frontbuffer == tbm_surface) wl_egl_surface->need_to_enqueue = TPL_FALSE; - - if (acquire_fence != -1) { - close(acquire_fence); - acquire_fence = -1; - } } if (wl_egl_buffer->acquire_fence_fd != -1) -- 2.7.4 From 8473be15ace81d687dbe7fe06271573e8255fd1c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 12 Jan 2023 17:15:20 +0900 Subject: [PATCH 07/16] wayland-egl-tizen: Add description of set_frontbuffer_mode Change-Id: Ic9e966c1b7b7f4064996765cb5e4f63cd55f813a Signed-off-by: Joonbum Ko --- src/wayland-egl-tizen/wayland-egl-tizen.h | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.h b/src/wayland-egl-tizen/wayland-egl-tizen.h index 206f632..2cb0d25 100644 --- a/src/wayland-egl-tizen/wayland-egl-tizen.h +++ b/src/wayland-egl-tizen/wayland-egl-tizen.h @@ -58,6 +58,27 @@ void wl_egl_window_tizen_set_buffer_transform(struct wl_egl_window *egl_window, int wl_output_transform); +/** + * Set/unset frontbuffer rendering mode. + * + * It makes be able to set frontbuffer rendering mode + * by passed wl_egl_window. + * Even if caller set the frontbuffer mode, it works only if + * frontbuffer rendering is possible. + * Even if frontbuffer rendering is impossible, + * the set value is maintained, and when it is possible, + * it operates as frontbuffer mode. 
+ * If the wl_egl_window does not have to use the fronbuffer mode, + * client can call it with set=0. + * + * Important * + * It is recommaned to call before draw call of every frame begins. + * + * @param egl_window handle to wl_egl_window. + * @param set 1 if user wants to set wl_egl_window to frontbuffer mode. + * 0 if user wants to unset frontbuffer mode. + * default : 0 + */ void wl_egl_window_tizen_set_frontbuffer_mode(struct wl_egl_window *egl_window, int set); @@ -73,7 +94,6 @@ void wl_egl_window_tizen_set_window_serial(struct wl_egl_window *egl_window, unsigned int serial); -/* temporary APIs for testing sync feature */ /** * Create a sync fence fd that can tell wl_surface_commit done. * -- 2.7.4 From c32a3c244a9bc923b56aaade266be6a6f72dd9bd Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 12 Jan 2023 17:15:40 +0900 Subject: [PATCH 08/16] Package version up to 1.10.1 Change-Id: I8018c7726952577eda15361b938c722e09f7b13c Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index cc8eeb6..d62c33c 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -3,8 +3,8 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 -%define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 14 +%define TPL_VERSION_MINOR 10 +%define TPL_VERSION_PATCH 1 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From 2325eb727a61308e6b0da7eb77afc39440dbeb46 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 27 Jan 2023 20:08:05 +0900 Subject: [PATCH 09/16] Add new API checks if fence sync is available /** * Check the surface can support fence sync mechanism. * * It is recommended that checking fence sync is available * for every frame because the results may change depending on * frontbuffer rendering is activated or not. * * @param surface surface to check fence sync is available. * @return TPL_TRUE if tpl_surface can support it. */ tpl_bool_t tpl_surface_fence_sync_is_available(tpl_surface_t *surface); - This API helps DDK to determine whether to deliver the acquire_fence to signal the render complete when call the surface_enqueue. - In backend where waiting fence is not implemented, the result of fixed to TPL_FALSE will be returned. - The result from the backend with the waiting fence implementation depends on whether the frontbuffer rendering is activated. Change-Id: I779718fdc7e8efc7890e17b0d4df4d81974a7907 Signed-off-by: Joonbum Ko --- src/tpl.h | 13 +++++++++++++ src/tpl_internal.h | 1 + src/tpl_surface.c | 19 +++++++++++++++++++ 3 files changed, 33 insertions(+) diff --git a/src/tpl.h b/src/tpl.h index 7250315..aea3770 100644 --- a/src/tpl.h +++ b/src/tpl.h @@ -842,6 +842,19 @@ tpl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface); /** + * Check the surface can support fence sync mechanism. + * + * It is recommended that checking fence sync is available + * for every frame because the results may change depending on + * frontbuffer rendering is activated or not. + * + * @param surface surface to check fence sync is available. + * @return TPL_TRUE if tpl_surface can support it. + */ +tpl_bool_t +tpl_surface_fence_sync_is_available(tpl_surface_t *surface); + +/** * Present mode types. 
* * @TPL_DISPLAY_MODE_IMMEDIATE_KHR: The presentation engine does not wait for diff --git a/src/tpl_internal.h b/src/tpl_internal.h index 0fa3988..3f31a45 100755 --- a/src/tpl_internal.h +++ b/src/tpl_internal.h @@ -113,6 +113,7 @@ struct _tpl_surface_backend { tpl_result_t (*set_post_interval)(tpl_surface_t *surface, int post_interval); void (*get_size)(tpl_surface_t *surface, int *width, int *height); + tpl_bool_t (*fence_sync_is_available)(tpl_surface_t *surface); }; struct _tpl_object { diff --git a/src/tpl_surface.c b/src/tpl_surface.c index e05009e..9bed148 100755 --- a/src/tpl_surface.c +++ b/src/tpl_surface.c @@ -572,3 +572,22 @@ tpl_surface_set_rotation_capability(tpl_surface_t *surface, tpl_bool_t set) return ret; } + +tpl_bool_t +tpl_surface_fence_sync_is_available(tpl_surface_t *surface) +{ + tpl_bool_t ret = TPL_FALSE; + + if (!surface || (surface->type != TPL_SURFACE_TYPE_WINDOW)) { + TPL_ERR("Invalid surface!"); + return ret; + } + + TPL_OBJECT_LOCK(surface); + if (surface->backend.fence_sync_is_available) + ret = surface->backend.fence_sync_is_available(surface); + + TPL_OBJECT_UNLOCK(surface); + + return ret; +} -- 2.7.4 From 375b62b16fa791832b95023cfc255a6f7021c385 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 27 Jan 2023 20:16:44 +0900 Subject: [PATCH 10/16] wl_egl: Implement the backend function of fence_sync_is_available Change-Id: I7e6a6891ff12b6869e66cf3a2d5f64098b04cb94 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 681b8ac..032c9dc 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -152,6 +152,7 @@ struct _tpl_wl_egl_surface { tpl_bool_t vblank_done; tpl_bool_t set_serial_is_used; tpl_bool_t initialized_in_thread; + tpl_bool_t frontbuffer_activated; /* To make sure that tpl_gsource has been successfully finalized. 
*/ tpl_bool_t gsource_finalized; @@ -1799,6 +1800,7 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) wl_egl_surface->set_serial_is_used = TPL_FALSE; wl_egl_surface->gsource_finalized = TPL_FALSE; wl_egl_surface->initialized_in_thread = TPL_FALSE; + wl_egl_surface->frontbuffer_activated = TPL_FALSE; wl_egl_surface->latest_transform = -1; wl_egl_surface->render_done_cnt = 0; @@ -2357,6 +2359,15 @@ __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); } +static tpl_bool_t +__tpl_wl_egl_surface_fence_sync_is_available(tpl_surface_t *surface) +{ + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + + return !wl_egl_surface->frontbuffer_activated; +} + #define CAN_DEQUEUE_TIMEOUT_MS 10000 tpl_result_t @@ -2650,6 +2661,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_surface->reset) { surface->frontbuffer = NULL; wl_egl_surface->need_to_enqueue = TPL_TRUE; + wl_egl_surface->frontbuffer_activated = TPL_FALSE; TPL_INFO("[FRONTBUFFER RENDERING STOP]", "wl_egl_surface(%p) wl_egl_window(%p)", wl_egl_surface, wl_egl_surface->wl_egl_window); @@ -2661,6 +2673,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + wl_egl_surface->frontbuffer_activated = TPL_TRUE; tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return surface->frontbuffer; } @@ -3643,6 +3656,8 @@ __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend) __tpl_wl_egl_surface_set_post_interval; backend->get_size = __tpl_wl_egl_surface_get_size; + backend->fence_sync_is_available = + __tpl_wl_egl_surface_fence_sync_is_available; } static void -- 2.7.4 From 8dfd4657161a4bf24f24376d06326834bf75116b Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 17 Mar 2023 19:45:17 +0900 Subject: [PATCH 11/16] Package version up to 1.10.2 Change-Id: Ibac91ed8fd994c1882d9433bc4dbedfd20cfdf21 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index d62c33c..0576a1b 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 10 -%define TPL_VERSION_PATCH 1 +%define TPL_VERSION_PATCH 2 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From d071a1c9a30e96f11bbca25a57e2adea2a418e22 Mon Sep 17 00:00:00 2001 From: "jinbong, Lee" Date: Fri, 17 Mar 2023 19:27:02 +0900 Subject: [PATCH 12/16] wl_egl : remive wl_egl_buffer in vblank's waiting buffers when wl_egl_buffer is freed - Bug fix . if __cb_wl_egl_buffer_free is called then some wl_egl_buffer can be in waiting buffers for vblank. so it must be removed it before wl_egl_buffer is freed. 
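The ordering matters because the wl-egl thread's vblank handler may still reach the buffer through the waiting list. A minimal sketch of the rule this fix enforces, using simplified stand-in types rather than the actual libtpl-egl structures:

    #include <glib.h>
    #include <stdlib.h>

    struct vblank_ctx {
        GMutex mutex;
        GList *waiting_buffers;   /* buffers still waiting for a vblank event */
    };

    static void
    buffer_free(struct vblank_ctx *vblank, void *buffer)
    {
        /* Unlink the buffer under the same lock the vblank handler takes... */
        g_mutex_lock(&vblank->mutex);
        vblank->waiting_buffers = g_list_remove(vblank->waiting_buffers, buffer);
        g_mutex_unlock(&vblank->mutex);

        /* ...and only then release it. Freeing first would leave a dangling
         * entry for the vblank handler to dereference. */
        free(buffer);
    }
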
Change-Id: Ia08a7ecc6dcfd8f9d63e644b7cb733740672666b --- src/tpl_wl_egl_thread.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 681b8ac..113a8a7 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -3661,6 +3661,14 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) } tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + if (wl_egl_surface->vblank) { + tpl_gmutex_lock(&wl_egl_surface->vblank->mutex); + if (wl_egl_surface->vblank->waiting_buffers) + __tpl_list_remove_data(wl_egl_surface->vblank->waiting_buffers, (void *)wl_egl_buffer, + TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex); + } + if (wl_egl_display) { if (wl_egl_buffer->wl_buffer) { wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, -- 2.7.4 From 0cfe70380f82a241913b0c504d8ec0ee7222b196 Mon Sep 17 00:00:00 2001 From: Xuelian Bai Date: Tue, 21 Mar 2023 14:56:27 +0800 Subject: [PATCH 13/16] Fix memory leak Wait until tbm_queue is destroyed, on pixel phone swapchain is destroyed before tbm_queue is destroyed, then tbm_queue is never destroyed, then cause memory leak Change-Id: I897be13684f0e1f901aad751111c1e469414e178 --- src/tpl_wl_vk_thread.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index fe0d337..341360f 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -1817,7 +1817,7 @@ __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) wl_vk_surface->sent_message = DESTROY_QUEUE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - while (!swapchain->tbm_queue) + while (swapchain->tbm_queue) tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); -- 2.7.4 From 657f745288a970f36027a2f8c91842687a138ec0 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 14 Apr 2023 19:00:36 +0900 Subject: [PATCH 14/16] wl_egl: Initialize vblank_done flag when dequeue timeout occured - Rarely, even if client cannot receive vblank event from tdm server, timeout may occur in can_dequeue. - In this case, if wl_egl_surface->vblank_done flag is not initialized to TPL_TRUE, problem situation may occur waiting vblank without calling tdm_client_vblank_wait after force_flush(can_dequeue_timeout). Change-Id: If3f8eee13b5ae91a3728f189f53aa25720696b12 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 13b7404..16df22e 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2604,6 +2604,8 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; } + wl_egl_surface->vblank_done = TPL_TRUE; + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); tpl_gthread_continue(wl_egl_display->thread); } -- 2.7.4 From a3466ec97f5240eea7b0551ea4582bfb6ea841f7 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 2 Jun 2023 13:15:03 +0900 Subject: [PATCH 15/16] wl_egl/vk: use new tdm API to handle pending events. 
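The call appears intended to flush any events already sitting in the tdm_client's queue at the time the vblank object is created on the wl-egl thread; otherwise a later vblank wait could block on an event that has in fact already arrived. A condensed view of where the new call sits in _thread_create_tdm_client_vblank() after this patch (only calls that appear in the diff below, error handling trimmed):

    tdm_err = tdm_client_handle_pending_events(tdm_client);
    if (tdm_err != TDM_ERROR_NONE)
        TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err);

    tdm_client_vblank_set_enable_fake(tdm_vblank, 1);
    tdm_client_vblank_set_sync(tdm_vblank, 0);
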
refer to https://review.tizen.org/gerrit/#/c/platform/core/uifw/libtdm/+/293679/ client: Add new API to handle pending events fb76bf4 Change-Id: I036bc7b9f704b9f1bd250a6c651d5c90a9d94a65 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 5 +++++ src/tpl_wl_vk_thread.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 16df22e..7a57437 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1987,6 +1987,11 @@ _thread_create_tdm_client_vblank(tdm_client *tdm_client) return NULL; } + tdm_err = tdm_client_handle_pending_events(tdm_client); + if (tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err); + } + tdm_client_vblank_set_enable_fake(tdm_vblank, 1); tdm_client_vblank_set_sync(tdm_vblank, 0); diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 341360f..317d005 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -1125,6 +1125,11 @@ _thread_create_tdm_client_vblank(tdm_client *tdm_client) return NULL; } + tdm_err = tdm_client_handle_pending_events(tdm_client); + if (tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err); + } + tdm_client_vblank_set_enable_fake(vblank, 1); tdm_client_vblank_set_sync(vblank, 0); -- 2.7.4 From f4e7cbee1f3249f96c2a56bd765afbd7f0b9f092 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 2 Jun 2023 13:21:49 +0900 Subject: [PATCH 16/16] Package version up to 1.10.3 Change-Id: I4f29af23350c5f5c8ffcea997021256f579f93b1 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index 0576a1b..ba39ffb 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 10 -%define TPL_VERSION_PATCH 2 +%define TPL_VERSION_PATCH 3 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4
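
Taken together, patches 06, 07, 09 and 10 replace the setenv()-based frontbuffer switch with a per-window API and let the EGL driver ask whether an acquire fence will actually be consumed. A minimal client-side usage sketch (illustrative only: the include path and the surrounding EGL/wl_egl_window setup are assumed; only wl_egl_window_tizen_set_frontbuffer_mode() comes from this series):

    #include <wayland-egl-tizen.h>

    /* 'egl_window' is an existing struct wl_egl_window *;
     * 'direct_ok' is the application's own per-frame decision. */
    static void
    draw_frame(struct wl_egl_window *egl_window, int direct_ok)
    {
        /* Per the header comment, call this before the draw calls of each
         * frame begin: 1 requests frontbuffer rendering, 0 unsets it. */
        wl_egl_window_tizen_set_frontbuffer_mode(egl_window, direct_ok ? 1 : 0);

        /* ... GL rendering and eglSwapBuffers() as usual ... */
    }

On the driver side, tpl_surface_fence_sync_is_available() (patches 09/10) is meant to be checked every frame at enqueue time: it returns TPL_FALSE while frontbuffer rendering is activated, in which case passing an acquire fence with the enqueue is pointless because the enqueue is effectively skipped.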