From 6d7abc10a4473863b2ad999554ddcbfd6a6c82ee Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 6 Dec 2022 17:51:24 +0900 Subject: [PATCH 01/16] wl_egl: Delete calling wait_idle in surface_fini - tpl_gthread_wait_idle will be called in tpl_gthread_pause_in_idle() tpl_gthread_pause_in_idle will be called in buffer_clear() So, calling wait_idle() is duplicated & meaningless operation Change-Id: I50b2aa2e73f5a22be5860ab3e6be241def2d862f Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 6804f2c..e780787 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2179,8 +2179,6 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) wl_egl_surface, wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue); - tpl_gthread_wait_idle(wl_egl_display->thread); - _tpl_wl_egl_surface_buffer_clear(wl_egl_surface); if (wl_egl_surface->surf_source) { -- 2.7.4 From e7fff1a3fecb076806a7b9c45fdecf3a7f450a84 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 4 Jan 2023 16:29:57 +0900 Subject: [PATCH 02/16] utils_gthread: cancel pause when wait_idle failed - If the tpl_gthread_wait_idle fails with timeout, it should be unlock pause_mutex immediately. Change-Id: If33cc3c0b617ca34bb63d110d927e0dd7c022526 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 56 ++++++++++++++++++++++++++++++++++++------------- src/tpl_utils_gthread.h | 8 +++++-- 2 files changed, 48 insertions(+), 16 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index bdc1109..68c2faf 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -481,33 +481,30 @@ _thread_idle_cb(gpointer data) /* If the caller thread of tpl_gthread_wait_idle locked the pause_mutex, * thread will be paused here until unlock */ g_mutex_lock(>hread->pause_mutex); + gthread->paused = TPL_FALSE; g_mutex_unlock(>hread->pause_mutex); return G_SOURCE_REMOVE; } -void +tpl_result_t tpl_gthread_wait_idle(tpl_gthread *gthread) { - TPL_CHECK_ON_NULL_RETURN(gthread); - GSource *idle_source = NULL; gint64 end_time; gboolean ret = TRUE; + tpl_result_t res = TPL_ERROR_NONE; TPL_DEBUG("[WAIT IDLE] BEGIN"); g_mutex_lock(>hread->idle_mutex); - if (gthread->is_idle) { - g_mutex_unlock(>hread->idle_mutex); - return; - } idle_source = g_idle_source_new(); if (idle_source == NULL) { TPL_WARN("Failed to create and attach idle source"); + res = TPL_ERROR_INVALID_OPERATION; g_mutex_unlock(>hread->idle_mutex); - return; + return res; } g_source_set_priority(idle_source, G_PRIORITY_LOW); @@ -526,6 +523,7 @@ tpl_gthread_wait_idle(tpl_gthread *gthread) end_time); if (!ret) { TPL_ERR("wait_idle timeout!"); + res = TPL_ERROR_TIME_OUT; break; } } while (!gthread->is_idle); @@ -535,16 +533,47 @@ tpl_gthread_wait_idle(tpl_gthread *gthread) g_mutex_unlock(>hread->idle_mutex); TPL_DEBUG("[WAIT IDLE] END"); + + return res; } -void +tpl_bool_t tpl_gthread_pause_in_idle(tpl_gthread *gthread) { - TPL_CHECK_ON_NULL_RETURN(gthread); + TPL_CHECK_ON_NULL_RETURN_VAL(gthread, TPL_FALSE); + + tpl_result_t res; + int cnt = 0; + + /* Assume three threads. (M, C, wl-egl-thread) + * C thread : already locked pause_mutex and doing their own job. + * M thread : call pause_in_idle and trying to lock pause_mutex. + * wl-egl-thread : trying to lock pause_mutex in _thread_idle_cb. + * + * When C thread calls tpl_gthread_continue and unlock pause_mutex, + * M thread may receive schedule and lock pause_mutex. 
+ * In that case, M thread should yield to wl-egl-thread, which is + * paused in thread_idle_cb(). */ + do { + g_mutex_lock(>hread->pause_mutex); + if (gthread->paused) { + g_mutex_unlock(>hread->pause_mutex); + sched_yield(); + } else { + break; + } + } while (++cnt <= 100); - g_mutex_lock(>hread->pause_mutex); - tpl_gthread_wait_idle(gthread); - gthread->paused = TPL_TRUE; + res = tpl_gthread_wait_idle(gthread); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to wait idle. | res(%d)", res); + gthread->paused = TPL_FALSE; + g_mutex_unlock(>hread->pause_mutex); + } else { + gthread->paused = TPL_TRUE; + } + + return gthread->paused; } void @@ -553,6 +582,5 @@ tpl_gthread_continue(tpl_gthread *gthread) TPL_CHECK_ON_NULL_RETURN(gthread); if (!gthread->paused) return; - gthread->paused = TPL_FALSE; g_mutex_unlock(>hread->pause_mutex); } \ No newline at end of file diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 66552c4..c6ae5a4 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -205,16 +205,20 @@ tpl_gcond_signal(tpl_gcond *gcond); * attach idle source and waiting for idle callback * * @param gthread Pointer to tpl_gthread + * + * @return tpl_result_t result of wait_idle */ -void +tpl_result_t tpl_gthread_wait_idle(tpl_gthread *gthread); /** * pause thread when idle callback is called * * @param gthread Pointer to tpl_gthread + * + * @return TPL_TRUE if succeed to pause, TPL_FALSE otherwise. */ -void +tpl_bool_t tpl_gthread_pause_in_idle(tpl_gthread *gthread); /** -- 2.7.4 From f4ecb1ad76e6a1172da49e223cd6f6afe82b6eb5 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 4 Jan 2023 16:49:41 +0900 Subject: [PATCH 03/16] wl_egl_thread: prepare for the failure of pause_in_idle - The calling tpl_gthread_pause_in_idle() move from force_flush() to outside. - Add locking wl_event_mutex after trying tpl_gthread_pause_in_idle. - Locking wl_event_mutex is a secondary means of preparing for the failure of tpl_gthread_pause_in_idle(). If tpl_gthread_pause_in_idle()is successful, locking wl_event_mutex does not affect. 
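- For reference, the resulting call pattern around the force flush looks
  roughly like the sketch below (a condensed, non-authoritative summary of
  the dequeue path changed in this patch; error handling abbreviated):

    tpl_gthread_pause_in_idle(wl_egl_display->thread);
    /* Secondary protection: if pause_in_idle() failed, this lock still
     * keeps the wl-egl-thread from dispatching wl events during the
     * flush. If the pause succeeded, taking the lock has no effect. */
    tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);

    if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
            tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
            tpl_gthread_continue(wl_egl_display->thread);
            return NULL;
    }

    tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
    tpl_gthread_continue(wl_egl_display->thread);
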
Change-Id: I35132da013f67921c0f6deecc0909118461f3872 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index e780787..10ff5f5 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2330,11 +2330,8 @@ __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) tpl_result_t _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) { - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_gthread_pause_in_idle(wl_egl_display->thread); - _print_buffer_lists(wl_egl_surface); if (wl_egl_surface->vblank) { @@ -2350,7 +2347,6 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)", wl_egl_surface->tbm_queue, tsq_err); - tpl_gthread_continue(wl_egl_display->thread); return TPL_ERROR_INVALID_OPERATION; } @@ -2378,8 +2374,6 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) _print_buffer_lists(wl_egl_surface); - tpl_gthread_continue(wl_egl_display->thread); - return TPL_ERROR_NONE; } @@ -2550,13 +2544,25 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", wl_egl_surface->tbm_queue, surface); + + tpl_gthread_pause_in_idle(wl_egl_display->thread); + /* Locking wl_event_mutex is a secondary means of preparing for + * the failure of tpl_gthread_pause_in_idle(). + * If tpl_gthread_pause_in_idle()is successful, + * locking wl_event_mutex does not affect. */ + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) { TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)", wl_egl_surface->tbm_queue, surface); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + tpl_gthread_continue(wl_egl_display->thread); return NULL; } else { tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; } + + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + tpl_gthread_continue(wl_egl_display->thread); } if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { -- 2.7.4 From 5b989cb1a0f883b59f0513628f0a344f249e26d6 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 12 Jan 2023 16:16:50 +0900 Subject: [PATCH 04/16] wl_egl: support for frontbuffer mode set by surface - Patch for supporting this API. wl_egl_window_tizen_set_frontbuffer_mode() AS-IS: - If the application, which wants to use frontbuffer rendering, sets the frontbuffer mode using setenv(), EGL driver checks whether frontbuffer mode is set using getenv() and if it true calls tpl_surface_set_frontbuffer_mode(). PROBLEMS: - The operation using setenv()/getenv() is not thread safe. - Using env value to set frontbuffer mode cannot manage on/off in runtime. - Using env value to set frontbuffer mode cannot set by surface. TO-BE: - Application would be better to use this API wl_egl_window_tizen_set_frontbuffer_mode() - This API makes supports the setting of frontbuffer mode to the desired surface(window). - This API gurantee thread safety using tpl_surface object lock. - Using this API can help application to turn on/off the frontbuffer mode in runtime. 
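- A minimal client-side usage sketch (hypothetical application code;
  egl_window stands for the application's struct wl_egl_window handle):

    /* Enable frontbuffer rendering for this window before the draw
     * calls of the frame begin. It only takes effect while direct
     * display is possible; otherwise the setting is kept and applied
     * once it becomes possible. */
    wl_egl_window_tizen_set_frontbuffer_mode(egl_window, 1);

    /* ... render the frame and call eglSwapBuffers() ... */

    /* The mode can also be turned off again at runtime, per surface. */
    wl_egl_window_tizen_set_frontbuffer_mode(egl_window, 0);
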
Change-Id: I608309869dcb9d0bd0ba42c7e54afee6da1b5e04 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 92 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 72 insertions(+), 20 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 10ff5f5..681b8ac 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1402,6 +1402,37 @@ __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) return commit_sync_fd; } +static void +__cb_client_window_set_frontbuffer_mode(struct wl_egl_window *wl_egl_window, + void *private, int set) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + tpl_surface_t *surface = wl_egl_surface->tpl_surface; + TPL_CHECK_ON_NULL_RETURN(surface); + + tpl_bool_t is_frontbuffer_mode = set ? TPL_TRUE : TPL_FALSE; + + TPL_OBJECT_LOCK(surface); + if (is_frontbuffer_mode == surface->is_frontbuffer_mode) { + TPL_OBJECT_UNLOCK(surface); + return; + } + + TPL_INFO("[FRONTBUFFER_MODE]", + "[%s] wl_egl_surface(%p) wl_egl_window(%p)", + is_frontbuffer_mode ? "ON" : "OFF", + wl_egl_surface, wl_egl_window); + + surface->is_frontbuffer_mode = is_frontbuffer_mode; + + TPL_OBJECT_UNLOCK(surface); +} + #if TIZEN_FEATURE_ENABLE static int __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private) @@ -1807,6 +1838,7 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) tizen_private->set_window_serial_callback = (void *) __cb_set_window_serial_callback; tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd; + tizen_private->set_frontbuffer_callback = (void *)__cb_client_window_set_frontbuffer_mode; #if TIZEN_FEATURE_ENABLE tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd; #else @@ -2597,14 +2629,32 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_surface->width = surface->width; wl_egl_surface->height = surface->height; - if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { - /* If surface->frontbuffer is already set in frontbuffer mode, - * it will return that frontbuffer if it is still activated, - * otherwise dequeue the new buffer after initializing - * surface->frontbuffer to NULL. */ - if (is_activated && !wl_egl_surface->reset) { - bo_name = _get_tbm_surface_bo_name(surface->frontbuffer); + /* If surface->frontbuffer is not null, the frontbuffer rendering mode will be + * maintained if the surface state meets the conditions below. + * 1. surface->is_frontbuffer_mode == TPL_TRUE + * - It may be changed to true or false by calling + * tpl_surface_set_frontbuffer_mode(will be deprecated) + * or + * wl_egl_window_tizen_set_frontbuffer_mode (recommanded) + * 2. is_activated == TPL_TRUE + * - To check wheter direct display is possible. + * 3. wl_egl_surface->reset == TPL_FALSE + * - tbm_queue reset should not have occured due to window resize. + * If surface is not satisfied with any of above conditions, + * frontbuffer rendering will be stopped and surface->frontbuffer becomes null. 
+ * */ + if (surface->frontbuffer) { + if (!surface->is_frontbuffer_mode || + !is_activated || + wl_egl_surface->reset) { + surface->frontbuffer = NULL; + wl_egl_surface->need_to_enqueue = TPL_TRUE; + TPL_INFO("[FRONTBUFFER RENDERING STOP]", + "wl_egl_surface(%p) wl_egl_window(%p)", + wl_egl_surface, wl_egl_surface->wl_egl_window); + } else { + bo_name = _get_tbm_surface_bo_name(surface->frontbuffer); TPL_LOG_T("WL_EGL", "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", surface->frontbuffer, bo_name); @@ -2613,12 +2663,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, bo_name); tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return surface->frontbuffer; - } else { - surface->frontbuffer = NULL; - wl_egl_surface->need_to_enqueue = TPL_TRUE; } - } else { - surface->frontbuffer = NULL; } tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue, @@ -2656,8 +2701,15 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, } } - if (surface->is_frontbuffer_mode && is_activated) + if (surface->is_frontbuffer_mode && is_activated) { + if (surface->frontbuffer == NULL) { + TPL_INFO("[FRONTBUFFER RENDERING START]", + "wl_egl_surface(%p) wl_egl_window(%p) bo(%d)", + wl_egl_surface, wl_egl_surface->wl_egl_window, + _get_tbm_surface_bo_name(tbm_surface)); + } surface->frontbuffer = tbm_surface; + } wl_egl_surface->reset = TPL_FALSE; @@ -2772,8 +2824,13 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!wl_egl_surface->need_to_enqueue || !wl_egl_buffer->need_to_commit) { - TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", - ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); + + if (acquire_fence != -1) { + close(acquire_fence); + acquire_fence = -1; + } + TPL_LOG_T("FRONTBUFFER_MODE", "[ENQ_SKIP] tbm_surface(%p) bo(%d) need not to enqueue", + tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); tpl_gmutex_unlock(&wl_egl_buffer->mutex); return TPL_ERROR_NONE; @@ -2790,11 +2847,6 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, */ if (surface->frontbuffer == tbm_surface) wl_egl_surface->need_to_enqueue = TPL_FALSE; - - if (acquire_fence != -1) { - close(acquire_fence); - acquire_fence = -1; - } } if (wl_egl_buffer->acquire_fence_fd != -1) -- 2.7.4 From 8473be15ace81d687dbe7fe06271573e8255fd1c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 12 Jan 2023 17:15:20 +0900 Subject: [PATCH 05/16] wayland-egl-tizen: Add description of set_frontbuffer_mode Change-Id: Ic9e966c1b7b7f4064996765cb5e4f63cd55f813a Signed-off-by: Joonbum Ko --- src/wayland-egl-tizen/wayland-egl-tizen.h | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.h b/src/wayland-egl-tizen/wayland-egl-tizen.h index 206f632..2cb0d25 100644 --- a/src/wayland-egl-tizen/wayland-egl-tizen.h +++ b/src/wayland-egl-tizen/wayland-egl-tizen.h @@ -58,6 +58,27 @@ void wl_egl_window_tizen_set_buffer_transform(struct wl_egl_window *egl_window, int wl_output_transform); +/** + * Set/unset frontbuffer rendering mode. + * + * It makes be able to set frontbuffer rendering mode + * by passed wl_egl_window. + * Even if caller set the frontbuffer mode, it works only if + * frontbuffer rendering is possible. + * Even if frontbuffer rendering is impossible, + * the set value is maintained, and when it is possible, + * it operates as frontbuffer mode. 
+ * If the wl_egl_window does not have to use the fronbuffer mode, + * client can call it with set=0. + * + * Important * + * It is recommaned to call before draw call of every frame begins. + * + * @param egl_window handle to wl_egl_window. + * @param set 1 if user wants to set wl_egl_window to frontbuffer mode. + * 0 if user wants to unset frontbuffer mode. + * default : 0 + */ void wl_egl_window_tizen_set_frontbuffer_mode(struct wl_egl_window *egl_window, int set); @@ -73,7 +94,6 @@ void wl_egl_window_tizen_set_window_serial(struct wl_egl_window *egl_window, unsigned int serial); -/* temporary APIs for testing sync feature */ /** * Create a sync fence fd that can tell wl_surface_commit done. * -- 2.7.4 From c32a3c244a9bc923b56aaade266be6a6f72dd9bd Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 12 Jan 2023 17:15:40 +0900 Subject: [PATCH 06/16] Package version up to 1.10.1 Change-Id: I8018c7726952577eda15361b938c722e09f7b13c Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index cc8eeb6..d62c33c 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -3,8 +3,8 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 -%define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 14 +%define TPL_VERSION_MINOR 10 +%define TPL_VERSION_PATCH 1 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From 2325eb727a61308e6b0da7eb77afc39440dbeb46 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 27 Jan 2023 20:08:05 +0900 Subject: [PATCH 07/16] Add new API checks if fence sync is available /** * Check the surface can support fence sync mechanism. * * It is recommended that checking fence sync is available * for every frame because the results may change depending on * frontbuffer rendering is activated or not. * * @param surface surface to check fence sync is available. * @return TPL_TRUE if tpl_surface can support it. */ tpl_bool_t tpl_surface_fence_sync_is_available(tpl_surface_t *surface); - This API helps DDK to determine whether to deliver the acquire_fence to signal the render complete when call the surface_enqueue. - In backend where waiting fence is not implemented, the result of fixed to TPL_FALSE will be returned. - The result from the backend with the waiting fence implementation depends on whether the frontbuffer rendering is activated. Change-Id: I779718fdc7e8efc7890e17b0d4df4d81974a7907 Signed-off-by: Joonbum Ko --- src/tpl.h | 13 +++++++++++++ src/tpl_internal.h | 1 + src/tpl_surface.c | 19 +++++++++++++++++++ 3 files changed, 33 insertions(+) diff --git a/src/tpl.h b/src/tpl.h index 7250315..aea3770 100644 --- a/src/tpl.h +++ b/src/tpl.h @@ -842,6 +842,19 @@ tpl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface); /** + * Check the surface can support fence sync mechanism. + * + * It is recommended that checking fence sync is available + * for every frame because the results may change depending on + * frontbuffer rendering is activated or not. + * + * @param surface surface to check fence sync is available. + * @return TPL_TRUE if tpl_surface can support it. + */ +tpl_bool_t +tpl_surface_fence_sync_is_available(tpl_surface_t *surface); + +/** * Present mode types. 
* * @TPL_DISPLAY_MODE_IMMEDIATE_KHR: The presentation engine does not wait for diff --git a/src/tpl_internal.h b/src/tpl_internal.h index 0fa3988..3f31a45 100755 --- a/src/tpl_internal.h +++ b/src/tpl_internal.h @@ -113,6 +113,7 @@ struct _tpl_surface_backend { tpl_result_t (*set_post_interval)(tpl_surface_t *surface, int post_interval); void (*get_size)(tpl_surface_t *surface, int *width, int *height); + tpl_bool_t (*fence_sync_is_available)(tpl_surface_t *surface); }; struct _tpl_object { diff --git a/src/tpl_surface.c b/src/tpl_surface.c index e05009e..9bed148 100755 --- a/src/tpl_surface.c +++ b/src/tpl_surface.c @@ -572,3 +572,22 @@ tpl_surface_set_rotation_capability(tpl_surface_t *surface, tpl_bool_t set) return ret; } + +tpl_bool_t +tpl_surface_fence_sync_is_available(tpl_surface_t *surface) +{ + tpl_bool_t ret = TPL_FALSE; + + if (!surface || (surface->type != TPL_SURFACE_TYPE_WINDOW)) { + TPL_ERR("Invalid surface!"); + return ret; + } + + TPL_OBJECT_LOCK(surface); + if (surface->backend.fence_sync_is_available) + ret = surface->backend.fence_sync_is_available(surface); + + TPL_OBJECT_UNLOCK(surface); + + return ret; +} -- 2.7.4 From 375b62b16fa791832b95023cfc255a6f7021c385 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 27 Jan 2023 20:16:44 +0900 Subject: [PATCH 08/16] wl_egl: Implement the backend function of fence_sync_is_available Change-Id: I7e6a6891ff12b6869e66cf3a2d5f64098b04cb94 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 681b8ac..032c9dc 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -152,6 +152,7 @@ struct _tpl_wl_egl_surface { tpl_bool_t vblank_done; tpl_bool_t set_serial_is_used; tpl_bool_t initialized_in_thread; + tpl_bool_t frontbuffer_activated; /* To make sure that tpl_gsource has been successfully finalized. 
*/ tpl_bool_t gsource_finalized; @@ -1799,6 +1800,7 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) wl_egl_surface->set_serial_is_used = TPL_FALSE; wl_egl_surface->gsource_finalized = TPL_FALSE; wl_egl_surface->initialized_in_thread = TPL_FALSE; + wl_egl_surface->frontbuffer_activated = TPL_FALSE; wl_egl_surface->latest_transform = -1; wl_egl_surface->render_done_cnt = 0; @@ -2357,6 +2359,15 @@ __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); } +static tpl_bool_t +__tpl_wl_egl_surface_fence_sync_is_available(tpl_surface_t *surface) +{ + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + + return !wl_egl_surface->frontbuffer_activated; +} + #define CAN_DEQUEUE_TIMEOUT_MS 10000 tpl_result_t @@ -2650,6 +2661,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_surface->reset) { surface->frontbuffer = NULL; wl_egl_surface->need_to_enqueue = TPL_TRUE; + wl_egl_surface->frontbuffer_activated = TPL_FALSE; TPL_INFO("[FRONTBUFFER RENDERING STOP]", "wl_egl_surface(%p) wl_egl_window(%p)", wl_egl_surface, wl_egl_surface->wl_egl_window); @@ -2661,6 +2673,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + wl_egl_surface->frontbuffer_activated = TPL_TRUE; tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return surface->frontbuffer; } @@ -3643,6 +3656,8 @@ __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend) __tpl_wl_egl_surface_set_post_interval; backend->get_size = __tpl_wl_egl_surface_get_size; + backend->fence_sync_is_available = + __tpl_wl_egl_surface_fence_sync_is_available; } static void -- 2.7.4 From 8dfd4657161a4bf24f24376d06326834bf75116b Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 17 Mar 2023 19:45:17 +0900 Subject: [PATCH 09/16] Package version up to 1.10.2 Change-Id: Ibac91ed8fd994c1882d9433bc4dbedfd20cfdf21 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index d62c33c..0576a1b 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 10 -%define TPL_VERSION_PATCH 1 +%define TPL_VERSION_PATCH 2 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From d071a1c9a30e96f11bbca25a57e2adea2a418e22 Mon Sep 17 00:00:00 2001 From: "jinbong, Lee" Date: Fri, 17 Mar 2023 19:27:02 +0900 Subject: [PATCH 10/16] wl_egl : remive wl_egl_buffer in vblank's waiting buffers when wl_egl_buffer is freed - Bug fix . if __cb_wl_egl_buffer_free is called then some wl_egl_buffer can be in waiting buffers for vblank. so it must be removed it before wl_egl_buffer is freed. 
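- Illustrative failure sequence this change prevents (hypothetical
  timing; names follow the existing code):
  1. A wl_egl_buffer is queued on wl_egl_surface->vblank->waiting_buffers
     and waits for the next vblank event.
  2. __cb_wl_egl_buffer_free() frees that wl_egl_buffer while it is still
     linked in waiting_buffers.
  3. When the vblank event later arrives, the stale pointer is taken from
     waiting_buffers and freed memory is dereferenced.
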
Change-Id: Ia08a7ecc6dcfd8f9d63e644b7cb733740672666b --- src/tpl_wl_egl_thread.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 681b8ac..113a8a7 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -3661,6 +3661,14 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) } tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + if (wl_egl_surface->vblank) { + tpl_gmutex_lock(&wl_egl_surface->vblank->mutex); + if (wl_egl_surface->vblank->waiting_buffers) + __tpl_list_remove_data(wl_egl_surface->vblank->waiting_buffers, (void *)wl_egl_buffer, + TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex); + } + if (wl_egl_display) { if (wl_egl_buffer->wl_buffer) { wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, -- 2.7.4 From 0cfe70380f82a241913b0c504d8ec0ee7222b196 Mon Sep 17 00:00:00 2001 From: Xuelian Bai Date: Tue, 21 Mar 2023 14:56:27 +0800 Subject: [PATCH 11/16] Fix memory leak Wait until tbm_queue is destroyed, on pixel phone swapchain is destroyed before tbm_queue is destroyed, then tbm_queue is never destroyed, then cause memory leak Change-Id: I897be13684f0e1f901aad751111c1e469414e178 --- src/tpl_wl_vk_thread.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index fe0d337..341360f 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -1817,7 +1817,7 @@ __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) wl_vk_surface->sent_message = DESTROY_QUEUE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - while (!swapchain->tbm_queue) + while (swapchain->tbm_queue) tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); -- 2.7.4 From 657f745288a970f36027a2f8c91842687a138ec0 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 14 Apr 2023 19:00:36 +0900 Subject: [PATCH 12/16] wl_egl: Initialize vblank_done flag when dequeue timeout occured - Rarely, even if client cannot receive vblank event from tdm server, timeout may occur in can_dequeue. - In this case, if wl_egl_surface->vblank_done flag is not initialized to TPL_TRUE, problem situation may occur waiting vblank without calling tdm_client_vblank_wait after force_flush(can_dequeue_timeout). Change-Id: If3f8eee13b5ae91a3728f189f53aa25720696b12 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 13b7404..16df22e 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2604,6 +2604,8 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; } + wl_egl_surface->vblank_done = TPL_TRUE; + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); tpl_gthread_continue(wl_egl_display->thread); } -- 2.7.4 From a3466ec97f5240eea7b0551ea4582bfb6ea841f7 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 2 Jun 2023 13:15:03 +0900 Subject: [PATCH 13/16] wl_egl/vk: use new tdm API to handle pending events. 
refer to https://review.tizen.org/gerrit/#/c/platform/core/uifw/libtdm/+/293679/ client: Add new API to handle pending events fb76bf4 Change-Id: I036bc7b9f704b9f1bd250a6c651d5c90a9d94a65 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 5 +++++ src/tpl_wl_vk_thread.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 16df22e..7a57437 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1987,6 +1987,11 @@ _thread_create_tdm_client_vblank(tdm_client *tdm_client) return NULL; } + tdm_err = tdm_client_handle_pending_events(tdm_client); + if (tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err); + } + tdm_client_vblank_set_enable_fake(tdm_vblank, 1); tdm_client_vblank_set_sync(tdm_vblank, 0); diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 341360f..317d005 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -1125,6 +1125,11 @@ _thread_create_tdm_client_vblank(tdm_client *tdm_client) return NULL; } + tdm_err = tdm_client_handle_pending_events(tdm_client); + if (tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err); + } + tdm_client_vblank_set_enable_fake(vblank, 1); tdm_client_vblank_set_sync(vblank, 0); -- 2.7.4 From f4e7cbee1f3249f96c2a56bd765afbd7f0b9f092 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 2 Jun 2023 13:21:49 +0900 Subject: [PATCH 14/16] Package version up to 1.10.3 Change-Id: I4f29af23350c5f5c8ffcea997021256f579f93b1 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index 0576a1b..ba39ffb 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 10 -%define TPL_VERSION_PATCH 2 +%define TPL_VERSION_PATCH 3 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From c362b0b2860d1ebab098956e2567aeef8313952c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 15 Jun 2023 16:02:37 +0900 Subject: [PATCH 15/16] wl_egl: remove checking activated when dequeue buffer - Actual buffer will be attached when calling tbm_surface_queue_can_dequeue(). So calling wayland_tbm_client_queue_check_activate() at dequeue_buffer is not appropriate. Change-Id: Ie5500aa50f396e4a76160c703932d6c805e93d3d Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 7a57437..d305ec0 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2541,7 +2541,6 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_bool_t is_activated = 0; int bo_name = 0; tbm_surface_h tbm_surface = NULL; @@ -2626,22 +2625,6 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, * during below dequeue procedure. */ tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); - /* wayland client can check their states (ACTIVATED or DEACTIVATED) with - * below function [wayland_tbm_client_queue_check_activate()]. 
- * This function has to be called before tbm_surface_queue_dequeue() - * in order to know what state the buffer will be dequeued next. - * - * ACTIVATED state means non-composite mode. Client can get buffers which - can be displayed directly(without compositing). - * DEACTIVATED state means composite mode. Client's buffer will be displayed - by compositor(E20) with compositing. - */ - is_activated = wayland_tbm_client_queue_check_activate( - wl_egl_display->wl_tbm_client, - wl_egl_surface->tbm_queue); - - wl_egl_surface->is_activated = is_activated; - surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); wl_egl_surface->width = surface->width; @@ -2664,7 +2647,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, * */ if (surface->frontbuffer) { if (!surface->is_frontbuffer_mode || - !is_activated || + !wl_egl_surface->is_activated || wl_egl_surface->reset) { surface->frontbuffer = NULL; wl_egl_surface->need_to_enqueue = TPL_TRUE; @@ -2721,7 +2704,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, } } - if (surface->is_frontbuffer_mode && is_activated) { + if (surface->is_frontbuffer_mode && wl_egl_surface->is_activated) { if (surface->frontbuffer == NULL) { TPL_INFO("[FRONTBUFFER RENDERING START]", "wl_egl_surface(%p) wl_egl_window(%p) bo(%d)", -- 2.7.4 From be51392311d4a004ed5484ee64a3d88e7bbd4b36 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 7 Jul 2023 13:09:29 +0900 Subject: [PATCH 16/16] log: add & apply TPL_LOG_D instead of TPL_DEBUG - TPL_LOG_D can print the tag manually. - The prefix of almost all logs is modified to print wl_egl_surface to distinguish each wl_egl_surface. Change-Id: I68a5b450693ed0d9422a9ba72e119f56235d7a32 Signed-off-by: Joonbum Ko --- src/tpl.c | 3 +- src/tpl_utils.h | 7 ++-- src/tpl_utils_gthread.c | 12 ++++--- src/tpl_wayland_egl.c | 2 +- src/tpl_wl_egl_thread.c | 95 +++++++++++++++++++++++++------------------------ src/tpl_wl_vk_thread.c | 54 +++++++++++++++------------- 6 files changed, 93 insertions(+), 80 deletions(-) diff --git a/src/tpl.c b/src/tpl.c index 1f2bd4e..90e5807 100644 --- a/src/tpl.c +++ b/src/tpl.c @@ -41,7 +41,7 @@ __tpl_init(void) tpl_getenv_initialized = 1; } - TPL_DEBUG("[libtpl-egl] loaded"); + TPL_LOG_D("[libtpl-egl][constructor]", "loaded"); #ifdef OBJECT_HASH_CHECK __tpl_object_hash_init(); #endif @@ -67,6 +67,7 @@ __tpl_runtime_fini(void) pthread_mutex_unlock(&runtime_mutex); } + TPL_LOG_D("[libtpl-egl][destructor]", "finished"); #ifdef OBJECT_HASH_CHECK __tpl_object_hash_shutdown(); #endif diff --git a/src/tpl_utils.h b/src/tpl_utils.h index b7dfa1c..08d7ab6 100644 --- a/src/tpl_utils.h +++ b/src/tpl_utils.h @@ -119,6 +119,7 @@ inline char *tpl_getenv(const char *name) #ifdef LOG_DEFAULT_ENABLE #define TPL_LOG_F(f, x...) tpl_log_f("[TPL_F]", f, ##x) #define TPL_LOG_B(b, f, x...) tpl_log_b("[TPL_" b "]", f, ##x) +#define TPL_LOG_D(t, f, x...) tpl_log_d(t, f, ##x) #define TPL_DEBUG(f, x...) tpl_log_d("[TPL_DEBUG]", f, ##x) #define TPL_LOG_T(b, f, x...) \ { \ @@ -176,16 +177,18 @@ inline char *tpl_getenv(const char *name) } \ } -#define TPL_DEBUG(f, x...) \ +#define TPL_LOG_D(t, f, x...) \ { \ LOG_INIT(); \ if (tpl_log_lvl > 2) \ - tpl_log_d("[TPL_DEBUG]", f, ##x); \ + tpl_log_d(t, f, ##x); \ } + #endif /* LOG_DEFAULT_ENABLE */ #else /* NDEBUG */ #define TPL_LOG_F(f, x...) #define TPL_LOG_B(b, f, x...) +#define TPL_LOG_D(t, f, x...) #define TPL_DEBUG(f, x...) 
#define TPL_ERR(f, x...) #define TPL_WARN(f, x...) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 68c2faf..4b17598 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -308,7 +308,7 @@ tpl_gsource_create(tpl_gthread *thread, void *data, int fd, g_source_attach(&new_gsource->gsource, g_main_loop_get_context(thread->loop)); - TPL_DEBUG("[GSOURCE_CREATE] tpl_gsource(%p) thread(%p) data(%p) fd(%d) type(%d)", + TPL_LOG_D("[GSOURCE][CREATE]", "tpl_gsource(%p) thread(%p) data(%p) fd(%d) type(%d)", new_gsource, thread, data, new_gsource->fd, type); return new_gsource; @@ -320,7 +320,7 @@ __gsource_remove_and_destroy(tpl_gsource *source) if (g_source_is_destroyed(&source->gsource)) return; - TPL_DEBUG("[GSOURCE_DESTROY] tpl_gsource(%p) type(%d)", + TPL_LOG_D("[GSOURCE][DESTROY]", "tpl_gsource(%p) type(%d)", source, source->type); g_source_remove_unix_fd(&source->gsource, source->tag); @@ -471,7 +471,7 @@ _thread_idle_cb(gpointer data) { tpl_gthread *gthread = (tpl_gthread *)data; - TPL_DEBUG("THREAD IDLE CALLBACK"); + TPL_LOG_D("[WAIT_IDLE]", "THREAD IDLE CALLBACK"); g_mutex_lock(>hread->idle_mutex); gthread->is_idle = TPL_TRUE; @@ -480,9 +480,11 @@ _thread_idle_cb(gpointer data) /* If the caller thread of tpl_gthread_wait_idle locked the pause_mutex, * thread will be paused here until unlock */ + TPL_LOG_D("[THREAD_PAUSE]", "try to lock pause_mutex"); g_mutex_lock(>hread->pause_mutex); gthread->paused = TPL_FALSE; g_mutex_unlock(>hread->pause_mutex); + TPL_LOG_D("[THREAD_RESUME]", "thread resumes"); return G_SOURCE_REMOVE; } @@ -495,7 +497,7 @@ tpl_gthread_wait_idle(tpl_gthread *gthread) gboolean ret = TRUE; tpl_result_t res = TPL_ERROR_NONE; - TPL_DEBUG("[WAIT IDLE] BEGIN"); + TPL_LOG_D("[WAIT_IDLE]", "BEGIN"); g_mutex_lock(>hread->idle_mutex); @@ -532,7 +534,7 @@ tpl_gthread_wait_idle(tpl_gthread *gthread) g_mutex_unlock(>hread->idle_mutex); - TPL_DEBUG("[WAIT IDLE] END"); + TPL_LOG_D("[WAIT_IDLE]", "END"); return res; } diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c index b8b24da..38c2f39 100755 --- a/src/tpl_wayland_egl.c +++ b/src/tpl_wayland_egl.c @@ -1068,7 +1068,7 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface, * the buffer is already displayed. 
*/ if (wayland_egl_buffer->is_new) { - TPL_DEBUG("[FRONTBUFFER RENDERING MODE] tbm_surface(%p) bo(%d)", + TPL_LOG_D("[FRONTBUFFER RENDERING MODE]", "tbm_surface(%p) bo(%d)", tbm_surface, tbm_bo_export(wayland_egl_buffer->bo)); } else { TPL_LOG_B("WL_EGL", diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index d305ec0..84e8d08 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -467,7 +467,9 @@ __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, wl_egl_display->presentation = wl_registry_bind(wl_registry, name, &wp_presentation_interface, 1); - TPL_DEBUG("bind wp_presentation_interface"); + TPL_LOG_D("[REGISTRY_BIND]", + "wl_egl_display(%p) bind wp_presentation_interface", + wl_egl_display); } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) { char *env = tpl_getenv("TPL_EFS"); if (env && !atoi(env)) { @@ -477,7 +479,9 @@ __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, wl_registry_bind(wl_registry, name, &zwp_linux_explicit_synchronization_v1_interface, 1); wl_egl_display->use_explicit_sync = TPL_TRUE; - TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface"); + TPL_LOG_D("[REGISTRY_BIND]", + "wl_egl_display(%p) bind zwp_linux_explicit_synchronization_v1_interface", + wl_egl_display); } } #endif @@ -829,7 +833,7 @@ __thread_func_disp_finalize(tpl_gsource *gsource) (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); tpl_gmutex_lock(&wl_egl_display->disp_mutex); - TPL_DEBUG("[FINALIZE] wl_egl_display(%p) tpl_gsource(%p)", + TPL_LOG_D("[D_FINALIZE]", "wl_egl_display(%p) tpl_gsource(%p)", wl_egl_display, gsource); if (wl_egl_display->wl_initialized) @@ -1216,7 +1220,7 @@ __cb_destroy_callback(void *private) tpl_wl_egl_surface_t *wl_egl_surface = NULL; if (!tizen_private) { - TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface"); + TPL_LOG_D("[WL_EGL_WINDOW_DESTROY_CALLBACK]", "Already destroyed surface"); return; } @@ -1377,7 +1381,7 @@ __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)", wl_egl_surface->commit_sync.fd, commit_sync_fd); - TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)", + TPL_LOG_D("[COMMIT_SYNC][DUP]", "wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)", wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd); tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); return commit_sync_fd; @@ -1395,7 +1399,7 @@ __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)", wl_egl_surface->commit_sync.fd, commit_sync_fd); - TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)", + TPL_LOG_D("[COMMIT_SYNC][CREATE]", "wl_egl_surface(%p) commit_sync_fd(%d)", wl_egl_surface, commit_sync_fd); tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); @@ -1456,7 +1460,7 @@ __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *priv presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)", wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", + TPL_LOG_D("[PRESENTATION_SYNC][DUP]", "wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); 
tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); return presentation_sync_fd; @@ -1473,7 +1477,7 @@ __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *priv presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)", wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", + TPL_LOG_D("[PRESENTATION_SYNC][CREATE]", "wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); @@ -1569,9 +1573,9 @@ __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); } else { - TPL_LOG_T("[DEACTIVATED]", - " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); + TPL_INFO("[DEACTIVATED]", + " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); } } @@ -1691,13 +1695,13 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_egl_surface->surf_mutex); if (message == INIT_SURFACE) { /* Initialize surface */ - TPL_DEBUG("wl_egl_surface(%p) initialize message received!", + TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) initialize message received!", wl_egl_surface); _thread_wl_egl_surface_init(wl_egl_surface); wl_egl_surface->initialized_in_thread = TPL_TRUE; tpl_gcond_signal(&wl_egl_surface->surf_cond); } else if (message == ACQUIRABLE) { /* Acquirable */ - TPL_DEBUG("wl_egl_surface(%p) acquirable message received!", + TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) acquirable message received!", wl_egl_surface); _thread_surface_queue_acquire(wl_egl_surface); } @@ -1718,7 +1722,7 @@ __thread_func_surf_finalize(tpl_gsource *gsource) TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)", + TPL_LOG_D("[S_FINALIZE]", "wl_egl_surface(%p) tpl_gsource(%p)", wl_egl_surface, gsource); _thread_wl_egl_surface_fini(wl_egl_surface); @@ -2037,12 +2041,13 @@ _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) return; } - TPL_INFO("[QUEUE_CREATION]", + TPL_INFO("[QUEUE_CREATION][1/2]", "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)", wl_egl_surface, wl_egl_surface->wl_surface, wl_egl_display->wl_tbm_client); - TPL_INFO("[QUEUE_CREATION]", - "tbm_queue(%p) size(%d x %d) X %d format(%d)", + TPL_INFO("[QUEUE_CREATION][2/2]", + "wl_egl_surface(%p) tbm_queue(%p) size(%d x %d) X %d format(%d)", + wl_egl_surface, wl_egl_surface->tbm_queue, wl_egl_surface->width, wl_egl_surface->height, @@ -2125,8 +2130,6 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) tpl_gthread_pause_in_idle(wl_egl_display->thread); buffer_cnt = __tpl_list_get_count(wl_egl_surface->buffers); - TPL_INFO("[BUFFER_CLEAR]", "BEGIN | wl_egl_surface(%p) buffer_cnt(%d)", - wl_egl_surface, buffer_cnt); while (!__tpl_list_is_empty(wl_egl_surface->buffers)) { wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(wl_egl_surface->buffers, @@ -2136,8 +2139,9 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) status = wl_egl_buffer->status; - TPL_INFO("[BUFFER]","[%d/%d]| wl_egl_buffer(%p) tbm_surface(%p) status(%s)", - ++idx, 
buffer_cnt, wl_egl_buffer, + TPL_INFO("[BUFFER_CLEAR]", + "[%d/%d] wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) status(%s)", + ++idx, buffer_cnt, wl_egl_surface, wl_egl_buffer, wl_egl_buffer->tbm_surface, status_to_string[status]); @@ -2191,7 +2195,6 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) if (need_to_release || need_to_cancel) tbm_surface_internal_unref(wl_egl_buffer->tbm_surface); } - TPL_INFO("[BUFFER_CLEAR]", "END | wl_egl_surface(%p)", wl_egl_surface); tpl_gthread_continue(wl_egl_display->thread); } @@ -2561,8 +2564,8 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, enqueued_buffer->status < COMMITTED) { tpl_result_t wait_result; TPL_INFO("[DEQ_AFTER_RESET]", - "waiting for previous wl_egl_buffer(%p) commit", - enqueued_buffer); + "wl_egl_surface(%p) waiting for previous wl_egl_buffer(%p) commit", + wl_egl_surface, enqueued_buffer); wait_result = tpl_gcond_timed_wait(&enqueued_buffer->cond, &enqueued_buffer->mutex, @@ -2652,7 +2655,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, surface->frontbuffer = NULL; wl_egl_surface->need_to_enqueue = TPL_TRUE; wl_egl_surface->frontbuffer_activated = TPL_FALSE; - TPL_INFO("[FRONTBUFFER RENDERING STOP]", + TPL_INFO("[FRONTBUFFER_RENDERING_STOP]", "wl_egl_surface(%p) wl_egl_window(%p)", wl_egl_surface, wl_egl_surface->wl_egl_window); } else { @@ -2693,7 +2696,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, #if TIZEN_FEATURE_ENABLE if (wl_egl_display->use_explicit_sync) { *release_fence = wl_egl_buffer->release_fence_fd; - TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)", + TPL_LOG_D("[EXPLICIT_FENCE]", "wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)", wl_egl_surface, wl_egl_buffer, *release_fence); wl_egl_buffer->release_fence_fd = -1; @@ -2706,7 +2709,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, if (surface->is_frontbuffer_mode && wl_egl_surface->is_activated) { if (surface->frontbuffer == NULL) { - TPL_INFO("[FRONTBUFFER RENDERING START]", + TPL_INFO("[FRONTBUFFER_RENDERING_START]", "wl_egl_surface(%p) wl_egl_window(%p) bo(%d)", wl_egl_surface, wl_egl_surface->wl_egl_window, _get_tbm_surface_bo_name(tbm_surface)); @@ -2914,14 +2917,14 @@ __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message) TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)", wl_egl_buffer->acquire_fence_fd); - TPL_DEBUG("[RENDER DONE] wl_egl_buffer(%p) tbm_surface(%p)", - wl_egl_buffer, tbm_surface); + TPL_LOG_D("[RENDER DONE]", "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p)", + wl_egl_surface, wl_egl_buffer, tbm_surface); tpl_gmutex_lock(&wl_egl_buffer->mutex); wl_egl_buffer->status = WAITING_VBLANK; - TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)", - wl_egl_buffer, wl_egl_buffer->waiting_source, + TPL_LOG_D("[FINALIZE]", "wl_egl_surface(%p) wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)", + wl_egl_surface, wl_egl_buffer, wl_egl_buffer->waiting_source, wl_egl_buffer->acquire_fence_fd); close(wl_egl_buffer->acquire_fence_fd); @@ -3049,7 +3052,7 @@ __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; TRACE_ASYNC_END((intptr_t)wl_egl_surface, "WAIT_VBLANK"); - TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface); + TPL_LOG_D("[VBLANK_DONE]", "wl_egl_surface(%p)", wl_egl_surface); if (error == TDM_ERROR_TIMEOUT) 
TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)", @@ -3274,7 +3277,7 @@ __cb_presentation_feedback_presented(void *data, tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - TPL_DEBUG("[FEEDBACK][PRESENTED] pst_feedback(%p) presentation_feedback(%p) bo(%d)", + TPL_LOG_D("[PRESENTED]", "pst_feedback(%p) presentation_feedback(%p) bo(%d)", pst_feedback, presentation_feedback, pst_feedback->bo_name); if (pst_feedback->pst_sync_fd != -1) { @@ -3315,7 +3318,7 @@ __cb_presentation_feedback_discarded(void *data, tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - TPL_DEBUG("[FEEDBACK][DISCARDED] pst_feedback(%p) presentation_feedback(%p) bo(%d)", + TPL_LOG_D("[DISCARDED]", "pst_feedback(%p) presentation_feedback(%p) bo(%d)", pst_feedback, presentation_feedback, pst_feedback->bo_name); if (pst_feedback->pst_sync_fd != -1) { @@ -3398,8 +3401,8 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, "[FATAL] Failed to create wl_buffer"); TPL_INFO("[WL_BUFFER_CREATE]", - "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", - wl_egl_buffer, wl_egl_buffer->wl_buffer, + "wl_egl_surface(%p) wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_egl_surface, wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); #if TIZEN_FEATURE_ENABLE @@ -3523,7 +3526,7 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync, wl_egl_buffer->acquire_fence_fd); - TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)", + TPL_LOG_D("[SET_ACQUIRE_FENCE][1/2]", "wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)", wl_egl_surface, wl_egl_buffer->tbm_surface, wl_egl_buffer->acquire_fence_fd); close(wl_egl_buffer->acquire_fence_fd); wl_egl_buffer->acquire_fence_fd = -1; @@ -3535,7 +3538,7 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, } else { zwp_linux_buffer_release_v1_add_listener( wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer); - TPL_DEBUG("add explicit_sync_release_listener."); + TPL_LOG_D("[SET_ACQUIRE_FENCE][2/2]", "add explicit_sync_release_listener."); } } #endif @@ -3577,7 +3580,7 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)", wl_egl_buffer->bo_name); - TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)", + TPL_LOG_D("[COMMIT_SYNC][SEND]", "wl_egl_surface(%p) commit_sync_fd(%d)", wl_egl_surface, wl_egl_buffer->commit_sync_fd); close(wl_egl_buffer->commit_sync_fd); @@ -3656,8 +3659,8 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", - wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); + TPL_INFO("[BUFFER_FREE]", "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, wl_egl_buffer, wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name); tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); if (wl_egl_surface->buffers) { @@ -3752,17 +3755,15 @@ _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); buffer_cnt = __tpl_list_get_count(wl_egl_surface->buffers); - TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", - wl_egl_surface, buffer_cnt); node = 
__tpl_list_get_front_node(wl_egl_surface->buffers); do { if (!node) break; wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_node_get_data(node); - TPL_INFO("[INFO]", - "[%d/%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", - ++idx, buffer_cnt, wl_egl_buffer, wl_egl_buffer->tbm_surface, - wl_egl_buffer->bo_name, + TPL_INFO("[BUFFERS_INFO]", + "[%d/%d] wl_egl_surface(%p), wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", + ++idx, buffer_cnt, wl_egl_surface, wl_egl_buffer, + wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name, status_to_string[wl_egl_buffer->status]); } while ((node = __tpl_list_node_next(node))); tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 317d005..a339349 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -372,7 +372,9 @@ __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, wl_registry_bind(wl_registry, name, &zwp_linux_explicit_synchronization_v1_interface, 1); wl_vk_display->use_explicit_sync = TPL_TRUE; - TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface"); + TPL_LOG_D("[REGISTRY_BIND]", + "wl_vk_display(%p) bind zwp_linux_explicit_synchronization_v1_interface", + wl_vk_display); } } #endif @@ -693,7 +695,7 @@ __thread_func_disp_finalize(tpl_gsource *gsource) (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); tpl_gmutex_lock(&wl_vk_display->disp_mutex); - TPL_DEBUG("[FINALIZE] wl_vk_display(%p) tpl_gsource(%p)", + TPL_LOG_D("[D_FINALIZE]", "wl_vk_display(%p) tpl_gsource(%p)", wl_vk_display, gsource); if (wl_vk_display->wl_initialized) @@ -1029,10 +1031,11 @@ _tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface) status = wl_vk_buffer->status; - TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)", - idx, wl_vk_buffer, - wl_vk_buffer->tbm_surface, - status_to_string[status]); + TPL_INFO("[BUFFER_CLEAR]", + "[%d] wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) status(%s)", + idx, wl_vk_surface, wl_vk_buffer, + wl_vk_buffer->tbm_surface, + status_to_string[status]); if (status >= ENQUEUED) { tpl_bool_t need_to_wait = TPL_FALSE; @@ -1211,13 +1214,13 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_vk_surface->surf_mutex); if (message == INIT_SURFACE) { /* Initialize surface */ - TPL_DEBUG("wl_vk_surface(%p) initialize message received!", + TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) initialize message received!", wl_vk_surface); _thread_wl_vk_surface_init(wl_vk_surface); wl_vk_surface->initialized_in_thread = TPL_TRUE; tpl_gcond_signal(&wl_vk_surface->surf_cond); } else if (message == CREATE_QUEUE) { /* Create tbm_surface_queue */ - TPL_DEBUG("wl_vk_surface(%p) queue creation message received!", + TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) queue creation message received!", wl_vk_surface); if (_thread_swapchain_create_tbm_queue(wl_vk_surface) != TPL_ERROR_NONE) { @@ -1226,12 +1229,12 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) } tpl_gcond_signal(&wl_vk_surface->surf_cond); } else if (message == DESTROY_QUEUE) { /* swapchain destroy */ - TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!", + TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) swapchain destroy message received!", wl_vk_surface); _thread_swapchain_destroy_tbm_queue(wl_vk_surface); tpl_gcond_signal(&wl_vk_surface->surf_cond); } else if (message == ACQUIRABLE) { /* Acquirable message */ - TPL_DEBUG("wl_vk_surface(%p) acquirable message received!", + 
TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) acquirable message received!", wl_vk_surface); if (_thread_surface_queue_acquire(wl_vk_surface) != TPL_ERROR_NONE) { @@ -1257,7 +1260,7 @@ __thread_func_surf_finalize(tpl_gsource *gsource) TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); tpl_gmutex_lock(&wl_vk_surface->surf_mutex); - TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)", + TPL_LOG_D("[S_FINALIZE]", "wl_vk_surface(%p) tpl_gsource(%p)", wl_vk_surface, gsource); _thread_wl_vk_surface_fini(wl_vk_surface); @@ -1498,9 +1501,9 @@ __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)", wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue); } else { - TPL_LOG_T("[DEACTIVATED]", - " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)", - wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue); + TPL_INFO("[DEACTIVATED]", + " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue); } } @@ -1573,7 +1576,9 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) int i; for (i = 0; i < swapchain->properties.buffer_count; i++) { if (swapchain->swapchain_buffers[i]) { - TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]); + TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)", + i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i], + _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i])); tbm_surface_internal_unref(swapchain->swapchain_buffers[i]); swapchain->swapchain_buffers[i] = NULL; } @@ -1678,7 +1683,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) swapchain->create_done = TPL_TRUE; TPL_INFO("[TBM_QUEUE_CREATED]", - "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)", + "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)", wl_vk_surface, swapchain, tbm_queue); return TPL_ERROR_NONE; @@ -1805,8 +1810,9 @@ __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) if (swapchain->swapchain_buffers) { for (int i = 0; i < swapchain->properties.buffer_count; i++) { if (swapchain->swapchain_buffers[i]) { - TPL_DEBUG("Stop tracking tbm_surface(%p)", - swapchain->swapchain_buffers[i]); + TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)", + i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i], + _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i])); tbm_surface_internal_unref(swapchain->swapchain_buffers[i]); swapchain->swapchain_buffers[i] = NULL; } @@ -1888,8 +1894,8 @@ __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface, for (i = 0; i < *buffer_count; i++) { if (swapchain->swapchain_buffers[i]) { - TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)", - i, swapchain->swapchain_buffers[i], + TPL_INFO("[TRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)", + i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i], _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i])); tbm_surface_internal_ref(swapchain->swapchain_buffers[i]); } @@ -2121,7 +2127,7 @@ __tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface, #if TIZEN_FEATURE_ENABLE if (wl_vk_surface->surface_sync) { *release_fence = wl_vk_buffer->release_fence_fd; - TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)", + TPL_LOG_D("[EXPLICIT_FENCE]", "wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)", wl_vk_surface, wl_vk_buffer, *release_fence); wl_vk_buffer->release_fence_fd = -1; } else @@ -2528,7 +2534,7 @@ 
__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK"); - TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface); + TPL_LOG_D("[VBLANK_DONE]", "wl_vk_surface(%p)", wl_vk_surface); if (error == TDM_ERROR_TIMEOUT) TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)", @@ -2646,7 +2652,7 @@ _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface, zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync, wl_vk_buffer->acquire_fence_fd); - TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)", + TPL_LOG_D("[SET_ACQUIRE_FENCE][1/2]", "wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)", wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd); close(wl_vk_buffer->acquire_fence_fd); wl_vk_buffer->acquire_fence_fd = -1; @@ -2658,7 +2664,7 @@ _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface, } else { zwp_linux_buffer_release_v1_add_listener( wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer); - TPL_DEBUG("add explicit_sync_release_listener."); + TPL_LOG_D("[SET_ACQUIRE_FENCE][2/2]", "add explicit_sync_release_listener."); } } #endif -- 2.7.4