From b1725298fc58f3e2e63da142bf9687a214f38146 Mon Sep 17 00:00:00 2001 From: Jinbong Date: Mon, 18 Jul 2022 14:14:28 +0900 Subject: [PATCH 01/16] Remove compile warning about casting to pointer from integer of different size --- src/tpl_tbm.c | 4 ++-- src/tpl_wayland_egl.c | 18 +++++++++--------- src/tpl_wl_egl_thread.c | 24 ++++++++++++------------ src/unused/tpl_gbm.c | 6 +++--- src/unused/tpl_wayland_egl_thread.c | 24 ++++++++++++------------ src/unused/tpl_wl_egl_thread_legacy.c | 14 +++++++------- 6 files changed, 45 insertions(+), 45 deletions(-) diff --git a/src/tpl_tbm.c b/src/tpl_tbm.c index a41187c..1a66f17 100644 --- a/src/tpl_tbm.c +++ b/src/tpl_tbm.c @@ -250,7 +250,7 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_PARAMETER; } - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); tbm_surface_internal_unref(tbm_surface); @@ -320,7 +320,7 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, surface, tpl_tbm_surface->tbm_queue, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); return tbm_surface; diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c index 16fc9f2..b8b24da 100755 --- a/src/tpl_wayland_egl.c +++ b/src/tpl_wayland_egl.c @@ -805,7 +805,7 @@ __tpl_wayland_egl_surface_fini(tpl_surface_t *surface) tbm_surface_queue_error_e tsq_err; tbm_surface_h tbm_surface = __tpl_list_pop_front(wayland_egl_surface->attached_buffers, NULL); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo( tbm_surface, 0))); tbm_surface_internal_unref(tbm_surface); @@ -1018,7 +1018,7 @@ __tpl_wayland_egl_surface_commit(tpl_surface_t *surface, if (tdm_lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->tdm_mutex); } - TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); } @@ -1074,7 +1074,7 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface, TPL_LOG_B("WL_EGL", "[ENQ_SKIP][F] Client already uses frontbuffer(%p)", surface->frontbuffer); - TRACE_ASYNC_END((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); return TPL_ERROR_NONE; } @@ -1103,7 +1103,7 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_OPERATION; } - TRACE_ASYNC_END((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); TRACE_BEGIN("[WAIT_VBLANK]"); @@ -1333,7 +1333,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface( surface->frontbuffer); if (wayland_egl_buffer) - TRACE_ASYNC_BEGIN((int)wayland_egl_buffer, + TRACE_ASYNC_BEGIN((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); if (lock_res == 
0) pthread_mutex_unlock(&wayland_egl_display->wl_event_mutex); @@ -1361,7 +1361,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou if ((wayland_egl_buffer = __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface(tbm_surface)) != NULL) { TRACE_MARK("[DEQ][REUSED]BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); - TRACE_ASYNC_BEGIN((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); wayland_egl_buffer->dx = wl_egl_window->dx; @@ -1471,7 +1471,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou wayland_egl_buffer); TRACE_MARK("[DEQ][NEW]BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); - TRACE_ASYNC_BEGIN((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); TPL_LOG_B("WL_EGL", "[DEQ][N] tpl_wayland_egl_buffer_t(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ -1589,7 +1589,7 @@ __cb_client_buffer_release_callback(void *data, struct wl_proxy *proxy) tbm_surface = (tbm_surface_h) data; - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); TPL_LOG_B("WL_EGL", "[RELEASE_CB] wl_buffer(%p) tbm_surface(%p) bo(%d)", proxy, tbm_surface, @@ -1909,7 +1909,7 @@ static void __cb_tizen_surface_shm_flusher_flush_callback(void *data, tpl_wayland_egl_buffer_t *wayland_egl_buffer = __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface(tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo( tbm_surface, 0))); diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index b8830a7..2ad01c2 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2539,7 +2539,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TPL_LOG_T("WL_EGL", "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", surface->frontbuffer, bo_name); - TRACE_ASYNC_BEGIN((int)surface->frontbuffer, + TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); @@ -2593,7 +2593,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_surface->reset = TPL_FALSE; TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", wl_egl_buffer->bo_name); TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name, @@ -2665,7 +2665,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!tbm_surface_internal_is_valid(tbm_surface)) { TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); return TPL_ERROR_INVALID_PARAMETER; } @@ -2705,7 +2705,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, !wl_egl_buffer->need_to_commit) { TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", ((surface->frontbuffer 
== tbm_surface) ? "ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); tpl_gmutex_unlock(&wl_egl_buffer->mutex); return TPL_ERROR_NONE; } @@ -2762,7 +2762,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d", tbm_surface, wl_egl_surface, tsq_err); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); return TPL_ERROR_INVALID_OPERATION; } @@ -2772,7 +2772,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); return TPL_ERROR_NONE; } @@ -2924,7 +2924,7 @@ __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data; tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK"); + TRACE_ASYNC_END((intptr_t)wl_egl_surface, "WAIT_VBLANK"); TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface); if (error == TDM_ERROR_TIMEOUT) @@ -2987,7 +2987,7 @@ __cb_buffer_fenced_release(void *data, TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", _get_tbm_surface_bo_name(tbm_surface), fence); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T("WL_EGL", @@ -3038,7 +3038,7 @@ __cb_buffer_immediate_release(void *data, TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T("WL_EGL", @@ -3094,7 +3094,7 @@ __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) wl_egl_buffer->status = RELEASED; TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ -3243,7 +3243,7 @@ _thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface) if (tdm_err == TDM_ERROR_NONE) { wl_egl_surface->vblank_done = TPL_FALSE; - TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK"); + TRACE_ASYNC_BEGIN((intptr_t)wl_egl_surface, "WAIT_VBLANK"); } else { TPL_ERR("Failed to tdm_client_vblank_wait. 
tdm_err(%d)", tdm_err); return TPL_ERROR_INVALID_OPERATION; @@ -3420,7 +3420,7 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, wl_display_flush(wl_egl_display->wl_display); - TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_BEGIN((intptr_t)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", wl_egl_buffer->bo_name); tpl_gmutex_lock(&wl_egl_buffer->mutex); diff --git a/src/unused/tpl_gbm.c b/src/unused/tpl_gbm.c index 83c9c81..be5f238 100644 --- a/src/unused/tpl_gbm.c +++ b/src/unused/tpl_gbm.c @@ -396,7 +396,7 @@ __tpl_gbm_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_PARAMETER; } - TRACE_ASYNC_END((int)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); TPL_IMAGE_DUMP(tbm_surface, surface->width, surface->height); @@ -521,7 +521,7 @@ __tpl_gbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, gbm_buffer = __tpl_gbm_get_gbm_buffer_from_tbm_surface(tbm_surface); if (gbm_buffer) { TRACE_MARK("[DEQ][REUSED]BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); - TRACE_ASYNC_BEGIN((int)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); TPL_LOG_B("GBM", "[DEQ][R] tpl_gbm_surface_t(%p) tbm_surface(%p) bo(%d)", gbm_surface, tbm_surface, tbm_bo_export(gbm_buffer->bo)); @@ -549,7 +549,7 @@ __tpl_gbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, __tpl_gbm_set_gbm_buffer_to_tbm_surface(tbm_surface, gbm_buffer); TRACE_MARK("[DEQ][NEW]BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); - TRACE_ASYNC_BEGIN((int)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); TPL_LOG_B("GBM", "[DEQ][N] tpl_gbm_surface_t(%p) tbm_surface(%p) bo(%d)", gbm_surface, tbm_surface, tbm_bo_export(bo)); diff --git a/src/unused/tpl_wayland_egl_thread.c b/src/unused/tpl_wayland_egl_thread.c index 0ea5cc4..ccbc370 100755 --- a/src/unused/tpl_wayland_egl_thread.c +++ b/src/unused/tpl_wayland_egl_thread.c @@ -1692,7 +1692,7 @@ __cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer) buf_info->need_to_release = TPL_FALSE; TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T(BACKEND, "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ -1742,7 +1742,7 @@ __cb_buffer_fenced_release(void *data, TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", _get_tbm_surface_bo_name(tbm_surface), fence); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T(BACKEND, @@ -1794,7 +1794,7 @@ __cb_buffer_immediate_release(void *data, TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T(BACKEND, @@ -2165,7 +2165,7 @@ __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, return; } - TRACE_ASYNC_END((int)surf_source, "WAIT_VBLANK"); + TRACE_ASYNC_END((intptr_t)surf_source, "WAIT_VBLANK"); if (error == TDM_ERROR_TIMEOUT) 
TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. surf_source(%p)", @@ -2249,7 +2249,7 @@ _twe_surface_wait_vblank(twe_wl_surf_source *surf_source) if (tdm_err == TDM_ERROR_NONE) { surf_source->vblank_done = TPL_FALSE; - TRACE_ASYNC_BEGIN((int)surf_source, "WAIT_VBLANK"); + TRACE_ASYNC_BEGIN((intptr_t)surf_source, "WAIT_VBLANK"); } else { TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err); return TPL_ERROR_INVALID_OPERATION; @@ -2316,7 +2316,7 @@ _twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source, wl_display_flush(surf_source->disp_source->disp); TRACE_MARK("[COMMIT] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); buf_info->sync_timestamp++; @@ -2600,7 +2600,7 @@ _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source, wl_display_flush(surf_source->disp_source->disp); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); buf_info->need_to_commit = TPL_FALSE; @@ -3107,7 +3107,7 @@ _twe_thread_wl_surf_source_destroy(void *source) __tpl_list_pop_front(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -3564,7 +3564,7 @@ twe_surface_destroy_swapchain(twe_surface_h twe_surface) __tpl_list_pop_front(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -3761,7 +3761,7 @@ _twe_thread_fence_wait_source_dispatch(GSource *source, GSourceFunc cb, gpointer surf_source->render_done_cnt++; - TRACE_ASYNC_END((int)wait_source, "FENCE WAIT fd(%d)", wait_source->fence_fd); + TRACE_ASYNC_END((intptr_t)wait_source, "FENCE WAIT fd(%d)", wait_source->fence_fd); g_mutex_lock(&surf_source->surf_mutex); /* Since this source is going to be removed, acquire_and_commit must be @@ -3819,7 +3819,7 @@ _twe_thread_fence_wait_source_attach(twe_wl_surf_source *surf_source, return TPL_ERROR_OUT_OF_MEMORY; } - TRACE_ASYNC_BEGIN((int)wait_source, "FENCE WAIT fd(%d)", sync_fd); + TRACE_ASYNC_BEGIN((intptr_t)wait_source, "FENCE WAIT fd(%d)", sync_fd); tbm_surface_internal_ref(tbm_surface); @@ -4008,7 +4008,7 @@ twe_surface_queue_force_flush(twe_surface_h twe_surface) tbm_surface_h tbm_surface = __tpl_list_pop_front(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) diff --git a/src/unused/tpl_wl_egl_thread_legacy.c b/src/unused/tpl_wl_egl_thread_legacy.c index bb6a55d..c37ee96 100644 --- a/src/unused/tpl_wl_egl_thread_legacy.c +++ 
b/src/unused/tpl_wl_egl_thread_legacy.c @@ -529,7 +529,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!wayland_egl_surface) { TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)", surface, wayland_egl_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -537,7 +537,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!tbm_surface_internal_is_valid(tbm_surface)) { TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -564,7 +564,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, TPL_LOG_T("WL_EGL", "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_NONE; } @@ -602,14 +602,14 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); TPL_ERR("Failed to enqueue tbm_surface(%p). tpl_surface(%p) tsq_err=%d", tbm_surface, surface, tsq_err); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_INVALID_OPERATION; } tbm_surface_internal_unref(tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_NONE; @@ -748,7 +748,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, surface->frontbuffer, tbm_bo_export(tbm_surface_internal_get_bo( surface->frontbuffer, 0))); - TRACE_ASYNC_BEGIN((int)surface->frontbuffer, + TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo( surface->frontbuffer, 0))); @@ -791,7 +791,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wayland_egl_surface->reset = TPL_FALSE; TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)", tbm_surface, bo_name, sync_fence ? *sync_fence : -1); -- 2.7.4 From 373fe0f818b66e9d724b76456c33e1a3c4dd9329 Mon Sep 17 00:00:00 2001 From: Jinbong Date: Mon, 18 Jul 2022 14:46:27 +0900 Subject: [PATCH 02/16] Revert "Remove compile warning about casting to pointer from integer of different size" This reverts commit b1725298fc58f3e2e63da142bf9687a214f38146. 
--- src/tpl_tbm.c | 4 ++-- src/tpl_wayland_egl.c | 18 +++++++++--------- src/tpl_wl_egl_thread.c | 24 ++++++++++++------------ src/unused/tpl_gbm.c | 6 +++--- src/unused/tpl_wayland_egl_thread.c | 24 ++++++++++++------------ src/unused/tpl_wl_egl_thread_legacy.c | 14 +++++++------- 6 files changed, 45 insertions(+), 45 deletions(-) diff --git a/src/tpl_tbm.c b/src/tpl_tbm.c index 1a66f17..a41187c 100644 --- a/src/tpl_tbm.c +++ b/src/tpl_tbm.c @@ -250,7 +250,7 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_PARAMETER; } - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); tbm_surface_internal_unref(tbm_surface); @@ -320,7 +320,7 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, surface, tpl_tbm_surface->tbm_queue, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); - TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO(%d)", + TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); return tbm_surface; diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c index b8b24da..16fc9f2 100755 --- a/src/tpl_wayland_egl.c +++ b/src/tpl_wayland_egl.c @@ -805,7 +805,7 @@ __tpl_wayland_egl_surface_fini(tpl_surface_t *surface) tbm_surface_queue_error_e tsq_err; tbm_surface_h tbm_surface = __tpl_list_pop_front(wayland_egl_surface->attached_buffers, NULL); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo( tbm_surface, 0))); tbm_surface_internal_unref(tbm_surface); @@ -1018,7 +1018,7 @@ __tpl_wayland_egl_surface_commit(tpl_surface_t *surface, if (tdm_lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->tdm_mutex); } - TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); } @@ -1074,7 +1074,7 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface, TPL_LOG_B("WL_EGL", "[ENQ_SKIP][F] Client already uses frontbuffer(%p)", surface->frontbuffer); - TRACE_ASYNC_END((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_END((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); return TPL_ERROR_NONE; } @@ -1103,7 +1103,7 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_OPERATION; } - TRACE_ASYNC_END((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_END((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); TRACE_BEGIN("[WAIT_VBLANK]"); @@ -1333,7 +1333,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface( surface->frontbuffer); if (wayland_egl_buffer) - TRACE_ASYNC_BEGIN((intptr_t)wayland_egl_buffer, + TRACE_ASYNC_BEGIN((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); if (lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->wl_event_mutex); @@ -1361,7 +1361,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou if ((wayland_egl_buffer = 
__tpl_wayland_egl_get_wayland_buffer_from_tbm_surface(tbm_surface)) != NULL) { TRACE_MARK("[DEQ][REUSED]BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); - TRACE_ASYNC_BEGIN((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); wayland_egl_buffer->dx = wl_egl_window->dx; @@ -1471,7 +1471,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou wayland_egl_buffer); TRACE_MARK("[DEQ][NEW]BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); - TRACE_ASYNC_BEGIN((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); TPL_LOG_B("WL_EGL", "[DEQ][N] tpl_wayland_egl_buffer_t(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ -1589,7 +1589,7 @@ __cb_client_buffer_release_callback(void *data, struct wl_proxy *proxy) tbm_surface = (tbm_surface_h) data; - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); TPL_LOG_B("WL_EGL", "[RELEASE_CB] wl_buffer(%p) tbm_surface(%p) bo(%d)", proxy, tbm_surface, @@ -1909,7 +1909,7 @@ static void __cb_tizen_surface_shm_flusher_flush_callback(void *data, tpl_wayland_egl_buffer_t *wayland_egl_buffer = __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface(tbm_surface); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo( tbm_surface, 0))); diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 2ad01c2..b8830a7 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2539,7 +2539,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TPL_LOG_T("WL_EGL", "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", surface->frontbuffer, bo_name); - TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer, + TRACE_ASYNC_BEGIN((int)surface->frontbuffer, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); @@ -2593,7 +2593,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_surface->reset = TPL_FALSE; TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name); - TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", wl_egl_buffer->bo_name); TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name, @@ -2665,7 +2665,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!tbm_surface_internal_is_valid(tbm_surface)) { TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", tbm_surface); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); return TPL_ERROR_INVALID_PARAMETER; } @@ -2705,7 +2705,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, !wl_egl_buffer->need_to_commit) { TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", ((surface->frontbuffer == tbm_surface) ? 
"ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); tpl_gmutex_unlock(&wl_egl_buffer->mutex); return TPL_ERROR_NONE; } @@ -2762,7 +2762,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d", tbm_surface, wl_egl_surface, tsq_err); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); return TPL_ERROR_INVALID_OPERATION; } @@ -2772,7 +2772,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); return TPL_ERROR_NONE; } @@ -2924,7 +2924,7 @@ __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data; tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - TRACE_ASYNC_END((intptr_t)wl_egl_surface, "WAIT_VBLANK"); + TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK"); TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface); if (error == TDM_ERROR_TIMEOUT) @@ -2987,7 +2987,7 @@ __cb_buffer_fenced_release(void *data, TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", _get_tbm_surface_bo_name(tbm_surface), fence); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T("WL_EGL", @@ -3038,7 +3038,7 @@ __cb_buffer_immediate_release(void *data, TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T("WL_EGL", @@ -3094,7 +3094,7 @@ __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) wl_egl_buffer->status = RELEASED; TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ -3243,7 +3243,7 @@ _thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface) if (tdm_err == TDM_ERROR_NONE) { wl_egl_surface->vblank_done = TPL_FALSE; - TRACE_ASYNC_BEGIN((intptr_t)wl_egl_surface, "WAIT_VBLANK"); + TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK"); } else { TPL_ERR("Failed to tdm_client_vblank_wait. 
tdm_err(%d)", tdm_err); return TPL_ERROR_INVALID_OPERATION; @@ -3420,7 +3420,7 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, wl_display_flush(wl_egl_display->wl_display); - TRACE_ASYNC_BEGIN((intptr_t)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", wl_egl_buffer->bo_name); tpl_gmutex_lock(&wl_egl_buffer->mutex); diff --git a/src/unused/tpl_gbm.c b/src/unused/tpl_gbm.c index be5f238..83c9c81 100644 --- a/src/unused/tpl_gbm.c +++ b/src/unused/tpl_gbm.c @@ -396,7 +396,7 @@ __tpl_gbm_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_PARAMETER; } - TRACE_ASYNC_END((intptr_t)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_END((int)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); TPL_IMAGE_DUMP(tbm_surface, surface->width, surface->height); @@ -521,7 +521,7 @@ __tpl_gbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, gbm_buffer = __tpl_gbm_get_gbm_buffer_from_tbm_surface(tbm_surface); if (gbm_buffer) { TRACE_MARK("[DEQ][REUSED]BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); - TRACE_ASYNC_BEGIN((intptr_t)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((int)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); TPL_LOG_B("GBM", "[DEQ][R] tpl_gbm_surface_t(%p) tbm_surface(%p) bo(%d)", gbm_surface, tbm_surface, tbm_bo_export(gbm_buffer->bo)); @@ -549,7 +549,7 @@ __tpl_gbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, __tpl_gbm_set_gbm_buffer_to_tbm_surface(tbm_surface, gbm_buffer); TRACE_MARK("[DEQ][NEW]BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); - TRACE_ASYNC_BEGIN((intptr_t)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((int)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); TPL_LOG_B("GBM", "[DEQ][N] tpl_gbm_surface_t(%p) tbm_surface(%p) bo(%d)", gbm_surface, tbm_surface, tbm_bo_export(bo)); diff --git a/src/unused/tpl_wayland_egl_thread.c b/src/unused/tpl_wayland_egl_thread.c index ccbc370..0ea5cc4 100755 --- a/src/unused/tpl_wayland_egl_thread.c +++ b/src/unused/tpl_wayland_egl_thread.c @@ -1692,7 +1692,7 @@ __cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer) buf_info->need_to_release = TPL_FALSE; TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T(BACKEND, "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ -1742,7 +1742,7 @@ __cb_buffer_fenced_release(void *data, TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", _get_tbm_surface_bo_name(tbm_surface), fence); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T(BACKEND, @@ -1794,7 +1794,7 @@ __cb_buffer_immediate_release(void *data, TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T(BACKEND, @@ -2165,7 +2165,7 @@ __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, return; } - TRACE_ASYNC_END((intptr_t)surf_source, "WAIT_VBLANK"); + TRACE_ASYNC_END((int)surf_source, "WAIT_VBLANK"); if (error == TDM_ERROR_TIMEOUT) 
TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. surf_source(%p)", @@ -2249,7 +2249,7 @@ _twe_surface_wait_vblank(twe_wl_surf_source *surf_source) if (tdm_err == TDM_ERROR_NONE) { surf_source->vblank_done = TPL_FALSE; - TRACE_ASYNC_BEGIN((intptr_t)surf_source, "WAIT_VBLANK"); + TRACE_ASYNC_BEGIN((int)surf_source, "WAIT_VBLANK"); } else { TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err); return TPL_ERROR_INVALID_OPERATION; @@ -2316,7 +2316,7 @@ _twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source, wl_display_flush(surf_source->disp_source->disp); TRACE_MARK("[COMMIT] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); buf_info->sync_timestamp++; @@ -2600,7 +2600,7 @@ _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source, wl_display_flush(surf_source->disp_source->disp); - TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); buf_info->need_to_commit = TPL_FALSE; @@ -3107,7 +3107,7 @@ _twe_thread_wl_surf_source_destroy(void *source) __tpl_list_pop_front(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -3564,7 +3564,7 @@ twe_surface_destroy_swapchain(twe_surface_h twe_surface) __tpl_list_pop_front(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -3761,7 +3761,7 @@ _twe_thread_fence_wait_source_dispatch(GSource *source, GSourceFunc cb, gpointer surf_source->render_done_cnt++; - TRACE_ASYNC_END((intptr_t)wait_source, "FENCE WAIT fd(%d)", wait_source->fence_fd); + TRACE_ASYNC_END((int)wait_source, "FENCE WAIT fd(%d)", wait_source->fence_fd); g_mutex_lock(&surf_source->surf_mutex); /* Since this source is going to be removed, acquire_and_commit must be @@ -3819,7 +3819,7 @@ _twe_thread_fence_wait_source_attach(twe_wl_surf_source *surf_source, return TPL_ERROR_OUT_OF_MEMORY; } - TRACE_ASYNC_BEGIN((intptr_t)wait_source, "FENCE WAIT fd(%d)", sync_fd); + TRACE_ASYNC_BEGIN((int)wait_source, "FENCE WAIT fd(%d)", sync_fd); tbm_surface_internal_ref(tbm_surface); @@ -4008,7 +4008,7 @@ twe_surface_queue_force_flush(twe_surface_h twe_surface) tbm_surface_h tbm_surface = __tpl_list_pop_front(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) diff --git a/src/unused/tpl_wl_egl_thread_legacy.c b/src/unused/tpl_wl_egl_thread_legacy.c index c37ee96..bb6a55d 100644 --- a/src/unused/tpl_wl_egl_thread_legacy.c +++ 
b/src/unused/tpl_wl_egl_thread_legacy.c @@ -529,7 +529,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!wayland_egl_surface) { TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)", surface, wayland_egl_surface); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -537,7 +537,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!tbm_surface_internal_is_valid(tbm_surface)) { TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", tbm_surface); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -564,7 +564,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, TPL_LOG_T("WL_EGL", "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_NONE; } @@ -602,14 +602,14 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); TPL_ERR("Failed to enqueue tbm_surface(%p). tpl_surface(%p) tsq_err=%d", tbm_surface, surface, tsq_err); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_INVALID_OPERATION; } tbm_surface_internal_unref(tbm_surface); - TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_NONE; @@ -748,7 +748,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, surface->frontbuffer, tbm_bo_export(tbm_surface_internal_get_bo( surface->frontbuffer, 0))); - TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer, + TRACE_ASYNC_BEGIN((int)surface->frontbuffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo( surface->frontbuffer, 0))); @@ -791,7 +791,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wayland_egl_surface->reset = TPL_FALSE; TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name); - TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)", tbm_surface, bo_name, sync_fence ? 
*sync_fence : -1); -- 2.7.4 From f4e10888fa9a749af5679d6c02f68be519161515 Mon Sep 17 00:00:00 2001 From: Jinbong Date: Mon, 18 Jul 2022 14:53:58 +0900 Subject: [PATCH 03/16] Fix pointer to int cast warning Change-Id: Id0af40582f900c46745f6c1b8e6864789daeca81 --- src/tpl_tbm.c | 4 ++-- src/tpl_wayland_egl.c | 18 +++++++++--------- src/tpl_wl_egl_thread.c | 24 ++++++++++++------------ src/tpl_wl_vk_thread.c | 4 ++-- src/unused/tpl_gbm.c | 6 +++--- src/unused/tpl_wayland_egl_thread.c | 24 ++++++++++++------------ src/unused/tpl_wl_egl_thread_legacy.c | 14 +++++++------- 7 files changed, 47 insertions(+), 47 deletions(-) diff --git a/src/tpl_tbm.c b/src/tpl_tbm.c index a41187c..1a66f17 100644 --- a/src/tpl_tbm.c +++ b/src/tpl_tbm.c @@ -250,7 +250,7 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_PARAMETER; } - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); tbm_surface_internal_unref(tbm_surface); @@ -320,7 +320,7 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, surface, tpl_tbm_surface->tbm_queue, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); return tbm_surface; diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c index 16fc9f2..b8b24da 100755 --- a/src/tpl_wayland_egl.c +++ b/src/tpl_wayland_egl.c @@ -805,7 +805,7 @@ __tpl_wayland_egl_surface_fini(tpl_surface_t *surface) tbm_surface_queue_error_e tsq_err; tbm_surface_h tbm_surface = __tpl_list_pop_front(wayland_egl_surface->attached_buffers, NULL); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo( tbm_surface, 0))); tbm_surface_internal_unref(tbm_surface); @@ -1018,7 +1018,7 @@ __tpl_wayland_egl_surface_commit(tpl_surface_t *surface, if (tdm_lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->tdm_mutex); } - TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); } @@ -1074,7 +1074,7 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface, TPL_LOG_B("WL_EGL", "[ENQ_SKIP][F] Client already uses frontbuffer(%p)", surface->frontbuffer); - TRACE_ASYNC_END((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); return TPL_ERROR_NONE; } @@ -1103,7 +1103,7 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_OPERATION; } - TRACE_ASYNC_END((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); TRACE_BEGIN("[WAIT_VBLANK]"); @@ -1333,7 +1333,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface( surface->frontbuffer); if (wayland_egl_buffer) - TRACE_ASYNC_BEGIN((int)wayland_egl_buffer, + TRACE_ASYNC_BEGIN((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] 
BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); if (lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->wl_event_mutex); @@ -1361,7 +1361,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou if ((wayland_egl_buffer = __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface(tbm_surface)) != NULL) { TRACE_MARK("[DEQ][REUSED]BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); - TRACE_ASYNC_BEGIN((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); wayland_egl_buffer->dx = wl_egl_window->dx; @@ -1471,7 +1471,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou wayland_egl_buffer); TRACE_MARK("[DEQ][NEW]BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); - TRACE_ASYNC_BEGIN((int)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)wayland_egl_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(wayland_egl_buffer->bo)); TPL_LOG_B("WL_EGL", "[DEQ][N] tpl_wayland_egl_buffer_t(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ -1589,7 +1589,7 @@ __cb_client_buffer_release_callback(void *data, struct wl_proxy *proxy) tbm_surface = (tbm_surface_h) data; - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); TPL_LOG_B("WL_EGL", "[RELEASE_CB] wl_buffer(%p) tbm_surface(%p) bo(%d)", proxy, tbm_surface, @@ -1909,7 +1909,7 @@ static void __cb_tizen_surface_shm_flusher_flush_callback(void *data, tpl_wayland_egl_buffer_t *wayland_egl_buffer = __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface(tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo( tbm_surface, 0))); diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index b8830a7..2ad01c2 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2539,7 +2539,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TPL_LOG_T("WL_EGL", "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", surface->frontbuffer, bo_name); - TRACE_ASYNC_BEGIN((int)surface->frontbuffer, + TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); @@ -2593,7 +2593,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_surface->reset = TPL_FALSE; TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", wl_egl_buffer->bo_name); TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name, @@ -2665,7 +2665,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!tbm_surface_internal_is_valid(tbm_surface)) { TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); return TPL_ERROR_INVALID_PARAMETER; } @@ -2705,7 +2705,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, !wl_egl_buffer->need_to_commit) { 
TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); tpl_gmutex_unlock(&wl_egl_buffer->mutex); return TPL_ERROR_NONE; } @@ -2762,7 +2762,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d", tbm_surface, wl_egl_surface, tsq_err); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); return TPL_ERROR_INVALID_OPERATION; } @@ -2772,7 +2772,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); return TPL_ERROR_NONE; } @@ -2924,7 +2924,7 @@ __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data; tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK"); + TRACE_ASYNC_END((intptr_t)wl_egl_surface, "WAIT_VBLANK"); TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface); if (error == TDM_ERROR_TIMEOUT) @@ -2987,7 +2987,7 @@ __cb_buffer_fenced_release(void *data, TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", _get_tbm_surface_bo_name(tbm_surface), fence); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T("WL_EGL", @@ -3038,7 +3038,7 @@ __cb_buffer_immediate_release(void *data, TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T("WL_EGL", @@ -3094,7 +3094,7 @@ __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) wl_egl_buffer->status = RELEASED; TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ -3243,7 +3243,7 @@ _thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface) if (tdm_err == TDM_ERROR_NONE) { wl_egl_surface->vblank_done = TPL_FALSE; - TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK"); + TRACE_ASYNC_BEGIN((intptr_t)wl_egl_surface, "WAIT_VBLANK"); } else { TPL_ERR("Failed to tdm_client_vblank_wait. 
tdm_err(%d)", tdm_err); return TPL_ERROR_INVALID_OPERATION; @@ -3420,7 +3420,7 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, wl_display_flush(wl_egl_display->wl_display); - TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_BEGIN((intptr_t)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", wl_egl_buffer->bo_name); tpl_gmutex_lock(&wl_egl_buffer->mutex); diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index f9d6952..4d8fb2c 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -2413,7 +2413,7 @@ __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data; tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; - TRACE_ASYNC_END((int)wl_vk_surface, "WAIT_VBLANK"); + TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK"); TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface); if (error == TDM_ERROR_TIMEOUT) @@ -2454,7 +2454,7 @@ _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface) if (tdm_err == TDM_ERROR_NONE) { wl_vk_surface->vblank_done = TPL_FALSE; - TRACE_ASYNC_BEGIN((int)wl_vk_surface, "WAIT_VBLANK"); + TRACE_ASYNC_BEGIN((intptr_t)wl_vk_surface, "WAIT_VBLANK"); } else { TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err); return TPL_ERROR_INVALID_OPERATION; diff --git a/src/unused/tpl_gbm.c b/src/unused/tpl_gbm.c index 83c9c81..be5f238 100644 --- a/src/unused/tpl_gbm.c +++ b/src/unused/tpl_gbm.c @@ -396,7 +396,7 @@ __tpl_gbm_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_PARAMETER; } - TRACE_ASYNC_END((int)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_END((intptr_t)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); TPL_IMAGE_DUMP(tbm_surface, surface->width, surface->height); @@ -521,7 +521,7 @@ __tpl_gbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, gbm_buffer = __tpl_gbm_get_gbm_buffer_from_tbm_surface(tbm_surface); if (gbm_buffer) { TRACE_MARK("[DEQ][REUSED]BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); - TRACE_ASYNC_BEGIN((int)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); TPL_LOG_B("GBM", "[DEQ][R] tpl_gbm_surface_t(%p) tbm_surface(%p) bo(%d)", gbm_surface, tbm_surface, tbm_bo_export(gbm_buffer->bo)); @@ -549,7 +549,7 @@ __tpl_gbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, __tpl_gbm_set_gbm_buffer_to_tbm_surface(tbm_surface, gbm_buffer); TRACE_MARK("[DEQ][NEW]BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); - TRACE_ASYNC_BEGIN((int)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", + TRACE_ASYNC_BEGIN((intptr_t)gbm_buffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(gbm_buffer->bo)); TPL_LOG_B("GBM", "[DEQ][N] tpl_gbm_surface_t(%p) tbm_surface(%p) bo(%d)", gbm_surface, tbm_surface, tbm_bo_export(bo)); diff --git a/src/unused/tpl_wayland_egl_thread.c b/src/unused/tpl_wayland_egl_thread.c index 0ea5cc4..ccbc370 100755 --- a/src/unused/tpl_wayland_egl_thread.c +++ b/src/unused/tpl_wayland_egl_thread.c @@ -1692,7 +1692,7 @@ __cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer) buf_info->need_to_release = TPL_FALSE; TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T(BACKEND, "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ 
-1742,7 +1742,7 @@ __cb_buffer_fenced_release(void *data, TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", _get_tbm_surface_bo_name(tbm_surface), fence); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T(BACKEND, @@ -1794,7 +1794,7 @@ __cb_buffer_immediate_release(void *data, TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TPL_LOG_T(BACKEND, @@ -2165,7 +2165,7 @@ __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, return; } - TRACE_ASYNC_END((int)surf_source, "WAIT_VBLANK"); + TRACE_ASYNC_END((intptr_t)surf_source, "WAIT_VBLANK"); if (error == TDM_ERROR_TIMEOUT) TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. surf_source(%p)", @@ -2249,7 +2249,7 @@ _twe_surface_wait_vblank(twe_wl_surf_source *surf_source) if (tdm_err == TDM_ERROR_NONE) { surf_source->vblank_done = TPL_FALSE; - TRACE_ASYNC_BEGIN((int)surf_source, "WAIT_VBLANK"); + TRACE_ASYNC_BEGIN((intptr_t)surf_source, "WAIT_VBLANK"); } else { TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err); return TPL_ERROR_INVALID_OPERATION; @@ -2316,7 +2316,7 @@ _twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source, wl_display_flush(surf_source->disp_source->disp); TRACE_MARK("[COMMIT] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); buf_info->sync_timestamp++; @@ -2600,7 +2600,7 @@ _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source, wl_display_flush(surf_source->disp_source->disp); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); buf_info->need_to_commit = TPL_FALSE; @@ -3107,7 +3107,7 @@ _twe_thread_wl_surf_source_destroy(void *source) __tpl_list_pop_front(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -3564,7 +3564,7 @@ twe_surface_destroy_swapchain(twe_surface_h twe_surface) __tpl_list_pop_front(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -3761,7 +3761,7 @@ _twe_thread_fence_wait_source_dispatch(GSource *source, GSourceFunc cb, gpointer surf_source->render_done_cnt++; - TRACE_ASYNC_END((int)wait_source, "FENCE WAIT fd(%d)", wait_source->fence_fd); + TRACE_ASYNC_END((intptr_t)wait_source, "FENCE WAIT fd(%d)", wait_source->fence_fd); g_mutex_lock(&surf_source->surf_mutex); /* Since this source is going to be removed, acquire_and_commit must be @@ -3819,7 +3819,7 @@ 
_twe_thread_fence_wait_source_attach(twe_wl_surf_source *surf_source, return TPL_ERROR_OUT_OF_MEMORY; } - TRACE_ASYNC_BEGIN((int)wait_source, "FENCE WAIT fd(%d)", sync_fd); + TRACE_ASYNC_BEGIN((intptr_t)wait_source, "FENCE WAIT fd(%d)", sync_fd); tbm_surface_internal_ref(tbm_surface); @@ -4008,7 +4008,7 @@ twe_surface_queue_force_flush(twe_surface_h twe_surface) tbm_surface_h tbm_surface = __tpl_list_pop_front(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) diff --git a/src/unused/tpl_wl_egl_thread_legacy.c b/src/unused/tpl_wl_egl_thread_legacy.c index bb6a55d..c37ee96 100644 --- a/src/unused/tpl_wl_egl_thread_legacy.c +++ b/src/unused/tpl_wl_egl_thread_legacy.c @@ -529,7 +529,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!wayland_egl_surface) { TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)", surface, wayland_egl_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -537,7 +537,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!tbm_surface_internal_is_valid(tbm_surface)) { TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -564,7 +564,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, TPL_LOG_T("WL_EGL", "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_NONE; } @@ -602,14 +602,14 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); TPL_ERR("Failed to enqueue tbm_surface(%p). 
tpl_surface(%p) tsq_err=%d", tbm_surface, surface, tsq_err); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_INVALID_OPERATION; } tbm_surface_internal_unref(tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_OBJECT_UNLOCK(wayland_egl_surface); return TPL_ERROR_NONE; @@ -748,7 +748,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, surface->frontbuffer, tbm_bo_export(tbm_surface_internal_get_bo( surface->frontbuffer, 0))); - TRACE_ASYNC_BEGIN((int)surface->frontbuffer, + TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer, "[DEQ]~[ENQ] BO_NAME:%d", tbm_bo_export(tbm_surface_internal_get_bo( surface->frontbuffer, 0))); @@ -791,7 +791,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wayland_egl_surface->reset = TPL_FALSE; TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)", tbm_surface, bo_name, sync_fence ? *sync_fence : -1); -- 2.7.4 From 3aafd1eddac8923a8adbd462f263862776045fb2 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 9 Jun 2022 18:43:49 +0900 Subject: [PATCH 04/16] Added internal function to check buffer is validate - It can be modified flexibly. - For now, this function can check if given tbm_surface_h is managed by wl_egl_surface. Change-Id: Ied59f583666a5f18f15537be6507c83c5277a866 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 2ad01c2..e94fe16 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -252,6 +252,8 @@ static int _get_tbm_surface_bo_name(tbm_surface_h tbm_surface); static void _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface); +static tpl_bool_t +_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface); static void __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer); static tpl_wl_egl_buffer_t * @@ -3639,3 +3641,31 @@ _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) } tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); } + +static tpl_bool_t +_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface) +{ + int idx = 0; + tpl_bool_t ret = TPL_FALSE; + + /* silent return */ + if (!wl_egl_surface || !tbm_surface) + return ret; + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { + tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; + if (wl_egl_buffer && wl_egl_buffer->tbm_surface == tbm_surface) { + ret = TPL_TRUE; + break; + } + } + + if (ret == TPL_FALSE || idx == BUFFER_ARRAY_SIZE) { + TPL_ERR("tbm_surface(%p) is not owned by wl_egl_surface(%p)", + tbm_surface, wl_egl_surface); + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + + return ret; +} -- 2.7.4 From 39609145b0e6ac9c44de22b5f3bd14e40be5f69c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 9 Jun 2022 19:04:11 +0900 Subject: [PATCH 05/16] Fixed to prevent page fault via invalid address. 
- If the last_enq_buffer of wl_egl_surface holds a wrong pointer address, a page fault may occur inside tbm_surface_internal_is_valid of libtbm. - To prevent this problem, the pointer is now checked in advance against the list of buffers owned by wl_egl_surface. Change-Id: I459b182e9ed435ce93a3a862251869fb9c7829ad Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 54 +++++++++++++++++++++++++------------------------ 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index e94fe16..d4efca0 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2445,35 +2445,37 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TPL_OBJECT_UNLOCK(surface); tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - if (wl_egl_surface->reset == TPL_TRUE && - tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) { - tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer; - tpl_wl_egl_buffer_t *enqueued_buffer = - _get_wl_egl_buffer(last_enq_buffer); - - if (enqueued_buffer) { - tbm_surface_internal_ref(last_enq_buffer); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - tpl_gmutex_lock(&enqueued_buffer->mutex); - while (enqueued_buffer->status >= ENQUEUED && - enqueued_buffer->status < COMMITTED) { - tpl_result_t wait_result; - TPL_INFO("[DEQ_AFTER_RESET]", - "waiting for previous wl_egl_buffer(%p) commit", - enqueued_buffer); - - wait_result = tpl_cond_timed_wait(&enqueued_buffer->cond, - &enqueued_buffer->mutex, - 200); /* 200ms */ - if (wait_result == TPL_ERROR_TIME_OUT) { - TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", + if (wl_egl_surface->reset == TPL_TRUE) { + if (_check_buffer_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer) && + tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) { + tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer; + tpl_wl_egl_buffer_t *enqueued_buffer = + _get_wl_egl_buffer(last_enq_buffer); + + if (enqueued_buffer) { + tbm_surface_internal_ref(last_enq_buffer); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + tpl_gmutex_lock(&enqueued_buffer->mutex); + while (enqueued_buffer->status >= ENQUEUED && + enqueued_buffer->status < COMMITTED) { + tpl_result_t wait_result; + TPL_INFO("[DEQ_AFTER_RESET]", + "waiting for previous wl_egl_buffer(%p) commit", enqueued_buffer); - break; + + wait_result = tpl_cond_timed_wait(&enqueued_buffer->cond, + &enqueued_buffer->mutex, + 200); /* 200ms */ + if (wait_result == TPL_ERROR_TIME_OUT) { + TPL_WARN("timeout occured waiting signaled. 
wl_egl_buffer(%p)", + enqueued_buffer); + break; + } } + tpl_gmutex_unlock(&enqueued_buffer->mutex); + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + tbm_surface_internal_unref(last_enq_buffer); } - tpl_gmutex_unlock(&enqueued_buffer->mutex); - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - tbm_surface_internal_unref(last_enq_buffer); } wl_egl_surface->last_enq_buffer = NULL; -- 2.7.4 From 7043c50993c996ba17df4f3cf21436c9d37a9a79 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 9 Jun 2022 19:13:37 +0900 Subject: [PATCH 06/16] Package version up to 1.9.7 Change-Id: I9a15a0ff3f4accd438af5fe8abd89d71fc1275fa Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index 0b98317..cb1653c 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 6 +%define TPL_VERSION_PATCH 7 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From 0724b49458ea5b17330fae3f3aeff67401641c81 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 28 Sep 2022 20:26:57 +0900 Subject: [PATCH 07/16] Delete g_cond_wait from tpl_gsource_destroy. MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit - g_cond_wait does not guarantee perfectly. Therefore, it is recommended that the caller of tpl_gsource_destroy should call the g_cond_wait to confirm if the destroy is actually complete. - https://docs.gtk.org/glib/method.Cond.wait.html Atomically releases mutex and waits until cond is signalled. When this function returns, mutex is locked again and owned by the calling thread. When using condition variables, it is possible that a spurious wakeup may occur (ie: g_cond_wait() returns even though g_cond_signal() was not called). It’s also possible that a stolen wakeup may occur. This is when g_cond_signal() is called, but another thread acquires mutex before this thread and modifies the state of the program in such a way that when g_cond_wait() is able to return, the expected condition is no longer met. For this reason, g_cond_wait() must always be used in a loop. See the documentation for GCond for a complete example. Change-Id: If3b98b4d79b205d9125558edb75f4b85ef6a3a99 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 2 -- src/tpl_utils_gthread.h | 5 +++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 3352a0d..20b6838 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -329,8 +329,6 @@ tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread) if (destroy_in_thread) { finalizer->intended_destroy = TPL_TRUE; tpl_gsource_send_message(finalizer, 1); - - g_cond_wait(&thread->thread_cond, &thread->thread_mutex); } else { __gsource_remove_and_destroy(finalizer); source->finalizer = NULL; diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index a1d4ce1..0237f40 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -65,6 +65,11 @@ tpl_gthread_destroy(tpl_gthread *thread); * * This creates a new tpl_gsource to be attached the thread loop. * + * IMPORTANT + * - If destroy_in_thread is TPL_TRUE, since this function does not use + * g_cond_wait(), caller should call tpl_gcond_wait() or tpl_gcond_timed_wait() + * to confirm gsource destroy completely. 
+ * + * @param thread Pointer to tpl_gthread to attach new tpl_gsource. * @param data Pointer to some handle used by its user. * @param fd fd to poll. If the value is more than 0, the passed value will be polled. -- 2.7.4 From e6f7aa1cb1942797a572c2e705c5027a907aed38 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 27 Sep 2022 20:41:14 +0900 Subject: [PATCH 08/16] Modified the code paths that call tpl_gcond_wait. - g_cond_wait is sometimes awakened by an unknown or stolen signal, and unexpected problems may arise in such cases. To prevent these problems, each tpl_gsource now has a tpl_gmutex and tpl_gcond, and the code retries tpl_gcond_wait() until the gsource_finalized flag becomes true. - This may be replaced with a better approach later. Change-Id: I1360c0a3888186ba0309fe4d94c5be8e29c6f1b8 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 128 +++++++++++++++++++++++++----- src/tpl_wl_vk_thread.c | 207 ++++++++++++++++++++++++++++++++++++------------ 2 files changed, 266 insertions(+), 69 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index d4efca0..9fd1e8a 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -60,13 +60,21 @@ struct _tpl_wl_egl_display { tpl_bool_t use_explicit_sync; tpl_bool_t use_tss; tpl_bool_t prepared; - + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex disp_mutex; + tpl_gcond disp_cond; struct { tdm_client *tdm_client; tpl_gsource *tdm_source; int tdm_display_fd; tpl_bool_t tdm_initialized; tpl_list_t *surface_vblanks; + + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex tdm_mutex; + tpl_gcond tdm_cond; } tdm; #if TIZEN_FEATURE_ENABLE @@ -143,6 +151,10 @@ struct _tpl_wl_egl_surface { tpl_bool_t prerotation_capability; tpl_bool_t vblank_done; tpl_bool_t set_serial_is_used; + tpl_bool_t initialized_in_thread; + + /* To make sure that tpl_gsource has been successfully finalized. 
*/ + tpl_bool_t gsource_finalized; }; struct _surface_vblank { @@ -363,9 +375,12 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - TPL_LOG_T("WL_EGL", - "tdm_destroy| wl_egl_display(%p) tdm_client(%p) tpl_gsource(%p)", - wl_egl_display, wl_egl_display->tdm.tdm_client, gsource); + tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + + TPL_INFO("[TDM_CLIENT_FINI]", + "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_egl_display, wl_egl_display->tdm.tdm_client, + wl_egl_display->tdm.tdm_display_fd); if (wl_egl_display->tdm.tdm_client) { @@ -383,6 +398,10 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_egl_display->use_wait_vblank = TPL_FALSE; wl_egl_display->tdm.tdm_initialized = TPL_FALSE; + wl_egl_display->tdm.gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_egl_display->tdm.tdm_cond); + tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); } static tpl_gsource_functions tdm_funcs = { @@ -809,11 +828,17 @@ __thread_func_disp_finalize(tpl_gsource *gsource) tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + TPL_DEBUG("[FINALIZE] wl_egl_display(%p) tpl_gsource(%p)", + wl_egl_display, gsource); + if (wl_egl_display->wl_initialized) _thread_wl_display_fini(wl_egl_display); - TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)", - wl_egl_display, gsource); + wl_egl_display->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_egl_display->disp_cond); + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); return; } @@ -867,6 +892,7 @@ __tpl_wl_egl_display_init(tpl_display_t *display) wl_egl_display->use_tss = TPL_FALSE; wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled wl_egl_display->prepared = TPL_FALSE; + wl_egl_display->gsource_finalized = TPL_FALSE; #if TIZEN_FEATURE_ENABLE /* Wayland Interfaces */ @@ -886,6 +912,9 @@ __tpl_wl_egl_display_init(tpl_display_t *display) tpl_gmutex_init(&wl_egl_display->wl_event_mutex); + tpl_gmutex_init(&wl_egl_display->disp_mutex); + tpl_gcond_init(&wl_egl_display->disp_cond); + /* Create gthread */ wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", (tpl_gthread_func)_thread_init, @@ -908,10 +937,13 @@ __tpl_wl_egl_display_init(tpl_display_t *display) if (wl_egl_display->use_wait_vblank && wl_egl_display->tdm.tdm_initialized) { + tpl_gmutex_init(&wl_egl_display->tdm.tdm_mutex); + tpl_gcond_init(&wl_egl_display->tdm.tdm_cond); wl_egl_display->tdm.tdm_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_display, wl_egl_display->tdm.tdm_display_fd, &tdm_funcs, SOURCE_TYPE_NORMAL); + wl_egl_display->tdm.gsource_finalized = TPL_FALSE; if (!wl_egl_display->tdm.tdm_source) { TPL_ERR("Failed to create tdm_gsource\n"); goto free_display; @@ -936,15 +968,33 @@ __tpl_wl_egl_display_init(tpl_display_t *display) return TPL_ERROR_NONE; free_display: - if (wl_egl_display->thread) { - if (wl_egl_display->tdm.tdm_source) + if (wl_egl_display->tdm.tdm_source) { + tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + while (!wl_egl_display->tdm.gsource_finalized) { tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); - if (wl_egl_display->disp_source) + tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); + } + tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); + } + + if (wl_egl_display->disp_source) { + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + while (!wl_egl_display->gsource_finalized) { 
tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); + tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); + } + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); + } + if (wl_egl_display->thread) { tpl_gthread_destroy(wl_egl_display->thread); } + tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_egl_display->disp_cond); + tpl_gmutex_clear(&wl_egl_display->disp_mutex); + wl_egl_display->thread = NULL; free(wl_egl_display); @@ -968,20 +1018,43 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) wl_egl_display->wl_display); if (wl_egl_display->tdm.tdm_source && wl_egl_display->tdm.tdm_initialized) { - tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + while (!wl_egl_display->tdm.gsource_finalized) { + tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); + tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); + } wl_egl_display->tdm.tdm_source = NULL; + tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); } - if (wl_egl_display->disp_source) { + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + while (wl_egl_display->disp_source && !wl_egl_display->gsource_finalized) { tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); - wl_egl_display->disp_source = NULL; + tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); } + wl_egl_display->disp_source = NULL; + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); if (wl_egl_display->thread) { tpl_gthread_destroy(wl_egl_display->thread); wl_egl_display->thread = NULL; } + tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_egl_display->disp_cond); + tpl_gmutex_clear(&wl_egl_display->disp_mutex); + tpl_gmutex_clear(&wl_egl_display->wl_event_mutex); free(wl_egl_display); @@ -1495,8 +1568,6 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) { tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - TPL_INFO("[SURFACE_FINI]", "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", wl_egl_surface, wl_egl_surface->wl_egl_window, @@ -1572,8 +1643,6 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) __cb_surface_vblank_free); wl_egl_surface->vblank = NULL; } - - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } static tpl_bool_t @@ -1588,6 +1657,7 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) TPL_DEBUG("wl_egl_surface(%p) initialize message received!", wl_egl_surface); _thread_wl_egl_surface_init(wl_egl_surface); + wl_egl_surface->initialized_in_thread = TPL_TRUE; tpl_gcond_signal(&wl_egl_surface->surf_cond); } else if (message == ACQUIRABLE) { /* Acquirable */ TPL_DEBUG("wl_egl_surface(%p) acquirable message received!", @@ -1610,10 +1680,16 @@ __thread_func_surf_finalize(tpl_gsource *gsource) 
wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - _thread_wl_egl_surface_fini(wl_egl_surface); - + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)", wl_egl_surface, gsource); + + _thread_wl_egl_surface_fini(wl_egl_surface); + + wl_egl_surface->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_egl_surface->surf_cond); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } static tpl_gsource_functions surf_funcs = { @@ -1685,6 +1761,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) wl_egl_surface->vblank_done = TPL_TRUE; wl_egl_surface->use_render_done_fence = TPL_FALSE; wl_egl_surface->set_serial_is_used = TPL_FALSE; + wl_egl_surface->gsource_finalized = TPL_FALSE; + wl_egl_surface->initialized_in_thread = TPL_FALSE; wl_egl_surface->latest_transform = -1; wl_egl_surface->render_done_cnt = 0; @@ -1754,7 +1832,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) wl_egl_surface->sent_message = INIT_SURFACE; tpl_gsource_send_message(wl_egl_surface->surf_source, wl_egl_surface->sent_message); - tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + while (!wl_egl_surface->initialized_in_thread) + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); TPL_ASSERT(wl_egl_surface->tbm_queue); @@ -2113,9 +2192,18 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) _tpl_wl_egl_surface_buffer_clear(wl_egl_surface); - if (wl_egl_surface->surf_source) + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + while (wl_egl_surface->surf_source && !wl_egl_surface->gsource_finalized) { tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + } wl_egl_surface->surf_source = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); _print_buffer_lists(wl_egl_surface); diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 4d8fb2c..7e0e621 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -52,16 +52,27 @@ struct _tpl_wl_vk_display { int last_error; /* errno of the last wl_display error*/ tpl_bool_t wl_initialized; - tpl_bool_t tdm_initialized; - tdm_client *tdm_client; - tpl_gsource *tdm_source; - int tdm_display_fd; + struct { + tdm_client *tdm_client; + tpl_gsource *tdm_source; + int tdm_display_fd; + tpl_bool_t tdm_initialized; + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex tdm_mutex; + tpl_gcond tdm_cond; + } tdm; tpl_bool_t use_wait_vblank; tpl_bool_t use_explicit_sync; tpl_bool_t prepared; + /* To make sure that tpl_gsource has been successfully finalized. 
*/ + tpl_bool_t gsource_finalized; + tpl_gmutex disp_mutex; + tpl_gcond disp_cond; + /* device surface capabilities */ int min_buffer; int max_buffer; @@ -75,6 +86,9 @@ struct _tpl_wl_vk_swapchain { tpl_wl_vk_surface_t *wl_vk_surface; tbm_surface_queue_h tbm_queue; + tpl_result_t result; + + tpl_bool_t create_done; struct { int width; @@ -128,6 +142,10 @@ struct _tpl_wl_vk_surface { tpl_bool_t is_activated; tpl_bool_t reset; /* TRUE if queue reseted by external */ tpl_bool_t vblank_done; + tpl_bool_t initialized_in_thread; + + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; surf_message sent_message; @@ -250,7 +268,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) return TPL_FALSE; } - tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client); + tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client); /* If an error occurs in tdm_client_handle_events, it cannot be recovered. * When tdm_source is no longer available due to an unexpected situation, @@ -264,7 +282,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gsource_destroy(gsource, TPL_FALSE); - wl_vk_display->tdm_source = NULL; + wl_vk_display->tdm.tdm_source = NULL; return TPL_FALSE; } @@ -279,17 +297,24 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); - TPL_LOG_T("WL_VK", - "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)", - wl_vk_display, wl_vk_display->tdm_client, gsource); + tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex); + + TPL_INFO("[TDM_CLIENT_FINI]", + "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_vk_display, wl_vk_display->tdm.tdm_client, + wl_vk_display->tdm.tdm_display_fd); - if (wl_vk_display->tdm_client) { - tdm_client_destroy(wl_vk_display->tdm_client); - wl_vk_display->tdm_client = NULL; - wl_vk_display->tdm_display_fd = -1; + if (wl_vk_display->tdm.tdm_client) { + tdm_client_destroy(wl_vk_display->tdm.tdm_client); + wl_vk_display->tdm.tdm_client = NULL; + wl_vk_display->tdm.tdm_display_fd = -1; } - wl_vk_display->tdm_initialized = TPL_FALSE; + wl_vk_display->tdm.tdm_initialized = TPL_FALSE; + wl_vk_display->tdm.gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond); + tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex); } static tpl_gsource_functions tdm_funcs = { @@ -319,10 +344,10 @@ _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display) return TPL_ERROR_INVALID_OPERATION; } - wl_vk_display->tdm_display_fd = tdm_display_fd; - wl_vk_display->tdm_client = tdm_client; - wl_vk_display->tdm_source = NULL; - wl_vk_display->tdm_initialized = TPL_TRUE; + wl_vk_display->tdm.tdm_display_fd = tdm_display_fd; + wl_vk_display->tdm.tdm_client = tdm_client; + wl_vk_display->tdm.tdm_source = NULL; + wl_vk_display->tdm.tdm_initialized = TPL_TRUE; TPL_INFO("[TDM_CLIENT_INIT]", "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)", @@ -668,11 +693,17 @@ __thread_func_disp_finalize(tpl_gsource *gsource) tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + tpl_gmutex_lock(&wl_vk_display->disp_mutex); + TPL_DEBUG("[FINALIZE] wl_vk_display(%p) tpl_gsource(%p)", + wl_vk_display, gsource); + if (wl_vk_display->wl_initialized) _thread_wl_display_fini(wl_vk_display); - TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)", - wl_vk_display, gsource); + wl_vk_display->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_vk_display->disp_cond); + 
tpl_gmutex_unlock(&wl_vk_display->disp_mutex); return; } @@ -713,7 +744,7 @@ __tpl_wl_vk_display_init(tpl_display_t *display) display->backend.data = wl_vk_display; display->bufmgr_fd = -1; - wl_vk_display->tdm_initialized = TPL_FALSE; + wl_vk_display->tdm.tdm_initialized = TPL_FALSE; wl_vk_display->wl_initialized = TPL_FALSE; wl_vk_display->ev_queue = NULL; @@ -743,6 +774,9 @@ __tpl_wl_vk_display_init(tpl_display_t *display) tpl_gmutex_init(&wl_vk_display->wl_event_mutex); + tpl_gmutex_init(&wl_vk_display->disp_mutex); + tpl_gcond_init(&wl_vk_display->disp_cond); + /* Create gthread */ wl_vk_display->thread = tpl_gthread_create("wl_vk_thread", (tpl_gthread_func)_thread_init, @@ -763,11 +797,14 @@ __tpl_wl_vk_display_init(tpl_display_t *display) goto free_display; } - wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread, + tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex); + tpl_gcond_init(&wl_vk_display->tdm.tdm_cond); + + wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_display, - wl_vk_display->tdm_display_fd, + wl_vk_display->tdm.tdm_display_fd, &tdm_funcs, SOURCE_TYPE_NORMAL); - if (!wl_vk_display->tdm_source) { + if (!wl_vk_display->tdm.tdm_source) { TPL_ERR("Failed to create tdm_gsource\n"); goto free_display; } @@ -786,15 +823,33 @@ __tpl_wl_vk_display_init(tpl_display_t *display) return TPL_ERROR_NONE; free_display: - if (wl_vk_display->thread) { - if (wl_vk_display->tdm_source) - tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); - if (wl_vk_display->disp_source) + if (wl_vk_display->tdm.tdm_source) { + tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex); + while (!wl_vk_display->tdm.gsource_finalized) { + tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE); + tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex); + } + tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex); + } + + if (wl_vk_display->disp_source) { + tpl_gmutex_lock(&wl_vk_display->disp_mutex); + while (!wl_vk_display->gsource_finalized) { tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); + tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex); + } + tpl_gmutex_unlock(&wl_vk_display->disp_mutex); + } + if (wl_vk_display->thread) { tpl_gthread_destroy(wl_vk_display->thread); } + tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_vk_display->disp_cond); + tpl_gmutex_clear(&wl_vk_display->disp_mutex); + wl_vk_display->thread = NULL; free(wl_vk_display); @@ -817,21 +872,44 @@ __tpl_wl_vk_display_fini(tpl_display_t *display) wl_vk_display->thread, wl_vk_display->wl_display); - if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) { - tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); - wl_vk_display->tdm_source = NULL; + if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) { + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. 
+ * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex); + while (!wl_vk_display->tdm.gsource_finalized) { + tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE); + tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex); + } + wl_vk_display->tdm.tdm_source = NULL; + tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex); } - if (wl_vk_display->disp_source) { + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_vk_display->disp_mutex); + while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) { tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); - wl_vk_display->disp_source = NULL; + tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex); } + wl_vk_display->disp_source = NULL; + tpl_gmutex_unlock(&wl_vk_display->disp_mutex); if (wl_vk_display->thread) { tpl_gthread_destroy(wl_vk_display->thread); wl_vk_display->thread = NULL; } + tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_vk_display->disp_cond); + tpl_gmutex_clear(&wl_vk_display->disp_mutex); + tpl_gmutex_clear(&wl_vk_display->wl_event_mutex); free(wl_vk_display); @@ -1062,11 +1140,11 @@ _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface) /* tbm_surface_queue will be created at swapchain_create */ wl_vk_surface->vblank = _thread_create_tdm_client_vblank( - wl_vk_display->tdm_client); + wl_vk_display->tdm.tdm_client); if (wl_vk_surface->vblank) { TPL_INFO("[VBLANK_INIT]", "wl_vk_surface(%p) tdm_client(%p) vblank(%p)", - wl_vk_surface, wl_vk_display->tdm_client, + wl_vk_surface, wl_vk_display->tdm.tdm_client, wl_vk_surface->vblank); } @@ -1092,8 +1170,6 @@ _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface) static void _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface) { - tpl_gmutex_lock(&wl_vk_surface->surf_mutex); - TPL_INFO("[SURFACE_FINI]", "wl_vk_surface(%p) wl_surface(%p)", wl_vk_surface, wl_vk_surface->wl_surface); @@ -1120,8 +1196,6 @@ _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface) tdm_client_vblank_destroy(wl_vk_surface->vblank); wl_vk_surface->vblank = NULL; } - - tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } static tpl_bool_t @@ -1177,10 +1251,16 @@ __thread_func_surf_finalize(tpl_gsource *gsource) wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource); TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); - _thread_wl_vk_surface_fini(wl_vk_surface); - + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)", wl_vk_surface, gsource); + + _thread_wl_vk_surface_fini(wl_vk_surface); + + wl_vk_surface->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } static tpl_gsource_functions surf_funcs = { @@ -1237,6 +1317,7 @@ __tpl_wl_vk_surface_init(tpl_surface_t *surface) wl_vk_surface->reset = TPL_FALSE; wl_vk_surface->is_activated = TPL_FALSE; wl_vk_surface->vblank_done = TPL_TRUE; + wl_vk_surface->initialized_in_thread = TPL_FALSE; wl_vk_surface->render_done_cnt = 0; @@ -1266,7 +1347,8 @@ 
__tpl_wl_vk_surface_init(tpl_surface_t *surface) wl_vk_surface->sent_message = INIT_SURFACE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + while (!wl_vk_surface->initialized_in_thread) + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); TPL_INFO("[SURFACE_INIT]", @@ -1303,9 +1385,18 @@ __tpl_wl_vk_surface_fini(tpl_surface_t *surface) wl_vk_surface->swapchain = NULL; - if (wl_vk_surface->surf_source) + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) { tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE); - wl_vk_surface->surf_source = NULL; + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + } + wl_vk_surface->surf_source = NULL; + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); _print_buffer_lists(wl_vk_surface); @@ -1450,6 +1541,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("buffer count(%d) must be higher than (%d)", swapchain->properties.buffer_count, wl_vk_display->min_buffer); + swapchain->result = TPL_ERROR_INVALID_PARAMETER; return TPL_ERROR_INVALID_PARAMETER; } @@ -1457,12 +1549,14 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("buffer count(%d) must be lower than (%d)", swapchain->properties.buffer_count, wl_vk_display->max_buffer); + swapchain->result = TPL_ERROR_INVALID_PARAMETER; return TPL_ERROR_INVALID_PARAMETER; } if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) { TPL_ERR("Unsupported present_mode(%d)", swapchain->properties.present_mode); + swapchain->result = TPL_ERROR_INVALID_PARAMETER; return TPL_ERROR_INVALID_PARAMETER; } @@ -1504,6 +1598,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) wl_vk_surface->reset = TPL_FALSE; __tpl_util_atomic_inc(&swapchain->ref_cnt); + swapchain->create_done = TPL_TRUE; TPL_INFO("[SWAPCHAIN_REUSE]", "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)", @@ -1538,6 +1633,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) if (!tbm_queue) { TPL_ERR("Failed to create tbm_queue. 
wl_vk_surface(%p)", wl_vk_surface); + swapchain->result = TPL_ERROR_OUT_OF_MEMORY; return TPL_ERROR_OUT_OF_MEMORY; } @@ -1547,6 +1643,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", tbm_queue); tbm_surface_queue_destroy(tbm_queue); + swapchain->result = TPL_ERROR_INVALID_OPERATION; return TPL_ERROR_INVALID_OPERATION; } @@ -1557,6 +1654,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", tbm_queue); tbm_surface_queue_destroy(tbm_queue); + swapchain->result = TPL_ERROR_INVALID_OPERATION; return TPL_ERROR_INVALID_OPERATION; } @@ -1567,10 +1665,12 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", tbm_queue); tbm_surface_queue_destroy(tbm_queue); + swapchain->result = TPL_ERROR_INVALID_OPERATION; return TPL_ERROR_INVALID_OPERATION; } swapchain->tbm_queue = tbm_queue; + swapchain->create_done = TPL_TRUE; TPL_INFO("[TBM_QUEUE_CREATED]", "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)", @@ -1614,6 +1714,9 @@ __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface, swapchain->wl_vk_surface = wl_vk_surface; swapchain->properties.format = format; + swapchain->result = TPL_ERROR_NONE; + swapchain->create_done = TPL_FALSE; + wl_vk_surface->swapchain = swapchain; __tpl_util_atomic_set(&swapchain->ref_cnt, 1); @@ -1623,17 +1726,16 @@ __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface, wl_vk_surface->sent_message = CREATE_QUEUE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE) + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); TPL_CHECK_ON_FALSE_ASSERT_FAIL( swapchain->tbm_queue != NULL, "[CRITICAL FAIL] Failed to create tbm_surface_queue"); - wl_vk_surface->reset = TPL_FALSE; - return TPL_ERROR_NONE; } @@ -1678,6 +1780,12 @@ __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) return TPL_ERROR_INVALID_OPERATION; } + if (!swapchain->tbm_queue) { + TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.", + wl_vk_surface, wl_vk_surface->swapchain); + return TPL_ERROR_INVALID_OPERATION; + } + if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) { TPL_INFO("[DESTROY_SWAPCHAIN]", "wl_vk_surface(%p) swapchain(%p) still valid.", @@ -1709,7 +1817,8 @@ __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) wl_vk_surface->sent_message = DESTROY_QUEUE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + while (!swapchain->tbm_queue) + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); _print_buffer_lists(wl_vk_surface); @@ -2439,7 +2548,7 @@ _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface) if (wl_vk_surface->vblank == NULL) { wl_vk_surface->vblank = - _thread_create_tdm_client_vblank(wl_vk_display->tdm_client); + _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client); if (!wl_vk_surface->vblank) { TPL_WARN("Failed to create vblank. 
wl_vk_surface(%p)", wl_vk_surface); -- 2.7.4 From 62a630689eb0d4dd6974311f5aa024f1c89f54e7 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 29 Sep 2022 20:45:42 +0900 Subject: [PATCH 09/16] Changed the function name properly. tpl_cond_timed_wait -> tpl_gcond_timed_wait Change-Id: Iebe13b6aa7598d652cf3b5968dbc7b2bb96a44e0 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 2 +- src/tpl_utils_gthread.h | 2 +- src/tpl_wl_egl_thread.c | 4 ++-- src/tpl_wl_vk_thread.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 20b6838..20bab7f 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -431,7 +431,7 @@ tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex) } tpl_result_t -tpl_cond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, +tpl_gcond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, int64_t timeout_ms) { gint64 end_time = g_get_monotonic_time() + diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 0237f40..8185cab 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -191,7 +191,7 @@ tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex); * @return tpl_result_t TPL_ERROR_NONE or TPL_ERROR_TIME_OUT */ tpl_result_t -tpl_cond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, int64_t timeout_ms); +tpl_gcond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, int64_t timeout_ms); /** * wrapping g_cond_signal() diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 9fd1e8a..f6c0375 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2117,7 +2117,7 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) if (need_to_wait) { tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, + wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond, &wl_egl_buffer->mutex, 200); /* 200ms */ tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); @@ -2551,7 +2551,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, "waiting for previous wl_egl_buffer(%p) commit", enqueued_buffer); - wait_result = tpl_cond_timed_wait(&enqueued_buffer->cond, + wait_result = tpl_gcond_timed_wait(&enqueued_buffer->cond, &enqueued_buffer->mutex, 200); /* 200ms */ if (wait_result == TPL_ERROR_TIME_OUT) { diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 7e0e621..884e7c7 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -1049,7 +1049,7 @@ _tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface) if (need_to_wait) { tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); - wait_result = tpl_cond_timed_wait(&wl_vk_buffer->cond, + wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond, &wl_vk_buffer->mutex, 16); /* 16ms */ tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); -- 2.7.4 From 7dfe9d051ec17276e10a6f935b45c7c0d63f1770 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 29 Sep 2022 21:04:56 +0900 Subject: [PATCH 10/16] Delete wrong g_main_context_unref - GMainContext is also destroyed when GMainLoop is destroyed. Therefore, it is the wrong code to do context_unref before loop_unref. 
Change-Id: Ib57eab7ba4345565977102abb0df3268d7f42acb Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 20bab7f..f1dab0c 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -70,12 +70,14 @@ tpl_gthread_create(const char *thread_name, return NULL; } + // context's ref count was increased in g_main_loop_new g_main_context_unref(context); new_thread = calloc(1, sizeof(tpl_gthread)); if (!new_thread) { TPL_ERR("Failed to allocate tpl_gthread"); - g_main_context_unref(context); + + // context is also destroyed when loop is destroyed. g_main_loop_unref(loop); return NULL; -- 2.7.4 From 2ef4882ca7d468582e58e90736c4762ce7313b0e Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 30 Sep 2022 16:23:50 +0900 Subject: [PATCH 11/16] Changed the order of lock/unlock at buffer_clear. - The lock/unlock order of buffer->mutex and display->wl_event_mutex is important. display->mutex must surround buffer->mutex. - Before applying this patch, a deadlock could occur in buffer_clear. Change-Id: I90d9b36874def4e4c7f0bd36742b3bbd00faab44 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index f6c0375..4434972 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2116,11 +2116,13 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) need_to_wait = (status < COMMITTED); if (need_to_wait) { - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + /* The lock/unlock order of buffer->mutex and display->wl_event_mutex + * is important. display->mutex must surround buffer->mutex */ + tpl_gmutex_unlock(&wl_egl_buffer->mutex); wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond, - &wl_egl_buffer->mutex, - 200); /* 200ms */ - tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); + &wl_egl_display->wl_event_mutex, + 200); /* 200ms */ + tpl_gmutex_lock(&wl_egl_buffer->mutex); if (wait_result == TPL_ERROR_TIME_OUT) TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", -- 2.7.4 From 8325bf951c6608d237629e5eb9411dee6845f7af Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 30 Sep 2022 16:33:54 +0900 Subject: [PATCH 12/16] Modified to call tpl_gsource_destroy only once. - tpl_gsource_destroy means 'sending a destroy message to the thread'. So it does not need to be called in a loop.
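The resulting handshake (the destroy request is issued once; only the wait is retried) reduces to plain GLib primitives. A runnable sketch, illustrative only; the real code uses the tpl_gmutex/tpl_gcond wrappers and a tpl_gsource finalizer rather than a bare worker thread:

#include <glib.h>

static GMutex mutex;               /* zero-initialized statics are valid GLib mutexes */
static GCond cond;
static gboolean finalized = FALSE; /* plays the role of gsource_finalized */

/* Worker: stands in for the thread that runs the gsource finalizer. */
static gpointer worker(gpointer data)
{
	g_usleep(10 * G_TIME_SPAN_MILLISECOND); /* simulate finalize work */
	g_mutex_lock(&mutex);
	finalized = TRUE;      /* gsource_finalized = TPL_TRUE */
	g_cond_signal(&cond);  /* tpl_gcond_signal() */
	g_mutex_unlock(&mutex);
	return NULL;
}

int main(void)
{
	/* Spawning the worker stands in for tpl_gsource_destroy() posting
	 * its single destroy message; it happens exactly once. */
	GThread *thread = g_thread_new("finalizer", worker, NULL);

	g_mutex_lock(&mutex);
	/* Only the wait is retried, because g_cond_wait() may wake spuriously. */
	while (!finalized)
		g_cond_wait(&cond, &mutex);
	g_mutex_unlock(&mutex);

	g_thread_join(thread);
	return 0;
}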
Change-Id: Ifd5706d738a2385396acaa3723138b2f564de4c2 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 4434972..487cf63 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -970,8 +970,9 @@ __tpl_wl_egl_display_init(tpl_display_t *display) free_display: if (wl_egl_display->tdm.tdm_source) { tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + // Send destroy message to thread + tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); while (!wl_egl_display->tdm.gsource_finalized) { - tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); } tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); @@ -979,8 +980,9 @@ free_display: if (wl_egl_display->disp_source) { tpl_gmutex_lock(&wl_egl_display->disp_mutex); + // Send destroy message to thread + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); while (!wl_egl_display->gsource_finalized) { - tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); } tpl_gmutex_unlock(&wl_egl_display->disp_mutex); @@ -1024,8 +1026,9 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) * caller should use tpl_gcond_wait() in the loop with checking finalized flag * */ tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + // Send destroy message to thread + tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); while (!wl_egl_display->tdm.gsource_finalized) { - tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); } wl_egl_display->tdm.tdm_source = NULL; @@ -1038,8 +1041,9 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) * caller should use tpl_gcond_wait() in the loop with checking finalized flag * */ tpl_gmutex_lock(&wl_egl_display->disp_mutex); + // Send destroy message to thread + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); while (wl_egl_display->disp_source && !wl_egl_display->gsource_finalized) { - tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); } wl_egl_display->disp_source = NULL; tpl_gmutex_unlock(&wl_egl_display->disp_mutex); @@ -2200,8 +2204,9 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) * caller should use tpl_gcond_wait() in the loop with checking finalized flag * */ tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + // Send destroy message to thread + tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); while (wl_egl_surface->surf_source && !wl_egl_surface->gsource_finalized) { - tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); } wl_egl_surface->surf_source = NULL; -- 2.7.4 From 8f0f90756b5f0cd8ef7a6142bfce0269123e903c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 30 Sep 2022 16:35:33 +0900 Subject: [PATCH 13/16] Package version up to 1.9.8 Change-Id: I7a1bda7114b8cf98de0cd8f2f02dfb6b8d2c25fa Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index cb1653c..c4422ec 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 7
+%define TPL_VERSION_PATCH 8 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From 999b176b6427f1e09450bec792b081c636f4370c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 5 Oct 2022 10:49:43 +0900 Subject: [PATCH 14/16] Remove unnecessary header Change-Id: I810ec7c149bbb782d8e04167f66810171226d728 Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 884e7c7..11e35f3 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -24,7 +24,6 @@ #endif #if TIZEN_FEATURE_ENABLE -#include #include #endif -- 2.7.4 From 50c990169dcffad91255612a7c2929df3735be97 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 5 Oct 2022 10:57:23 +0900 Subject: [PATCH 15/16] Add null checking before calling tpl_gsource_destroy. Change-Id: I431f21b80215abeafc5ab6daa45778f617cc661a Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 52 ++++++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 487cf63..fb9286f 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1035,19 +1035,21 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); } - /* This is a protection to prevent problems that arise in unexpected situations - * that g_cond_wait cannot work normally. - * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, - * caller should use tpl_gcond_wait() in the loop with checking finalized flag - * */ - tpl_gmutex_lock(&wl_egl_display->disp_mutex); - // Send destroy message to thread - tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); - while (wl_egl_display->disp_source && !wl_egl_display->gsource_finalized) { - tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); + if (wl_egl_display->disp_source) { + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + // Send destroy message to thread + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + while (!wl_egl_display->gsource_finalized) { + tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); + } + wl_egl_display->disp_source = NULL; + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); } - wl_egl_display->disp_source = NULL; - tpl_gmutex_unlock(&wl_egl_display->disp_mutex); if (wl_egl_display->thread) { tpl_gthread_destroy(wl_egl_display->thread); @@ -2198,19 +2200,21 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) _tpl_wl_egl_surface_buffer_clear(wl_egl_surface); - /* This is a protection to prevent problems that arise in unexpected situations - * that g_cond_wait cannot work normally. 
- * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, - * caller should use tpl_gcond_wait() in the loop with checking finalized flag - * */ - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - // Send destroy message to thread - tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); - while (wl_egl_surface->surf_source && !wl_egl_surface->gsource_finalized) { - tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + if (wl_egl_surface->surf_source) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + // Send destroy message to thread + tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + while (!wl_egl_surface->gsource_finalized) { + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + } + wl_egl_surface->surf_source = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } - wl_egl_surface->surf_source = NULL; - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); _print_buffer_lists(wl_egl_surface); -- 2.7.4 From 0038526f2e4153b1b81b64b7b539998e5e62e5e9 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 5 Oct 2022 10:58:46 +0900 Subject: [PATCH 16/16] Package version up to 1.9.9 Change-Id: I6c7059dae6306e235be00ac9ec8bd1f7d3112136 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index c4422ec..fc4fa15 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 9 -%define TPL_VERSION_PATCH 8 +%define TPL_VERSION_PATCH 9 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4
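A closing recap of the buffer-safety theme from PATCH 04/05: the ownership check scans a fixed-size array under the buffers mutex before any stored pointer is trusted. A standalone pthread rendition of the same idea; the struct types and the BUFFER_ARRAY_SIZE value below are simplified illustrative stand-ins, not the real libtpl-egl definitions:

#include <pthread.h>
#include <stdbool.h>

#define BUFFER_ARRAY_SIZE 9 /* illustrative, not the real value */

struct buffer { void *handle; };

struct surface {
	pthread_mutex_t buffers_mutex;
	struct buffer *buffers[BUFFER_ARRAY_SIZE];
};

/* Returns true only if handle is currently tracked by surface, so the
 * caller never passes a stale pointer into a validity check that would
 * dereference it. */
static bool check_buffer_validate(struct surface *surface, void *handle)
{
	bool ret = false;

	if (!surface || !handle) /* silent return, as in the patch */
		return ret;

	pthread_mutex_lock(&surface->buffers_mutex);
	for (int idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
		struct buffer *buf = surface->buffers[idx];
		if (buf && buf->handle == handle) {
			ret = true;
			break;
		}
	}
	pthread_mutex_unlock(&surface->buffers_mutex);

	return ret;
}

int main(void)
{
	struct buffer buf = { .handle = &buf };
	struct surface surf = {
		.buffers_mutex = PTHREAD_MUTEX_INITIALIZER,
		.buffers = { &buf },
	};

	return check_buffer_validate(&surf, buf.handle) ? 0 : 1;
}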