From: Joonbum Ko
Date: Tue, 27 Sep 2022 11:41:14 +0000 (+0900)
Subject: Modify the code paths that call tpl_gcond_wait().
X-Git-Tag: accepted/tizen/7.0/unified/20221110.062136~8
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e6f7aa1cb1942797a572c2e705c5027a907aed38;p=platform%2Fcore%2Fuifw%2Flibtpl-egl.git

Modify the code paths that call tpl_gcond_wait().

- g_cond_wait() can occasionally be woken by an unknown or stolen signal, which
  may cause unexpected problems. To prevent this, each tpl_gsource now has its
  own tpl_gmutex and tpl_gcond, and callers repeat tpl_gcond_wait() in a loop
  until the gsource_finalized flag becomes true. (A standalone sketch of this
  wait pattern follows the diff below.)
- This may be replaced with a better approach later.

Change-Id: I1360c0a3888186ba0309fe4d94c5be8e29c6f1b8
Signed-off-by: Joonbum Ko
---
diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index d4efca0..9fd1e8a 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -60,13 +60,21 @@ struct _tpl_wl_egl_display { tpl_bool_t use_explicit_sync; tpl_bool_t use_tss; tpl_bool_t prepared; - + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex disp_mutex; + tpl_gcond disp_cond; struct { tdm_client *tdm_client; tpl_gsource *tdm_source; int tdm_display_fd; tpl_bool_t tdm_initialized; tpl_list_t *surface_vblanks; + + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex tdm_mutex; + tpl_gcond tdm_cond; } tdm; #if TIZEN_FEATURE_ENABLE @@ -143,6 +151,10 @@ struct _tpl_wl_egl_surface { tpl_bool_t prerotation_capability; tpl_bool_t vblank_done; tpl_bool_t set_serial_is_used; + tpl_bool_t initialized_in_thread; + + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; }; struct _surface_vblank { @@ -363,9 +375,12 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - TPL_LOG_T("WL_EGL", - "tdm_destroy| wl_egl_display(%p) tdm_client(%p) tpl_gsource(%p)", - wl_egl_display, wl_egl_display->tdm.tdm_client, gsource); + tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + + TPL_INFO("[TDM_CLIENT_FINI]", + "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_egl_display, wl_egl_display->tdm.tdm_client, + wl_egl_display->tdm.tdm_display_fd); if (wl_egl_display->tdm.tdm_client) { @@ -383,6 +398,10 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_egl_display->use_wait_vblank = TPL_FALSE; wl_egl_display->tdm.tdm_initialized = TPL_FALSE; + wl_egl_display->tdm.gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_egl_display->tdm.tdm_cond); + tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); } static tpl_gsource_functions tdm_funcs = { @@ -809,11 +828,17 @@ __thread_func_disp_finalize(tpl_gsource *gsource) tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + TPL_DEBUG("[FINALIZE] wl_egl_display(%p) tpl_gsource(%p)", + wl_egl_display, gsource); + if (wl_egl_display->wl_initialized) _thread_wl_display_fini(wl_egl_display); - TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)", - wl_egl_display, gsource); + wl_egl_display->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_egl_display->disp_cond); + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); return; } @@ -867,6 +892,7 @@ __tpl_wl_egl_display_init(tpl_display_t *display) wl_egl_display->use_tss = TPL_FALSE; wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
wl_egl_display->prepared = TPL_FALSE; + wl_egl_display->gsource_finalized = TPL_FALSE; #if TIZEN_FEATURE_ENABLE /* Wayland Interfaces */ @@ -886,6 +912,9 @@ __tpl_wl_egl_display_init(tpl_display_t *display) tpl_gmutex_init(&wl_egl_display->wl_event_mutex); + tpl_gmutex_init(&wl_egl_display->disp_mutex); + tpl_gcond_init(&wl_egl_display->disp_cond); + /* Create gthread */ wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", (tpl_gthread_func)_thread_init, @@ -908,10 +937,13 @@ __tpl_wl_egl_display_init(tpl_display_t *display) if (wl_egl_display->use_wait_vblank && wl_egl_display->tdm.tdm_initialized) { + tpl_gmutex_init(&wl_egl_display->tdm.tdm_mutex); + tpl_gcond_init(&wl_egl_display->tdm.tdm_cond); wl_egl_display->tdm.tdm_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_display, wl_egl_display->tdm.tdm_display_fd, &tdm_funcs, SOURCE_TYPE_NORMAL); + wl_egl_display->tdm.gsource_finalized = TPL_FALSE; if (!wl_egl_display->tdm.tdm_source) { TPL_ERR("Failed to create tdm_gsource\n"); goto free_display; @@ -936,15 +968,33 @@ __tpl_wl_egl_display_init(tpl_display_t *display) return TPL_ERROR_NONE; free_display: - if (wl_egl_display->thread) { - if (wl_egl_display->tdm.tdm_source) + if (wl_egl_display->tdm.tdm_source) { + tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + while (!wl_egl_display->tdm.gsource_finalized) { tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); - if (wl_egl_display->disp_source) + tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); + } + tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); + } + + if (wl_egl_display->disp_source) { + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + while (!wl_egl_display->gsource_finalized) { tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); + tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); + } + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); + } + if (wl_egl_display->thread) { tpl_gthread_destroy(wl_egl_display->thread); } + tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_egl_display->disp_cond); + tpl_gmutex_clear(&wl_egl_display->disp_mutex); + wl_egl_display->thread = NULL; free(wl_egl_display); @@ -968,20 +1018,43 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) wl_egl_display->wl_display); if (wl_egl_display->tdm.tdm_source && wl_egl_display->tdm.tdm_initialized) { - tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex); + while (!wl_egl_display->tdm.gsource_finalized) { + tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE); + tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex); + } wl_egl_display->tdm.tdm_source = NULL; + tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex); } - if (wl_egl_display->disp_source) { + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. 
+ * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_egl_display->disp_mutex); + while (wl_egl_display->disp_source && !wl_egl_display->gsource_finalized) { tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); - wl_egl_display->disp_source = NULL; + tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex); } + wl_egl_display->disp_source = NULL; + tpl_gmutex_unlock(&wl_egl_display->disp_mutex); if (wl_egl_display->thread) { tpl_gthread_destroy(wl_egl_display->thread); wl_egl_display->thread = NULL; } + tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_egl_display->disp_cond); + tpl_gmutex_clear(&wl_egl_display->disp_mutex); + tpl_gmutex_clear(&wl_egl_display->wl_event_mutex); free(wl_egl_display); @@ -1495,8 +1568,6 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) { tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - TPL_INFO("[SURFACE_FINI]", "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", wl_egl_surface, wl_egl_surface->wl_egl_window, @@ -1572,8 +1643,6 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) __cb_surface_vblank_free); wl_egl_surface->vblank = NULL; } - - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } static tpl_bool_t @@ -1588,6 +1657,7 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) TPL_DEBUG("wl_egl_surface(%p) initialize message received!", wl_egl_surface); _thread_wl_egl_surface_init(wl_egl_surface); + wl_egl_surface->initialized_in_thread = TPL_TRUE; tpl_gcond_signal(&wl_egl_surface->surf_cond); } else if (message == ACQUIRABLE) { /* Acquirable */ TPL_DEBUG("wl_egl_surface(%p) acquirable message received!", @@ -1610,10 +1680,16 @@ __thread_func_surf_finalize(tpl_gsource *gsource) wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - _thread_wl_egl_surface_fini(wl_egl_surface); - + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)", wl_egl_surface, gsource); + + _thread_wl_egl_surface_fini(wl_egl_surface); + + wl_egl_surface->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_egl_surface->surf_cond); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } static tpl_gsource_functions surf_funcs = { @@ -1685,6 +1761,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) wl_egl_surface->vblank_done = TPL_TRUE; wl_egl_surface->use_render_done_fence = TPL_FALSE; wl_egl_surface->set_serial_is_used = TPL_FALSE; + wl_egl_surface->gsource_finalized = TPL_FALSE; + wl_egl_surface->initialized_in_thread = TPL_FALSE; wl_egl_surface->latest_transform = -1; wl_egl_surface->render_done_cnt = 0; @@ -1754,7 +1832,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) wl_egl_surface->sent_message = INIT_SURFACE; tpl_gsource_send_message(wl_egl_surface->surf_source, wl_egl_surface->sent_message); - tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + while (!wl_egl_surface->initialized_in_thread) + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); TPL_ASSERT(wl_egl_surface->tbm_queue); @@ -2113,9 +2192,18 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) _tpl_wl_egl_surface_buffer_clear(wl_egl_surface); - if 
(wl_egl_surface->surf_source) + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + while (wl_egl_surface->surf_source && !wl_egl_surface->gsource_finalized) { tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + } wl_egl_surface->surf_source = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); _print_buffer_lists(wl_egl_surface); diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 4d8fb2c..7e0e621 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -52,16 +52,27 @@ struct _tpl_wl_vk_display { int last_error; /* errno of the last wl_display error*/ tpl_bool_t wl_initialized; - tpl_bool_t tdm_initialized; - tdm_client *tdm_client; - tpl_gsource *tdm_source; - int tdm_display_fd; + struct { + tdm_client *tdm_client; + tpl_gsource *tdm_source; + int tdm_display_fd; + tpl_bool_t tdm_initialized; + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex tdm_mutex; + tpl_gcond tdm_cond; + } tdm; tpl_bool_t use_wait_vblank; tpl_bool_t use_explicit_sync; tpl_bool_t prepared; + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; + tpl_gmutex disp_mutex; + tpl_gcond disp_cond; + /* device surface capabilities */ int min_buffer; int max_buffer; @@ -75,6 +86,9 @@ struct _tpl_wl_vk_swapchain { tpl_wl_vk_surface_t *wl_vk_surface; tbm_surface_queue_h tbm_queue; + tpl_result_t result; + + tpl_bool_t create_done; struct { int width; @@ -128,6 +142,10 @@ struct _tpl_wl_vk_surface { tpl_bool_t is_activated; tpl_bool_t reset; /* TRUE if queue reseted by external */ tpl_bool_t vblank_done; + tpl_bool_t initialized_in_thread; + + /* To make sure that tpl_gsource has been successfully finalized. */ + tpl_bool_t gsource_finalized; surf_message sent_message; @@ -250,7 +268,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) return TPL_FALSE; } - tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client); + tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client); /* If an error occurs in tdm_client_handle_events, it cannot be recovered. 
* When tdm_source is no longer available due to an unexpected situation, @@ -264,7 +282,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gsource_destroy(gsource, TPL_FALSE); - wl_vk_display->tdm_source = NULL; + wl_vk_display->tdm.tdm_source = NULL; return TPL_FALSE; } @@ -279,17 +297,24 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); - TPL_LOG_T("WL_VK", - "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)", - wl_vk_display, wl_vk_display->tdm_client, gsource); + tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex); + + TPL_INFO("[TDM_CLIENT_FINI]", + "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_vk_display, wl_vk_display->tdm.tdm_client, + wl_vk_display->tdm.tdm_display_fd); - if (wl_vk_display->tdm_client) { - tdm_client_destroy(wl_vk_display->tdm_client); - wl_vk_display->tdm_client = NULL; - wl_vk_display->tdm_display_fd = -1; + if (wl_vk_display->tdm.tdm_client) { + tdm_client_destroy(wl_vk_display->tdm.tdm_client); + wl_vk_display->tdm.tdm_client = NULL; + wl_vk_display->tdm.tdm_display_fd = -1; } - wl_vk_display->tdm_initialized = TPL_FALSE; + wl_vk_display->tdm.tdm_initialized = TPL_FALSE; + wl_vk_display->tdm.gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond); + tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex); } static tpl_gsource_functions tdm_funcs = { @@ -319,10 +344,10 @@ _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display) return TPL_ERROR_INVALID_OPERATION; } - wl_vk_display->tdm_display_fd = tdm_display_fd; - wl_vk_display->tdm_client = tdm_client; - wl_vk_display->tdm_source = NULL; - wl_vk_display->tdm_initialized = TPL_TRUE; + wl_vk_display->tdm.tdm_display_fd = tdm_display_fd; + wl_vk_display->tdm.tdm_client = tdm_client; + wl_vk_display->tdm.tdm_source = NULL; + wl_vk_display->tdm.tdm_initialized = TPL_TRUE; TPL_INFO("[TDM_CLIENT_INIT]", "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)", @@ -668,11 +693,17 @@ __thread_func_disp_finalize(tpl_gsource *gsource) tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + tpl_gmutex_lock(&wl_vk_display->disp_mutex); + TPL_DEBUG("[FINALIZE] wl_vk_display(%p) tpl_gsource(%p)", + wl_vk_display, gsource); + if (wl_vk_display->wl_initialized) _thread_wl_display_fini(wl_vk_display); - TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)", - wl_vk_display, gsource); + wl_vk_display->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_vk_display->disp_cond); + tpl_gmutex_unlock(&wl_vk_display->disp_mutex); return; } @@ -713,7 +744,7 @@ __tpl_wl_vk_display_init(tpl_display_t *display) display->backend.data = wl_vk_display; display->bufmgr_fd = -1; - wl_vk_display->tdm_initialized = TPL_FALSE; + wl_vk_display->tdm.tdm_initialized = TPL_FALSE; wl_vk_display->wl_initialized = TPL_FALSE; wl_vk_display->ev_queue = NULL; @@ -743,6 +774,9 @@ __tpl_wl_vk_display_init(tpl_display_t *display) tpl_gmutex_init(&wl_vk_display->wl_event_mutex); + tpl_gmutex_init(&wl_vk_display->disp_mutex); + tpl_gcond_init(&wl_vk_display->disp_cond); + /* Create gthread */ wl_vk_display->thread = tpl_gthread_create("wl_vk_thread", (tpl_gthread_func)_thread_init, @@ -763,11 +797,14 @@ __tpl_wl_vk_display_init(tpl_display_t *display) goto free_display; } - wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread, + tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex); + tpl_gcond_init(&wl_vk_display->tdm.tdm_cond); + + wl_vk_display->tdm.tdm_source = 
tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_display, - wl_vk_display->tdm_display_fd, + wl_vk_display->tdm.tdm_display_fd, &tdm_funcs, SOURCE_TYPE_NORMAL); - if (!wl_vk_display->tdm_source) { + if (!wl_vk_display->tdm.tdm_source) { TPL_ERR("Failed to create tdm_gsource\n"); goto free_display; } @@ -786,15 +823,33 @@ __tpl_wl_vk_display_init(tpl_display_t *display) return TPL_ERROR_NONE; free_display: - if (wl_vk_display->thread) { - if (wl_vk_display->tdm_source) - tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); - if (wl_vk_display->disp_source) + if (wl_vk_display->tdm.tdm_source) { + tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex); + while (!wl_vk_display->tdm.gsource_finalized) { + tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE); + tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex); + } + tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex); + } + + if (wl_vk_display->disp_source) { + tpl_gmutex_lock(&wl_vk_display->disp_mutex); + while (!wl_vk_display->gsource_finalized) { tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); + tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex); + } + tpl_gmutex_unlock(&wl_vk_display->disp_mutex); + } + if (wl_vk_display->thread) { tpl_gthread_destroy(wl_vk_display->thread); } + tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_vk_display->disp_cond); + tpl_gmutex_clear(&wl_vk_display->disp_mutex); + wl_vk_display->thread = NULL; free(wl_vk_display); @@ -817,21 +872,44 @@ __tpl_wl_vk_display_fini(tpl_display_t *display) wl_vk_display->thread, wl_vk_display->wl_display); - if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) { - tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); - wl_vk_display->tdm_source = NULL; + if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) { + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex); + while (!wl_vk_display->tdm.gsource_finalized) { + tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE); + tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex); + } + wl_vk_display->tdm.tdm_source = NULL; + tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex); } - if (wl_vk_display->disp_source) { + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work normally. 
+ * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_vk_display->disp_mutex); + while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) { tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); - wl_vk_display->disp_source = NULL; + tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex); } + wl_vk_display->disp_source = NULL; + tpl_gmutex_unlock(&wl_vk_display->disp_mutex); if (wl_vk_display->thread) { tpl_gthread_destroy(wl_vk_display->thread); wl_vk_display->thread = NULL; } + tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond); + tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex); + tpl_gcond_clear(&wl_vk_display->disp_cond); + tpl_gmutex_clear(&wl_vk_display->disp_mutex); + tpl_gmutex_clear(&wl_vk_display->wl_event_mutex); free(wl_vk_display); @@ -1062,11 +1140,11 @@ _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface) /* tbm_surface_queue will be created at swapchain_create */ wl_vk_surface->vblank = _thread_create_tdm_client_vblank( - wl_vk_display->tdm_client); + wl_vk_display->tdm.tdm_client); if (wl_vk_surface->vblank) { TPL_INFO("[VBLANK_INIT]", "wl_vk_surface(%p) tdm_client(%p) vblank(%p)", - wl_vk_surface, wl_vk_display->tdm_client, + wl_vk_surface, wl_vk_display->tdm.tdm_client, wl_vk_surface->vblank); } @@ -1092,8 +1170,6 @@ _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface) static void _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface) { - tpl_gmutex_lock(&wl_vk_surface->surf_mutex); - TPL_INFO("[SURFACE_FINI]", "wl_vk_surface(%p) wl_surface(%p)", wl_vk_surface, wl_vk_surface->wl_surface); @@ -1120,8 +1196,6 @@ _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface) tdm_client_vblank_destroy(wl_vk_surface->vblank); wl_vk_surface->vblank = NULL; } - - tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } static tpl_bool_t @@ -1177,10 +1251,16 @@ __thread_func_surf_finalize(tpl_gsource *gsource) wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource); TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); - _thread_wl_vk_surface_fini(wl_vk_surface); - + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)", wl_vk_surface, gsource); + + _thread_wl_vk_surface_fini(wl_vk_surface); + + wl_vk_surface->gsource_finalized = TPL_TRUE; + + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } static tpl_gsource_functions surf_funcs = { @@ -1237,6 +1317,7 @@ __tpl_wl_vk_surface_init(tpl_surface_t *surface) wl_vk_surface->reset = TPL_FALSE; wl_vk_surface->is_activated = TPL_FALSE; wl_vk_surface->vblank_done = TPL_TRUE; + wl_vk_surface->initialized_in_thread = TPL_FALSE; wl_vk_surface->render_done_cnt = 0; @@ -1266,7 +1347,8 @@ __tpl_wl_vk_surface_init(tpl_surface_t *surface) wl_vk_surface->sent_message = INIT_SURFACE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + while (!wl_vk_surface->initialized_in_thread) + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); TPL_INFO("[SURFACE_INIT]", @@ -1303,9 +1385,18 @@ __tpl_wl_vk_surface_fini(tpl_surface_t *surface) wl_vk_surface->swapchain = NULL; - if (wl_vk_surface->surf_source) + /* This is a protection to prevent problems that arise in unexpected situations + * that g_cond_wait cannot work 
normally. + * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE, + * caller should use tpl_gcond_wait() in the loop with checking finalized flag + * */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) { tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE); - wl_vk_surface->surf_source = NULL; + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + } + wl_vk_surface->surf_source = NULL; + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); _print_buffer_lists(wl_vk_surface); @@ -1450,6 +1541,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("buffer count(%d) must be higher than (%d)", swapchain->properties.buffer_count, wl_vk_display->min_buffer); + swapchain->result = TPL_ERROR_INVALID_PARAMETER; return TPL_ERROR_INVALID_PARAMETER; } @@ -1457,12 +1549,14 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("buffer count(%d) must be lower than (%d)", swapchain->properties.buffer_count, wl_vk_display->max_buffer); + swapchain->result = TPL_ERROR_INVALID_PARAMETER; return TPL_ERROR_INVALID_PARAMETER; } if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) { TPL_ERR("Unsupported present_mode(%d)", swapchain->properties.present_mode); + swapchain->result = TPL_ERROR_INVALID_PARAMETER; return TPL_ERROR_INVALID_PARAMETER; } @@ -1504,6 +1598,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) wl_vk_surface->reset = TPL_FALSE; __tpl_util_atomic_inc(&swapchain->ref_cnt); + swapchain->create_done = TPL_TRUE; TPL_INFO("[SWAPCHAIN_REUSE]", "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)", @@ -1538,6 +1633,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) if (!tbm_queue) { TPL_ERR("Failed to create tbm_queue. 
wl_vk_surface(%p)", wl_vk_surface); + swapchain->result = TPL_ERROR_OUT_OF_MEMORY; return TPL_ERROR_OUT_OF_MEMORY; } @@ -1547,6 +1643,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", tbm_queue); tbm_surface_queue_destroy(tbm_queue); + swapchain->result = TPL_ERROR_INVALID_OPERATION; return TPL_ERROR_INVALID_OPERATION; } @@ -1557,6 +1654,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", tbm_queue); tbm_surface_queue_destroy(tbm_queue); + swapchain->result = TPL_ERROR_INVALID_OPERATION; return TPL_ERROR_INVALID_OPERATION; } @@ -1567,10 +1665,12 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", tbm_queue); tbm_surface_queue_destroy(tbm_queue); + swapchain->result = TPL_ERROR_INVALID_OPERATION; return TPL_ERROR_INVALID_OPERATION; } swapchain->tbm_queue = tbm_queue; + swapchain->create_done = TPL_TRUE; TPL_INFO("[TBM_QUEUE_CREATED]", "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)", @@ -1614,6 +1714,9 @@ __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface, swapchain->wl_vk_surface = wl_vk_surface; swapchain->properties.format = format; + swapchain->result = TPL_ERROR_NONE; + swapchain->create_done = TPL_FALSE; + wl_vk_surface->swapchain = swapchain; __tpl_util_atomic_set(&swapchain->ref_cnt, 1); @@ -1623,17 +1726,16 @@ __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface, wl_vk_surface->sent_message = CREATE_QUEUE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE) + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); TPL_CHECK_ON_FALSE_ASSERT_FAIL( swapchain->tbm_queue != NULL, "[CRITICAL FAIL] Failed to create tbm_surface_queue"); - wl_vk_surface->reset = TPL_FALSE; - return TPL_ERROR_NONE; } @@ -1678,6 +1780,12 @@ __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) return TPL_ERROR_INVALID_OPERATION; } + if (!swapchain->tbm_queue) { + TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.", + wl_vk_surface, wl_vk_surface->swapchain); + return TPL_ERROR_INVALID_OPERATION; + } + if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) { TPL_INFO("[DESTROY_SWAPCHAIN]", "wl_vk_surface(%p) swapchain(%p) still valid.", @@ -1709,7 +1817,8 @@ __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) wl_vk_surface->sent_message = DESTROY_QUEUE; tpl_gsource_send_message(wl_vk_surface->surf_source, wl_vk_surface->sent_message); - tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + while (!swapchain->tbm_queue) + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); _print_buffer_lists(wl_vk_surface); @@ -2439,7 +2548,7 @@ _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface) if (wl_vk_surface->vblank == NULL) { wl_vk_surface->vblank = - _thread_create_tdm_client_vblank(wl_vk_display->tdm_client); + _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client); if (!wl_vk_surface->vblank) { TPL_WARN("Failed to create vblank. wl_vk_surface(%p)", wl_vk_surface);