tpl_bool_t use_explicit_sync;
tpl_bool_t use_tss;
tpl_bool_t prepared;
-
+ /* To make sure that tpl_gsource has been successfully finalized. */
+ tpl_bool_t gsource_finalized;
+ tpl_gmutex disp_mutex;
+ tpl_gcond disp_cond;
struct {
tdm_client *tdm_client;
tpl_gsource *tdm_source;
int tdm_display_fd;
tpl_bool_t tdm_initialized;
tpl_list_t *surface_vblanks;
+
+ /* To make sure that tpl_gsource has been successfully finalized. */
+ tpl_bool_t gsource_finalized;
+ tpl_gmutex tdm_mutex;
+ tpl_gcond tdm_cond;
} tdm;
#if TIZEN_FEATURE_ENABLE
tpl_bool_t prerotation_capability;
tpl_bool_t vblank_done;
tpl_bool_t set_serial_is_used;
+ tpl_bool_t initialized_in_thread;
+
+ /* To make sure that tpl_gsource has been successfully finalized. */
+ tpl_bool_t gsource_finalized;
};
struct _surface_vblank {
wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
- TPL_LOG_T("WL_EGL",
- "tdm_destroy| wl_egl_display(%p) tdm_client(%p) tpl_gsource(%p)",
- wl_egl_display, wl_egl_display->tdm.tdm_client, gsource);
+ tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
+
+ TPL_INFO("[TDM_CLIENT_FINI]",
+ "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
+ wl_egl_display, wl_egl_display->tdm.tdm_client,
+ wl_egl_display->tdm.tdm_display_fd);
if (wl_egl_display->tdm.tdm_client) {
wl_egl_display->use_wait_vblank = TPL_FALSE;
wl_egl_display->tdm.tdm_initialized = TPL_FALSE;
+ wl_egl_display->tdm.gsource_finalized = TPL_TRUE;
+
+ tpl_gcond_signal(&wl_egl_display->tdm.tdm_cond);
+ tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
}
static tpl_gsource_functions tdm_funcs = {
tpl_wl_egl_display_t *wl_egl_display =
(tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+ tpl_gmutex_lock(&wl_egl_display->disp_mutex);
+ TPL_DEBUG("[FINALIZE] wl_egl_display(%p) tpl_gsource(%p)",
+ wl_egl_display, gsource);
+
if (wl_egl_display->wl_initialized)
_thread_wl_display_fini(wl_egl_display);
- TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)",
- wl_egl_display, gsource);
+ wl_egl_display->gsource_finalized = TPL_TRUE;
+
+ tpl_gcond_signal(&wl_egl_display->disp_cond);
+ tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
return;
}
wl_egl_display->use_tss = TPL_FALSE;
wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
wl_egl_display->prepared = TPL_FALSE;
+ wl_egl_display->gsource_finalized = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
/* Wayland Interfaces */
tpl_gmutex_init(&wl_egl_display->wl_event_mutex);
+ tpl_gmutex_init(&wl_egl_display->disp_mutex);
+ tpl_gcond_init(&wl_egl_display->disp_cond);
+
/* Create gthread */
wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
(tpl_gthread_func)_thread_init,
if (wl_egl_display->use_wait_vblank &&
wl_egl_display->tdm.tdm_initialized) {
+ tpl_gmutex_init(&wl_egl_display->tdm.tdm_mutex);
+ tpl_gcond_init(&wl_egl_display->tdm.tdm_cond);
wl_egl_display->tdm.tdm_source = tpl_gsource_create(wl_egl_display->thread,
(void *)wl_egl_display,
wl_egl_display->tdm.tdm_display_fd,
&tdm_funcs, SOURCE_TYPE_NORMAL);
+ wl_egl_display->tdm.gsource_finalized = TPL_FALSE;
if (!wl_egl_display->tdm.tdm_source) {
TPL_ERR("Failed to create tdm_gsource\n");
goto free_display;
return TPL_ERROR_NONE;
free_display:
- if (wl_egl_display->thread) {
- if (wl_egl_display->tdm.tdm_source)
+ if (wl_egl_display->tdm.tdm_source) {
+ tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
+ while (!wl_egl_display->tdm.gsource_finalized) {
tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
- if (wl_egl_display->disp_source)
+ tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
+ }
+ tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
+ }
+
+ if (wl_egl_display->disp_source) {
+ tpl_gmutex_lock(&wl_egl_display->disp_mutex);
+ while (!wl_egl_display->gsource_finalized) {
tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
+ tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
+ }
+ tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
+ }
+ if (wl_egl_display->thread) {
tpl_gthread_destroy(wl_egl_display->thread);
}
+ tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
+ tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
+ tpl_gcond_clear(&wl_egl_display->disp_cond);
+ tpl_gmutex_clear(&wl_egl_display->disp_mutex);
+
wl_egl_display->thread = NULL;
free(wl_egl_display);
wl_egl_display->wl_display);
if (wl_egl_display->tdm.tdm_source && wl_egl_display->tdm.tdm_initialized) {
- tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
+ /* This guards against unexpected situations in which g_cond_wait()
+ * cannot work normally.
+ * When calling tpl_gsource_destroy() with destroy_in_thread set to TPL_TRUE,
+ * the caller should call tpl_gcond_wait() in a loop while checking the
+ * finalized flag. */
+ tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
+ while (!wl_egl_display->tdm.gsource_finalized) {
+ tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
+ tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
+ }
wl_egl_display->tdm.tdm_source = NULL;
+ tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
}
- if (wl_egl_display->disp_source) {
+ /* This guards against unexpected situations in which g_cond_wait()
+ * cannot work normally.
+ * When calling tpl_gsource_destroy() with destroy_in_thread set to TPL_TRUE,
+ * the caller should call tpl_gcond_wait() in a loop while checking the
+ * finalized flag. */
+ tpl_gmutex_lock(&wl_egl_display->disp_mutex);
+ while (wl_egl_display->disp_source && !wl_egl_display->gsource_finalized) {
tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
- wl_egl_display->disp_source = NULL;
+ tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
}
+ wl_egl_display->disp_source = NULL;
+ tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
if (wl_egl_display->thread) {
tpl_gthread_destroy(wl_egl_display->thread);
wl_egl_display->thread = NULL;
}
+ tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
+ tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
+ tpl_gcond_clear(&wl_egl_display->disp_cond);
+ tpl_gmutex_clear(&wl_egl_display->disp_mutex);
+
tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);
free(wl_egl_display);
{
tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-
TPL_INFO("[SURFACE_FINI]",
"wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
wl_egl_surface, wl_egl_surface->wl_egl_window,
__cb_surface_vblank_free);
wl_egl_surface->vblank = NULL;
}
-
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
static tpl_bool_t
TPL_DEBUG("wl_egl_surface(%p) initialize message received!",
wl_egl_surface);
_thread_wl_egl_surface_init(wl_egl_surface);
+ wl_egl_surface->initialized_in_thread = TPL_TRUE;
tpl_gcond_signal(&wl_egl_surface->surf_cond);
} else if (message == ACQUIRABLE) { /* Acquirable */
TPL_DEBUG("wl_egl_surface(%p) acquirable message received!",
wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
- _thread_wl_egl_surface_fini(wl_egl_surface);
-
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)",
wl_egl_surface, gsource);
+
+ _thread_wl_egl_surface_fini(wl_egl_surface);
+
+ wl_egl_surface->gsource_finalized = TPL_TRUE;
+
+ tpl_gcond_signal(&wl_egl_surface->surf_cond);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
static tpl_gsource_functions surf_funcs = {
wl_egl_surface->vblank_done = TPL_TRUE;
wl_egl_surface->use_render_done_fence = TPL_FALSE;
wl_egl_surface->set_serial_is_used = TPL_FALSE;
+ wl_egl_surface->gsource_finalized = TPL_FALSE;
+ wl_egl_surface->initialized_in_thread = TPL_FALSE;
wl_egl_surface->latest_transform = -1;
wl_egl_surface->render_done_cnt = 0;
wl_egl_surface->sent_message = INIT_SURFACE;
tpl_gsource_send_message(wl_egl_surface->surf_source,
wl_egl_surface->sent_message);
- tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
+ while (!wl_egl_surface->initialized_in_thread)
+ tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
TPL_ASSERT(wl_egl_surface->tbm_queue);
_tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
- if (wl_egl_surface->surf_source)
+ /* This guards against unexpected situations in which g_cond_wait()
+ * cannot work normally.
+ * When calling tpl_gsource_destroy() with destroy_in_thread set to TPL_TRUE,
+ * the caller should call tpl_gcond_wait() in a loop while checking the
+ * finalized flag. */
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ while (wl_egl_surface->surf_source && !wl_egl_surface->gsource_finalized) {
tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
+ tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
+ }
wl_egl_surface->surf_source = NULL;
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
_print_buffer_lists(wl_egl_surface);
int last_error; /* errno of the last wl_display error*/
tpl_bool_t wl_initialized;
- tpl_bool_t tdm_initialized;
- tdm_client *tdm_client;
- tpl_gsource *tdm_source;
- int tdm_display_fd;
+ struct {
+ tdm_client *tdm_client;
+ tpl_gsource *tdm_source;
+ int tdm_display_fd;
+ tpl_bool_t tdm_initialized;
+ /* To make sure that tpl_gsource has been successfully finalized. */
+ tpl_bool_t gsource_finalized;
+ tpl_gmutex tdm_mutex;
+ tpl_gcond tdm_cond;
+ } tdm;
tpl_bool_t use_wait_vblank;
tpl_bool_t use_explicit_sync;
tpl_bool_t prepared;
+ /* To make sure that tpl_gsource has been successfully finalized. */
+ tpl_bool_t gsource_finalized;
+ tpl_gmutex disp_mutex;
+ tpl_gcond disp_cond;
+
/* device surface capabilities */
int min_buffer;
int max_buffer;
tpl_wl_vk_surface_t *wl_vk_surface;
tbm_surface_queue_h tbm_queue;
+ tpl_result_t result;
+
+ tpl_bool_t create_done;
struct {
int width;
tpl_bool_t is_activated;
tpl_bool_t reset; /* TRUE if queue reseted by external */
tpl_bool_t vblank_done;
+ tpl_bool_t initialized_in_thread;
+
+ /* To make sure that tpl_gsource has been successfully finalized. */
+ tpl_bool_t gsource_finalized;
surf_message sent_message;
return TPL_FALSE;
}
- tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client);
+ tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client);
/* If an error occurs in tdm_client_handle_events, it cannot be recovered.
* When tdm_source is no longer available due to an unexpected situation,
tpl_gsource_destroy(gsource, TPL_FALSE);
- wl_vk_display->tdm_source = NULL;
+ wl_vk_display->tdm.tdm_source = NULL;
return TPL_FALSE;
}
wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
- TPL_LOG_T("WL_VK",
- "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)",
- wl_vk_display, wl_vk_display->tdm_client, gsource);
+ tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
+
+ TPL_INFO("[TDM_CLIENT_FINI]",
+ "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
+ wl_vk_display, wl_vk_display->tdm.tdm_client,
+ wl_vk_display->tdm.tdm_display_fd);
- if (wl_vk_display->tdm_client) {
- tdm_client_destroy(wl_vk_display->tdm_client);
- wl_vk_display->tdm_client = NULL;
- wl_vk_display->tdm_display_fd = -1;
+ if (wl_vk_display->tdm.tdm_client) {
+ tdm_client_destroy(wl_vk_display->tdm.tdm_client);
+ wl_vk_display->tdm.tdm_client = NULL;
+ wl_vk_display->tdm.tdm_display_fd = -1;
}
- wl_vk_display->tdm_initialized = TPL_FALSE;
+ wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
+ wl_vk_display->tdm.gsource_finalized = TPL_TRUE;
+
+ tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond);
+ tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
}
static tpl_gsource_functions tdm_funcs = {
return TPL_ERROR_INVALID_OPERATION;
}
- wl_vk_display->tdm_display_fd = tdm_display_fd;
- wl_vk_display->tdm_client = tdm_client;
- wl_vk_display->tdm_source = NULL;
- wl_vk_display->tdm_initialized = TPL_TRUE;
+ wl_vk_display->tdm.tdm_display_fd = tdm_display_fd;
+ wl_vk_display->tdm.tdm_client = tdm_client;
+ wl_vk_display->tdm.tdm_source = NULL;
+ wl_vk_display->tdm.tdm_initialized = TPL_TRUE;
TPL_INFO("[TDM_CLIENT_INIT]",
"wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
tpl_wl_vk_display_t *wl_vk_display =
(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
+ tpl_gmutex_lock(&wl_vk_display->disp_mutex);
+ TPL_DEBUG("[FINALIZE] wl_vk_display(%p) tpl_gsource(%p)",
+ wl_vk_display, gsource);
+
if (wl_vk_display->wl_initialized)
_thread_wl_display_fini(wl_vk_display);
- TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)",
- wl_vk_display, gsource);
+ wl_vk_display->gsource_finalized = TPL_TRUE;
+
+ tpl_gcond_signal(&wl_vk_display->disp_cond);
+ tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
return;
}
display->backend.data = wl_vk_display;
display->bufmgr_fd = -1;
- wl_vk_display->tdm_initialized = TPL_FALSE;
+ wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
wl_vk_display->wl_initialized = TPL_FALSE;
wl_vk_display->ev_queue = NULL;
tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
+ tpl_gmutex_init(&wl_vk_display->disp_mutex);
+ tpl_gcond_init(&wl_vk_display->disp_cond);
+
/* Create gthread */
wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
(tpl_gthread_func)_thread_init,
goto free_display;
}
- wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread,
+ tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex);
+ tpl_gcond_init(&wl_vk_display->tdm.tdm_cond);
+
+ wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread,
(void *)wl_vk_display,
- wl_vk_display->tdm_display_fd,
+ wl_vk_display->tdm.tdm_display_fd,
&tdm_funcs, SOURCE_TYPE_NORMAL);
- if (!wl_vk_display->tdm_source) {
+ if (!wl_vk_display->tdm.tdm_source) {
TPL_ERR("Failed to create tdm_gsource\n");
goto free_display;
}
return TPL_ERROR_NONE;
free_display:
- if (wl_vk_display->thread) {
- if (wl_vk_display->tdm_source)
- tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
- if (wl_vk_display->disp_source)
+ if (wl_vk_display->tdm.tdm_source) {
+ tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
+ while (!wl_vk_display->tdm.gsource_finalized) {
+ tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
+ tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
+ }
+ tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
+ }
+
+ if (wl_vk_display->disp_source) {
+ tpl_gmutex_lock(&wl_vk_display->disp_mutex);
+ while (!wl_vk_display->gsource_finalized) {
tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
+ tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
+ }
+ tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
+ }
+ if (wl_vk_display->thread) {
tpl_gthread_destroy(wl_vk_display->thread);
}
+ tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
+ tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
+ tpl_gcond_clear(&wl_vk_display->disp_cond);
+ tpl_gmutex_clear(&wl_vk_display->disp_mutex);
+
wl_vk_display->thread = NULL;
free(wl_vk_display);
wl_vk_display->thread,
wl_vk_display->wl_display);
- if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) {
- tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
- wl_vk_display->tdm_source = NULL;
+ if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) {
+ /* This guards against unexpected situations in which g_cond_wait()
+ * cannot work normally.
+ * When calling tpl_gsource_destroy() with destroy_in_thread set to TPL_TRUE,
+ * the caller should call tpl_gcond_wait() in a loop while checking the
+ * finalized flag. */
+ tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
+ while (!wl_vk_display->tdm.gsource_finalized) {
+ tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
+ tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
+ }
+ wl_vk_display->tdm.tdm_source = NULL;
+ tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
}
- if (wl_vk_display->disp_source) {
+ /* This guards against unexpected situations in which g_cond_wait()
+ * cannot work normally.
+ * When calling tpl_gsource_destroy() with destroy_in_thread set to TPL_TRUE,
+ * the caller should call tpl_gcond_wait() in a loop while checking the
+ * finalized flag. */
+ tpl_gmutex_lock(&wl_vk_display->disp_mutex);
+ while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) {
tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
- wl_vk_display->disp_source = NULL;
+ tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
}
+ wl_vk_display->disp_source = NULL;
+ tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
if (wl_vk_display->thread) {
tpl_gthread_destroy(wl_vk_display->thread);
wl_vk_display->thread = NULL;
}
+ tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
+ tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
+ tpl_gcond_clear(&wl_vk_display->disp_cond);
+ tpl_gmutex_clear(&wl_vk_display->disp_mutex);
+
tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);
free(wl_vk_display);
/* tbm_surface_queue will be created at swapchain_create */
wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
- wl_vk_display->tdm_client);
+ wl_vk_display->tdm.tdm_client);
if (wl_vk_surface->vblank) {
TPL_INFO("[VBLANK_INIT]",
"wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
- wl_vk_surface, wl_vk_display->tdm_client,
+ wl_vk_surface, wl_vk_display->tdm.tdm_client,
wl_vk_surface->vblank);
}
static void
_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
{
- tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
-
TPL_INFO("[SURFACE_FINI]",
"wl_vk_surface(%p) wl_surface(%p)",
wl_vk_surface, wl_vk_surface->wl_surface);
tdm_client_vblank_destroy(wl_vk_surface->vblank);
wl_vk_surface->vblank = NULL;
}
-
- tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
static tpl_bool_t
wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
- _thread_wl_vk_surface_fini(wl_vk_surface);
-
+ tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)",
wl_vk_surface, gsource);
+
+ _thread_wl_vk_surface_fini(wl_vk_surface);
+
+ wl_vk_surface->gsource_finalized = TPL_TRUE;
+
+ tpl_gcond_signal(&wl_vk_surface->surf_cond);
+ tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
static tpl_gsource_functions surf_funcs = {
wl_vk_surface->reset = TPL_FALSE;
wl_vk_surface->is_activated = TPL_FALSE;
wl_vk_surface->vblank_done = TPL_TRUE;
+ wl_vk_surface->initialized_in_thread = TPL_FALSE;
wl_vk_surface->render_done_cnt = 0;
wl_vk_surface->sent_message = INIT_SURFACE;
tpl_gsource_send_message(wl_vk_surface->surf_source,
wl_vk_surface->sent_message);
- tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+ while (!wl_vk_surface->initialized_in_thread)
+ tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
TPL_INFO("[SURFACE_INIT]",
wl_vk_surface->swapchain = NULL;
- if (wl_vk_surface->surf_source)
+ /* This guards against unexpected situations in which g_cond_wait()
+ * cannot work normally.
+ * When calling tpl_gsource_destroy() with destroy_in_thread set to TPL_TRUE,
+ * the caller should call tpl_gcond_wait() in a loop while checking the
+ * finalized flag. */
+ tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+ while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) {
tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
- wl_vk_surface->surf_source = NULL;
+ tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+ }
+ wl_vk_surface->surf_source = NULL;
+ tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
_print_buffer_lists(wl_vk_surface);
TPL_ERR("buffer count(%d) must be higher than (%d)",
swapchain->properties.buffer_count,
wl_vk_display->min_buffer);
+ swapchain->result = TPL_ERROR_INVALID_PARAMETER;
return TPL_ERROR_INVALID_PARAMETER;
}
TPL_ERR("buffer count(%d) must be lower than (%d)",
swapchain->properties.buffer_count,
wl_vk_display->max_buffer);
+ swapchain->result = TPL_ERROR_INVALID_PARAMETER;
return TPL_ERROR_INVALID_PARAMETER;
}
if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
TPL_ERR("Unsupported present_mode(%d)",
swapchain->properties.present_mode);
+ swapchain->result = TPL_ERROR_INVALID_PARAMETER;
return TPL_ERROR_INVALID_PARAMETER;
}
wl_vk_surface->reset = TPL_FALSE;
__tpl_util_atomic_inc(&swapchain->ref_cnt);
+ swapchain->create_done = TPL_TRUE;
TPL_INFO("[SWAPCHAIN_REUSE]",
"wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
if (!tbm_queue) {
TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
wl_vk_surface);
+ swapchain->result = TPL_ERROR_OUT_OF_MEMORY;
return TPL_ERROR_OUT_OF_MEMORY;
}
TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
tbm_queue);
tbm_surface_queue_destroy(tbm_queue);
+ swapchain->result = TPL_ERROR_INVALID_OPERATION;
return TPL_ERROR_INVALID_OPERATION;
}
TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
tbm_queue);
tbm_surface_queue_destroy(tbm_queue);
+ swapchain->result = TPL_ERROR_INVALID_OPERATION;
return TPL_ERROR_INVALID_OPERATION;
}
TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
tbm_queue);
tbm_surface_queue_destroy(tbm_queue);
+ swapchain->result = TPL_ERROR_INVALID_OPERATION;
return TPL_ERROR_INVALID_OPERATION;
}
swapchain->tbm_queue = tbm_queue;
+ swapchain->create_done = TPL_TRUE;
TPL_INFO("[TBM_QUEUE_CREATED]",
"wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)",
swapchain->wl_vk_surface = wl_vk_surface;
swapchain->properties.format = format;
+ swapchain->result = TPL_ERROR_NONE;
+ swapchain->create_done = TPL_FALSE;
+
wl_vk_surface->swapchain = swapchain;
__tpl_util_atomic_set(&swapchain->ref_cnt, 1);
wl_vk_surface->sent_message = CREATE_QUEUE;
tpl_gsource_send_message(wl_vk_surface->surf_source,
wl_vk_surface->sent_message);
- tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+ while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE)
+ tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
TPL_CHECK_ON_FALSE_ASSERT_FAIL(
swapchain->tbm_queue != NULL,
"[CRITICAL FAIL] Failed to create tbm_surface_queue");
-
wl_vk_surface->reset = TPL_FALSE;
-
return TPL_ERROR_NONE;
}
return TPL_ERROR_INVALID_OPERATION;
}
+ if (!swapchain->tbm_queue) {
+ TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.",
+ wl_vk_surface, wl_vk_surface->swapchain);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
TPL_INFO("[DESTROY_SWAPCHAIN]",
"wl_vk_surface(%p) swapchain(%p) still valid.",
wl_vk_surface->sent_message = DESTROY_QUEUE;
tpl_gsource_send_message(wl_vk_surface->surf_source,
wl_vk_surface->sent_message);
- tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+ while (!swapchain->tbm_queue)
+ tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
_print_buffer_lists(wl_vk_surface);
if (wl_vk_surface->vblank == NULL) {
wl_vk_surface->vblank =
- _thread_create_tdm_client_vblank(wl_vk_display->tdm_client);
+ _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client);
if (!wl_vk_surface->vblank) {
TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
wl_vk_surface);