From: Joonbum Ko Date: Mon, 29 Mar 2021 02:07:47 +0000 (+0900) Subject: tpl_wl_vk_thread: Modified wl_vk_display to use tpl_gthread_util X-Git-Tag: submit/tizen/20210504.031804~15 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=368965fc67d639933dc8849dde6dd15fd2bc1056;p=platform%2Fcore%2Fuifw%2Flibtpl-egl.git tpl_wl_vk_thread: Modified wl_vk_display to use tpl_gthread_util Change-Id: I3fb5c37a1a2850a95d1218dc607f6c190e94da1c Signed-off-by: Joonbum Ko --- diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 74e7e0a..3a846b9 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -25,8 +25,9 @@ #include "tpl_utils_gthread.h" #define BUFFER_ARRAY_SIZE 10 +#define VK_CLIENT_QUEUE_SIZE 3 -typedef struct _tpl_wl_vk_surface tpl_wl_vk_display_t; +typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t; typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t; typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t; typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t; @@ -57,7 +58,6 @@ struct _tpl_wl_vk_display { int max_buffer; int present_modes; - struct tizen_surface_shm *tss; /* used for surface buffer_flush */ struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */ }; @@ -86,7 +86,6 @@ struct _tpl_wl_vk_surface { struct wl_surface *wl_surface; struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ - struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */ tdm_client_vblank *vblank; @@ -173,120 +172,624 @@ struct _tpl_wl_vk_buffer { static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( tpl_surface_t *surface); -static TPL_INLINE tpl_bool_t -__tpl_wl_vk_wsi_display_is_wl_display(tpl_handle_t native_dpy) +static tpl_bool_t +_check_native_handle_is_wl_display(tpl_handle_t native_dpy) { - if (!native_dpy) return TPL_FALSE; + struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy; + + if (!wl_vk_native_dpy) { + TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy); + return TPL_FALSE; + } + + /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value + is a memory address pointing the structure of wl_display_interface. */ + if (wl_vk_native_dpy == &wl_display_interface) + return TPL_TRUE; + + if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name, + strlen(wl_display_interface.name)) == 0) { + return TPL_TRUE; + } + + return TPL_FALSE; +} + +static tpl_bool_t +__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_vk_display_t *wl_vk_display = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; + + TPL_IGNORE(message); + + wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + if (!wl_vk_display) { + TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource); + TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); + return TPL_FALSE; + } + + tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client); + + /* If an error occurs in tdm_client_handle_events, it cannot be recovered. + * When tdm_source is no longer available due to an unexpected situation, + * wl_egl_thread must remove it from the thread and destroy it. + * In that case, tdm_vblank can no longer be used for surfaces and displays + * that used this tdm_source. */ + if (tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Error occured in tdm_client_handle_events. 
tdm_err(%d)", + tdm_err); + TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); + + tpl_gsource_destroy(gsource, TPL_FALSE); + + wl_vk_display->tdm_source = NULL; + + return TPL_FALSE; + } + + return TPL_TRUE; +} + +static void +__thread_func_tdm_finalize(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = NULL; + + wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + TPL_LOG_T("WL_VK", + "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)", + wl_vk_display, wl_vk_display->tdm_client, gsource); + + if (wl_vk_display->tdm_client) { + tdm_client_destroy(wl_vk_display->tdm_client); + wl_vk_display->tdm_client = NULL; + wl_vk_display->tdm_display_fd = -1; + } + + wl_vk_display->tdm_initialized = TPL_FALSE; +} + +static tpl_gsource_functions tdm_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_tdm_dispatch, + .finalize = __thread_func_tdm_finalize, +}; + +tpl_result_t +_thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display) +{ + tdm_client *tdm_client = NULL; + int tdm_display_fd = -1; + tdm_error tdm_err = TDM_ERROR_NONE; + + tdm_client = tdm_client_create(&tdm_err); + if (!tdm_client || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err); + return TPL_ERROR_INVALID_OPERATION; + } + + tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd); + if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err); + tdm_client_destroy(tdm_client); + return TPL_ERROR_INVALID_OPERATION; + } + + wl_vk_display->tdm_display_fd = tdm_display_fd; + wl_vk_display->tdm_client = tdm_client; + wl_vk_display->tdm_source = NULL; + wl_vk_display->tdm_initialized = TPL_TRUE; + + TPL_INFO("[TDM_CLIENT_INIT]", + "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_vk_display, tdm_client, tdm_display_fd); + + return TPL_ERROR_NONE; +} + +#define IMPL_TIZEN_SURFACE_SHM_VERSION 2 + +static void +__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, + uint32_t name, const char *interface, + uint32_t version) +{ + tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data; + + if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) { + char *env = tpl_getenv("TPL_EFS"); + if (env && !atoi(env)) { + wl_vk_display->use_explicit_sync = TPL_FALSE; + } else { + wl_vk_display->explicit_sync = + wl_registry_bind(wl_registry, name, + &zwp_linux_explicit_synchronization_v1_interface, 1); + wl_vk_display->use_explicit_sync = TPL_TRUE; + TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface"); + } + } +} + +static void +__cb_wl_resistry_global_remove_callback(void *data, + struct wl_registry *wl_registry, + uint32_t name) +{ +} + +static const struct wl_registry_listener registry_listener = { + __cb_wl_resistry_global_callback, + __cb_wl_resistry_global_remove_callback +}; + +static void +_wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display, + const char *func_name) +{ + int dpy_err; + char buf[1024]; + strerror_r(errno, buf, sizeof(buf)); + + if (wl_vk_display->last_error == errno) + return; + + TPL_ERR("falied to %s. 
error:%d(%s)", func_name, errno, buf); + + dpy_err = wl_display_get_error(wl_vk_display->wl_display); + if (dpy_err == EPROTO) { + const struct wl_interface *err_interface; + uint32_t err_proxy_id, err_code; + err_code = wl_display_get_protocol_error(wl_vk_display->wl_display, + &err_interface, + &err_proxy_id); + TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d", + err_interface->name, err_code, err_proxy_id); + } + + wl_vk_display->last_error = errno; +} + +tpl_result_t +_thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display) +{ + struct wl_registry *registry = NULL; + struct wl_event_queue *queue = NULL; + struct wl_display *display_wrapper = NULL; + struct wl_proxy *wl_tbm = NULL; + struct wayland_tbm_client *wl_tbm_client = NULL; + int ret; + tpl_result_t result = TPL_ERROR_NONE; + + queue = wl_display_create_queue(wl_vk_display->wl_display); + if (!queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_vk_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display); + if (!wl_vk_display->ev_queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_vk_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display); + if (!display_wrapper) { + TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)", + wl_vk_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue); + + registry = wl_display_get_registry(display_wrapper); + if (!registry) { + TPL_ERR("Failed to create wl_registry"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_wrapper_destroy(display_wrapper); + display_wrapper = NULL; + + wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display); + if (!wl_tbm_client) { + TPL_ERR("Failed to initialize wl_tbm_client."); + result = TPL_ERROR_INVALID_CONNECTION; + goto fini; + } + + wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client); + if (!wl_tbm) { + TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client); + result = TPL_ERROR_INVALID_CONNECTION; + goto fini; + } + + wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue); + wl_vk_display->wl_tbm_client = wl_tbm_client; + + if (wl_registry_add_listener(registry, ®istry_listener, + wl_vk_display)) { + TPL_ERR("Failed to wl_registry_add_listener"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue); + if (ret == -1) { + _wl_display_print_err(wl_vk_display, "roundtrip_queue"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + if (wl_vk_display->explicit_sync) { + wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync, + wl_vk_display->ev_queue); + TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", + wl_vk_display->explicit_sync); + } + + wl_vk_display->wl_initialized = TPL_TRUE; + + TPL_INFO("[WAYLAND_INIT]", + "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)", + wl_vk_display, wl_vk_display->wl_display, + wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue); + TPL_INFO("[WAYLAND_INIT]", + "explicit_sync(%p)", + wl_vk_display->explicit_sync); + +fini: + if (display_wrapper) + wl_proxy_wrapper_destroy(display_wrapper); + if (registry) + wl_registry_destroy(registry); + if (queue) + wl_event_queue_destroy(queue); + + 
return result; +} + +void +_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display) +{ + /* If wl_vk_display is in prepared state, cancel it */ + if (wl_vk_display->prepared) { + wl_display_cancel_read(wl_vk_display->wl_display); + wl_vk_display->prepared = TPL_FALSE; + } + + if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display, + wl_vk_display->ev_queue) == -1) { + _wl_display_print_err(wl_vk_display, "dispatch_queue_pending"); + } - if (twe_check_native_handle_is_wl_display(native_dpy)) + if (wl_vk_display->explicit_sync) { + TPL_INFO("[EXPLICIT_SYNC_DESTROY]", + "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.", + wl_vk_display, wl_vk_display->explicit_sync); + zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync); + wl_vk_display->explicit_sync = NULL; + } + + if (wl_vk_display->wl_tbm_client) { + struct wl_proxy *wl_tbm = NULL; + + wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm( + wl_vk_display->wl_tbm_client); + if (wl_tbm) { + wl_proxy_set_queue(wl_tbm, NULL); + } + + TPL_INFO("[WL_TBM_DEINIT]", + "wl_vk_display(%p) wl_tbm_client(%p)", + wl_vk_display, wl_vk_display->wl_tbm_client); + wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client); + wl_vk_display->wl_tbm_client = NULL; + } + + wl_event_queue_destroy(wl_vk_display->ev_queue); + + wl_vk_display->wl_initialized = TPL_FALSE; + + TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)", + wl_vk_display, wl_vk_display->wl_display); +} + +static void* +_thread_init(void *data) +{ + tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data; + + if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) { + TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)", + wl_vk_display, wl_vk_display->wl_display); + } + + if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) { + TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED"); + } + + return wl_vk_display; +} + +static tpl_bool_t +__thread_func_disp_prepare(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + /* If this wl_vk_display is already prepared, + * do nothing in this function. */ + if (wl_vk_display->prepared) + return TPL_FALSE; + + /* If there is a last_error, there is no need to poll, + * so skip directly to dispatch. + * prepare -> dispatch */ + if (wl_vk_display->last_error) return TPL_TRUE; + while (wl_display_prepare_read_queue(wl_vk_display->wl_display, + wl_vk_display->ev_queue) != 0) { + if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display, + wl_vk_display->ev_queue) == -1) { + _wl_display_print_err(wl_vk_display, "dispatch_queue_pending"); + } + } + + wl_vk_display->prepared = TPL_TRUE; + + wl_display_flush(wl_vk_display->wl_display); + return TPL_FALSE; } +static tpl_bool_t +__thread_func_disp_check(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + tpl_bool_t ret = TPL_FALSE; + + if (!wl_vk_display->prepared) + return ret; + + /* If prepared, but last_error is set, + * cancel_read is executed and FALSE is returned. + * That can lead to G_SOURCE_REMOVE by calling disp_prepare again + * and skipping disp_check from prepare to disp_dispatch. 
+ * check -> prepare -> dispatch -> G_SOURCE_REMOVE */ + if (wl_vk_display->prepared && wl_vk_display->last_error) { + wl_display_cancel_read(wl_vk_display->wl_display); + return ret; + } + + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_read_events(wl_vk_display->wl_display) == -1) + _wl_display_print_err(wl_vk_display, "read_event"); + ret = TPL_TRUE; + } else { + wl_display_cancel_read(wl_vk_display->wl_display); + ret = TPL_FALSE; + } + + wl_vk_display->prepared = TPL_FALSE; + + return ret; +} + +static tpl_bool_t +__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + TPL_IGNORE(message); + + /* If there is last_error, SOURCE_REMOVE should be returned + * to remove the gsource from the main loop. + * This is because wl_vk_display is not valid since last_error was set.*/ + if (wl_vk_display->last_error) { + return TPL_FALSE; + } + + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display, + wl_vk_display->ev_queue) == -1) { + _wl_display_print_err(wl_vk_display, "dispatch_queue_pending"); + } + } + + wl_display_flush(wl_vk_display->wl_display); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + + return TPL_TRUE; +} + +static void +__thread_func_disp_finalize(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + if (wl_vk_display->wl_initialized) + _thread_wl_display_fini(wl_vk_display); + + TPL_LOG_T("WL_EGL", "finalize| wl_vk_display(%p) tpl_gsource(%p)", + wl_vk_display, gsource); + + return; +} + + +static tpl_gsource_functions disp_funcs = { + .prepare = __thread_func_disp_prepare, + .check = __thread_func_disp_check, + .dispatch = __thread_func_disp_dispatch, + .finalize = __thread_func_disp_finalize, +}; + static tpl_result_t __tpl_wl_vk_wsi_display_init(tpl_display_t *display) { - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; - TPL_ASSERT(display); + tpl_wl_vk_display_t *wl_vk_display = NULL; + /* Do not allow default display in wayland */ if (!display->native_handle) { TPL_ERR("Invalid native handle for display."); return TPL_ERROR_INVALID_PARAMETER; } - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) calloc(1, - sizeof(tpl_wl_vk_display_t)); - if (!wayland_vk_wsi_display) { + if (!_check_native_handle_is_wl_display(display->native_handle)) { + TPL_ERR("native_handle(%p) is not wl_display", display->native_handle); + return TPL_ERROR_INVALID_PARAMETER; + } + + wl_vk_display = (tpl_wl_vk_display_t *) calloc(1, + sizeof(tpl_wl_vk_display_t)); + if (!wl_vk_display) { TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t."); return TPL_ERROR_OUT_OF_MEMORY; } - display->backend.data = wayland_vk_wsi_display; + display->backend.data = wl_vk_display; + display->bufmgr_fd = -1; - if (twe_check_native_handle_is_wl_display(display->native_handle)) { - wayland_vk_wsi_display->wl_thread = twe_thread_create(); - if (!wayland_vk_wsi_display->wl_thread) { - TPL_ERR("Failed to create twe_thread."); - goto free_display; - } + wl_vk_display->tdm_initialized = TPL_FALSE; + wl_vk_display->wl_initialized = TPL_FALSE; + + wl_vk_display->ev_queue = NULL; + wl_vk_display->wl_display = (struct wl_display *)display->native_handle; + wl_vk_display->last_error = 0; + wl_vk_display->use_explicit_sync = TPL_FALSE; // default disabled + wl_vk_display->prepared = TPL_FALSE; + + /* 
Wayland Interfaces */ + wl_vk_display->explicit_sync = NULL; + wl_vk_display->wl_tbm_client = NULL; - wayland_vk_wsi_display->twe_display = - twe_display_add(wayland_vk_wsi_display->wl_thread, - display->native_handle, - display->backend.type); - if (!wayland_vk_wsi_display->twe_display) { - TPL_ERR("Failed to add native_display(%p) to thread(%p)", - display->native_handle, - wayland_vk_wsi_display->wl_thread); - goto free_display; + /* Vulkan specific surface capabilities */ + wl_vk_display->min_buffer = 2; + wl_vk_display->max_buffer = VK_CLIENT_QUEUE_SIZE; + wl_vk_display->present_modes = TPL_DISPLAY_PRESENT_MODE_FIFO; + + wl_vk_display->use_wait_vblank = TPL_TRUE; // default enabled + { + char *env = tpl_getenv("TPL_WAIT_VBLANK"); + if (env && !atoi(env)) { + wl_vk_display->use_wait_vblank = TPL_FALSE; } + } - } else { - TPL_ERR("Invalid native handle for display."); + tpl_gmutex_init(&wl_vk_display->wl_event_mutex); + + /* Create gthread */ + wl_vk_display->thread = tpl_gthread_create("wl_egl_thread", + (tpl_gthread_func)_thread_init, + (void *)wl_vk_display); + if (!wl_vk_display->thread) { + TPL_ERR("Failed to create wl_egl_thread"); goto free_display; } - TPL_LOG_T("WL_VK", - "[INIT DISPLAY] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", - wayland_vk_wsi_display, - wayland_vk_wsi_display->wl_thread, - wayland_vk_wsi_display->twe_display); + wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread, + (void *)wl_vk_display, + wl_display_get_fd(wl_vk_display->wl_display), + &disp_funcs, SOURCE_TYPE_NORMAL); + if (!wl_vk_display->disp_source) { + TPL_ERR("Failed to add native_display(%p) to thread(%p)", + display->native_handle, + wl_vk_display->thread); + goto free_display; + } + + wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread, + (void *)wl_vk_display, + wl_vk_display->tdm_display_fd, + &tdm_funcs, SOURCE_TYPE_NORMAL); + if (!wl_vk_display->tdm_source) { + TPL_ERR("Failed to create tdm_gsource\n"); + goto free_display; + } + + TPL_INFO("[DISPLAY_INIT]", + "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_vk_display, + wl_vk_display->thread, + wl_vk_display->wl_display); + + TPL_INFO("[DISPLAY_INIT]", + "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)", + wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE", + wl_vk_display->use_explicit_sync ? 
"TRUE" : "FALSE"); return TPL_ERROR_NONE; free_display: - if (wayland_vk_wsi_display) { - if (wayland_vk_wsi_display->twe_display) - twe_display_del(wayland_vk_wsi_display->twe_display); - if (wayland_vk_wsi_display->wl_thread) - twe_thread_destroy(wayland_vk_wsi_display->wl_thread); - - wayland_vk_wsi_display->wl_thread = NULL; - wayland_vk_wsi_display->twe_display = NULL; + if (wl_vk_display->thread) { + if (wl_vk_display->tdm_source) + tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); + if (wl_vk_display->disp_source) + tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); - free(wayland_vk_wsi_display); - display->backend.data = NULL; + tpl_gthread_destroy(wl_vk_display->thread); } + wl_vk_display->thread = NULL; + free(wl_vk_display); + + display->backend.data = NULL; return TPL_ERROR_INVALID_OPERATION; } static void __tpl_wl_vk_wsi_display_fini(tpl_display_t *display) { - tpl_wl_vk_display_t *wayland_vk_wsi_display; + tpl_wl_vk_display_t *wl_vk_display; TPL_ASSERT(display); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; - if (wayland_vk_wsi_display) { - - TPL_LOG_T("WL_VK", - "[FINI] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", - wayland_vk_wsi_display, - wayland_vk_wsi_display->wl_thread, - wayland_vk_wsi_display->twe_display); - - if (wayland_vk_wsi_display->twe_display) { - tpl_result_t ret = TPL_ERROR_NONE; - ret = twe_display_del(wayland_vk_wsi_display->twe_display); - if (ret != TPL_ERROR_NONE) - TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)", - wayland_vk_wsi_display->twe_display, - wayland_vk_wsi_display->wl_thread); - wayland_vk_wsi_display->twe_display = NULL; + wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data; + if (wl_vk_display) { + TPL_INFO("[DISPLAY_FINI]", + "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_vk_display, + wl_vk_display->thread, + wl_vk_display->wl_display); + + if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) { + tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); + wl_vk_display->tdm_source = NULL; + } + + if (wl_vk_display->disp_source) { + tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); + wl_vk_display->disp_source = NULL; } - if (wayland_vk_wsi_display->wl_thread) { - twe_thread_destroy(wayland_vk_wsi_display->wl_thread); - wayland_vk_wsi_display->wl_thread = NULL; + if (wl_vk_display->thread) { + tpl_gthread_destroy(wl_vk_display->thread); + wl_vk_display->thread = NULL; } - free(wayland_vk_wsi_display); + tpl_gmutex_clear(&wl_vk_display->wl_event_mutex); + + free(wl_vk_display); } + display->backend.data = NULL; } @@ -335,23 +838,16 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( tpl_display_t *display, tpl_handle_t window, int *min, int *max) { - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; + wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); - if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; - - res = twe_display_get_buffer_count(wayland_vk_wsi_display->twe_display, - min, max); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to query buffer count. 
twe_display(%p)", - wayland_vk_wsi_display->twe_display); - return res; - } + if (min) *min = wl_vk_display->min_buffer; + if (max) *max = wl_vk_display->max_buffer; return TPL_ERROR_NONE; } @@ -359,26 +855,18 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( static tpl_result_t __tpl_wl_vk_wsi_display_query_window_supported_present_modes( tpl_display_t *display, - tpl_handle_t window, int *modes) + tpl_handle_t window, int *present_modes) { - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; - - if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; + wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); - if (modes) { - res = twe_display_get_present_mode(wayland_vk_wsi_display->twe_display, - modes); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to query present modes. twe_display(%p)", - wayland_vk_wsi_display->twe_display); - return res; - } + if (present_modes) { + *present_modes = wl_vk_display->present_modes; } return TPL_ERROR_NONE; @@ -388,7 +876,7 @@ static tpl_result_t __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; twe_surface_h twe_surface = NULL; TPL_ASSERT(surface); @@ -402,11 +890,11 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) return TPL_ERROR_OUT_OF_MEMORY; } - wayland_vk_wsi_display = + wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - if (!wayland_vk_wsi_display) { - TPL_ERR("Invalid parameter. wayland_vk_wsi_display(%p)", - wayland_vk_wsi_display); + if (!wl_vk_display) { + TPL_ERR("Invalid parameter. 
wl_vk_display(%p)", + wl_vk_display); free(wayland_vk_wsi_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -414,13 +902,13 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) surface->backend.data = (void *)wayland_vk_wsi_surface; wayland_vk_wsi_surface->tbm_queue = NULL; - twe_surface = twe_surface_add(wayland_vk_wsi_display->wl_thread, - wayland_vk_wsi_display->twe_display, + twe_surface = twe_surface_add(wl_vk_display->thread, + wl_vk_display->twe_display, surface->native_handle, surface->format, surface->num_buffers); if (!twe_surface) { TPL_ERR("Failed to add native_surface(%p) to thread(%p)", - surface->native_handle, wayland_vk_wsi_display->wl_thread); + surface->native_handle, wl_vk_display->thread); free(wayland_vk_wsi_surface); surface->backend.data = NULL; return TPL_ERROR_OUT_OF_MEMORY; @@ -441,7 +929,7 @@ static void __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->display); @@ -449,9 +937,9 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; if (wayland_vk_wsi_surface == NULL) return; - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - if (wayland_vk_wsi_display == NULL) return; + if (wl_vk_display == NULL) return; if (wayland_vk_wsi_surface->tbm_queue) __tpl_wl_vk_wsi_surface_destroy_swapchain(surface); @@ -470,7 +958,7 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) != TPL_ERROR_NONE) { TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", wayland_vk_wsi_surface->twe_surface, - wayland_vk_wsi_display->wl_thread); + wl_vk_display->thread); } wayland_vk_wsi_surface->twe_surface = NULL; @@ -596,7 +1084,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface = NULL; tpl_wl_vk_surface_t *wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - tpl_wl_vk_display_t *wayland_vk_wsi_display = + tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; tbm_surface_queue_error_e tsq_err = 0; tpl_result_t lock_res = TPL_ERROR_NONE; @@ -607,7 +1095,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_OBJECT_UNLOCK(surface); TRACE_BEGIN("WAIT_DEQUEUEABLE"); - lock_res = twe_display_lock(wayland_vk_wsi_display->twe_display); + lock_res = twe_display_lock(wl_vk_display->twe_display); res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface, timeout_ns); TRACE_END(); @@ -617,13 +1105,13 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")", timeout_ns); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } else if (res != TPL_ERROR_NONE) { TPL_ERR("Invalid operation. twe_surface(%p) timeout_ns(%" PRIu64 ")", wayland_vk_wsi_surface->twe_surface, timeout_ns); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } @@ -631,7 +1119,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. 
Do not process dequeue.", wayland_vk_wsi_surface->tbm_queue); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } @@ -642,7 +1130,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d", wayland_vk_wsi_surface->tbm_queue, tsq_err); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } @@ -657,7 +1145,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return tbm_surface; } @@ -668,7 +1156,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, int *buffer_count) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; int i; tpl_result_t ret = TPL_ERROR_NONE; @@ -680,15 +1168,15 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, TPL_ASSERT(buffer_count); wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)surface->display->backend.data; + wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { + if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, NULL, buffer_count); if (ret != TPL_ERROR_NONE) { TPL_ERR("Failed to get buffer_count. 
twe_surface(%p)", wayland_vk_wsi_surface->twe_surface); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return ret; } @@ -697,7 +1185,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, sizeof(tbm_surface_h)); if (!wayland_vk_wsi_surface->swapchain_buffers) { TPL_ERR("Failed to allocate memory for buffers."); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_OUT_OF_MEMORY; } @@ -709,7 +1197,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, wayland_vk_wsi_surface, wayland_vk_wsi_surface->twe_surface); free(wayland_vk_wsi_surface->swapchain_buffers); wayland_vk_wsi_surface->swapchain_buffers = NULL; - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return ret; } @@ -725,7 +1213,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, *buffers = wayland_vk_wsi_surface->swapchain_buffers; - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); } return TPL_ERROR_NONE; @@ -775,7 +1263,7 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, int height, int buffer_count, int present_mode) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(surface); @@ -785,9 +1273,9 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; TPL_ASSERT(wayland_vk_wsi_surface); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - TPL_ASSERT(wayland_vk_wsi_display); + TPL_ASSERT(wl_vk_display); if (wayland_vk_wsi_surface->tbm_queue) { int old_width = tbm_surface_queue_get_width(wayland_vk_wsi_surface->tbm_queue); @@ -862,7 +1350,7 @@ static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; unsigned int ref; @@ -872,15 +1360,15 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) TPL_ASSERT(surface->display->backend.data); wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { + if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference); if (ref > 0) { TPL_LOG_T("WL_VK", "This swapchain is still valid. 
| twe_surface(%p)", wayland_vk_wsi_surface->twe_surface); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } @@ -888,7 +1376,7 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) if (wayland_vk_wsi_surface->reset) { TPL_LOG_T("WL_VK", "Since reset is in the TRUE state, it will not be destroyed."); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } @@ -909,13 +1397,13 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) if (res != TPL_ERROR_NONE) { TPL_ERR("Failed to destroy swapchain. twe_surface(%p)", wayland_vk_wsi_surface->twe_surface); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return res; } wayland_vk_wsi_surface->tbm_queue = NULL; - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); } return TPL_ERROR_NONE; @@ -926,7 +1414,7 @@ __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) { if (!native_dpy) return TPL_FALSE; - if (twe_check_native_handle_is_wl_display(native_dpy)) + if (_check_native_handle_is_wl_display(native_dpy)) return TPL_TRUE; return TPL_FALSE;
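
Note on the tpl_gsource usage introduced above: the patch creates one source per fd on the dedicated gthread, each callback recovers its context with tpl_gsource_get_data(), and teardown goes through tpl_gsource_destroy(). The following is a minimal, illustrative sketch of that pattern only — the my_ctx_t type, the __my_dispatch/__my_finalize callbacks and the my_ctx_attach()/my_ctx_detach() helpers are hypothetical, the include list is abbreviated, and the tpl_* signatures are used exactly as they appear in this diff.

#include <wayland-client.h>
#include "tpl_utils_gthread.h"

/* Hypothetical per-display context standing in for tpl_wl_vk_display_t. */
typedef struct {
	struct wl_display *wl_display;
	tpl_gthread       *thread;
	tpl_gsource       *disp_source;
} my_ctx_t;

/* dispatch: runs on the gthread when the watched fd becomes readable. */
static tpl_bool_t
__my_dispatch(tpl_gsource *gsource, uint64_t message)
{
	my_ctx_t *ctx = (my_ctx_t *)tpl_gsource_get_data(gsource);

	TPL_IGNORE(message);

	if (tpl_gsource_check_io_condition(gsource))
		wl_display_dispatch_pending(ctx->wl_display);

	/* Returning TPL_FALSE would correspond to G_SOURCE_REMOVE,
	 * as described in the disp_check/tdm_dispatch comments above. */
	return TPL_TRUE;
}

/* finalize: runs when the source is destroyed from the gthread. */
static void
__my_finalize(tpl_gsource *gsource)
{
	my_ctx_t *ctx = (my_ctx_t *)tpl_gsource_get_data(gsource);
	ctx->disp_source = NULL;
}

static tpl_gsource_functions my_funcs = {
	.prepare  = NULL,   /* optional, like tdm_funcs above */
	.check    = NULL,
	.dispatch = __my_dispatch,
	.finalize = __my_finalize,
};

/* Attach/detach mirrors __tpl_wl_vk_wsi_display_init()/_fini(). */
static tpl_result_t
my_ctx_attach(my_ctx_t *ctx, tpl_gthread *thread)
{
	ctx->thread = thread;
	ctx->disp_source = tpl_gsource_create(thread, (void *)ctx,
										  wl_display_get_fd(ctx->wl_display),
										  &my_funcs, SOURCE_TYPE_NORMAL);
	if (!ctx->disp_source)
		return TPL_ERROR_INVALID_OPERATION;

	return TPL_ERROR_NONE;
}

static void
my_ctx_detach(my_ctx_t *ctx)
{
	if (ctx->disp_source) {
		/* Second argument passed as in __tpl_wl_vk_wsi_display_fini() above. */
		tpl_gsource_destroy(ctx->disp_source, TPL_TRUE);
		ctx->disp_source = NULL;
	}
}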