From 44623de646cd885ff3bed81426d374549d556cbc Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Wed, 24 Mar 2021 17:06:35 +0900
Subject: [PATCH 01/16] Add IO exception checking to reduce warning logs.

- When a problem occurs, there are cases where the result of
  g_source_query_unix_fd() is (cond == 0).
- Since too many warning logs may be output in this case, exception
  handling was added to simply ignore it.

Change-Id: I694c5916c82d0969a86db8207ad8a64224e2fb25
Signed-off-by: Joonbum Ko
---
 src/tpl_utils_gthread.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c
index 65a1db2..b024088 100644
--- a/src/tpl_utils_gthread.c
+++ b/src/tpl_utils_gthread.c
@@ -189,7 +189,9 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
 				g_mutex_unlock(&thread->thread_mutex);
 			}
 		}
-	} else {
+	}
+
+	if (cond && !(cond & G_IO_IN)) {
 		/* When some io errors occur, it is not considered as a critical error.
 		 * There may be problems with the screen, but it does not affect the operation. */
 		TPL_WARN("Invalid GIOCondition occured. tpl_gsource(%p) fd(%d) cond(%d)",
--
2.7.4
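For context, the condition handling this patch moves to is the standard GLib unix-fd source idiom: treat G_IO_IN as the wake-up signal, silently tolerate a transient (cond == 0), and warn only when genuine error bits are present. A minimal sketch of that dispatch shape follows; the reduced sketch_gsource struct and the g_warning call are illustrative stand-ins, not the library's code.

#include <glib.h>

/* Reduced source wrapper: the real tpl_gsource carries more state. */
typedef struct {
	GSource gsource;
	gpointer tag;   /* returned by g_source_add_unix_fd() */
	int fd;
} sketch_gsource;

static gboolean
sketch_dispatch(GSource *source, GSourceFunc cb, gpointer data)
{
	sketch_gsource *gs = (sketch_gsource *)source;
	GIOCondition cond = g_source_query_unix_fd(source, gs->tag);

	if (cond & G_IO_IN) {
		/* normal path: drain the eventfd and run the callback */
	}

	/* cond == 0 can be reported transiently when something went wrong
	 * upstream; ignoring it silently avoids flooding the log. Warn only
	 * when real error bits (ERR/HUP/NVAL) are set. */
	if (cond && !(cond & G_IO_IN))
		g_warning("unexpected GIOCondition(0x%x) on fd(%d)", cond, gs->fd);

	return G_SOURCE_CONTINUE;
}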
From 98e734005b6b0548fc00773f89237d9a27dd4e92 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Wed, 24 Mar 2021 19:48:31 +0900
Subject: [PATCH 02/16] Add intended_destroy flag to destroy only when intended.

- G_IO_IN may occur on the eventfd of the finalizer source due to some
  unexpected errors. In this case, if the gsource is destroyed
  unintentionally, a fatal problem may occur in the thread.
- The intended_destroy flag newly added to tpl_gsource helps the
  finalizer source operate only when the G_IO_IN was intended.

Change-Id: I6dd6a2de7e3c4ff667f8d639e30783584e6e8cec
Signed-off-by: Joonbum Ko
---
 src/tpl_utils_gthread.c | 55 ++++++++++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 24 deletions(-)

diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c
index b024088..47a7041 100644
--- a/src/tpl_utils_gthread.c
+++ b/src/tpl_utils_gthread.c
@@ -23,10 +23,13 @@ struct _tpl_gsource {
 	tpl_gsource_type_t type;
 
 	tpl_gsource *finalizer;
+	tpl_bool_t intended_destroy;
 
 	void *data;
 };
 
+static void
+__gsource_remove_and_destroy(tpl_gsource *source);
 
 static gpointer
 _tpl_gthread_init(gpointer data)
@@ -175,15 +178,14 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
 	if (gsource->gsource_funcs && gsource->gsource_funcs->dispatch)
 		ret = gsource->gsource_funcs->dispatch(gsource, message);
 
-	if (gsource->type == SOURCE_TYPE_FINALIZER) {
+	if (gsource->type == SOURCE_TYPE_FINALIZER &&
+		gsource->intended_destroy == TPL_TRUE) {
 		tpl_gsource *del_source = (tpl_gsource *)gsource->data;
 		if (!g_source_is_destroyed(&del_source->gsource)) {
 			tpl_gthread *thread = del_source->thread;
 
 			g_mutex_lock(&thread->thread_mutex);
-			g_source_remove_unix_fd(&del_source->gsource, del_source->tag);
-			g_source_destroy(&del_source->gsource);
-			g_source_unref(&del_source->gsource);
+			__gsource_remove_and_destroy(del_source);
 
 			g_cond_signal(&thread->thread_cond);
 			g_mutex_unlock(&thread->thread_mutex);
@@ -200,9 +202,7 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
 	if (gsource->type == SOURCE_TYPE_DISPOSABLE ||
 		gsource->type == SOURCE_TYPE_FINALIZER) {
-		g_source_remove_unix_fd(&gsource->gsource, gsource->tag);
-		g_source_destroy(&gsource->gsource);
-		g_source_unref(&gsource->gsource);
+		__gsource_remove_and_destroy(gsource);
 
 		ret = G_SOURCE_REMOVE;
 	}
@@ -265,6 +265,7 @@ tpl_gsource_create(tpl_gthread *thread, void *data, int fd,
 	new_gsource->gsource_funcs = funcs;
 	new_gsource->data = data;
 	new_gsource->type = type;
+	new_gsource->intended_destroy = TPL_FALSE;
 
 	if (new_gsource->type == SOURCE_TYPE_NORMAL) {
 		tpl_gsource *finalizer = tpl_gsource_create(thread, new_gsource, -1,
@@ -285,6 +286,17 @@ tpl_gsource_create(tpl_gthread *thread, void *data, int fd,
 	return new_gsource;
 }
 
+static void
+__gsource_remove_and_destroy(tpl_gsource *source)
+{
+	TPL_DEBUG("[GSOURCE_DESTROY] tpl_gsource(%p) type(%d)",
+			  source, source->type);
+
+	g_source_remove_unix_fd(&source->gsource, source->tag);
+	g_source_destroy(&source->gsource);
+	g_source_unref(&source->gsource);
+}
+
 void
 tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread)
 {
@@ -294,32 +306,27 @@ tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread)
 		return;
 	}
 
-	TPL_DEBUG("[GSOURCE_DESTROY] tpl_gsource(%p) type(%d)",
-			  source, source->type);
+	if (source->type == SOURCE_TYPE_NORMAL &&
+		source->finalizer) {
+		tpl_gsource *finalizer = source->finalizer;
 
-	if (destroy_in_thread) {
-		tpl_gthread *thread = source->thread;
-		if (source->type == SOURCE_TYPE_NORMAL) {
+		if (destroy_in_thread) {
+			tpl_gthread *thread = source->thread;
 			g_mutex_lock(&thread->thread_mutex);
 
-			tpl_gsource_send_message(source->finalizer, 1);
+			finalizer->intended_destroy = TPL_TRUE;
+			tpl_gsource_send_message(finalizer, 1);
 
 			g_cond_wait(&thread->thread_cond, &thread->thread_mutex);
 			g_mutex_unlock(&thread->thread_mutex);
-		}
-	} else {
-		if (source->type == SOURCE_TYPE_NORMAL &&
-			source->finalizer) {
-			tpl_gsource *finalize = source->finalizer;
-			g_source_remove_unix_fd(&finalize->gsource, finalize->tag);
-			g_source_destroy(&finalize->gsource);
-			g_source_unref(&finalize->gsource);
+		} else {
+			__gsource_remove_and_destroy(finalizer);
 			source->finalizer = NULL;
 		}
+	}
 
-	g_source_remove_unix_fd(&source->gsource, source->tag);
-	g_source_destroy(&source->gsource);
-	g_source_unref(&source->gsource);
+	if (!destroy_in_thread) {
+		__gsource_remove_and_destroy(source);
 	}
 }
--
2.7.4
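The essence of this fix is a guarded wake-up: the destroying thread marks the finalizer before signaling it, and the thread side tears the target source down only when the mark is set, so a spurious G_IO_IN on the eventfd can no longer destroy a live source. Below is a self-contained sketch of that handshake, deliberately reduced to a plain GLib condition-variable pair rather than the eventfd-backed tpl_gsource the code actually uses; all names are illustrative.

#include <glib.h>

typedef struct {
	GMutex mutex;
	GCond cond;
	gboolean intended_destroy;   /* the mark PATCH 02 adds */
	gboolean done;
} guarded_finalizer;

static gpointer
finalizer_worker(gpointer data)
{
	guarded_finalizer *f = data;
	g_mutex_lock(&f->mutex);
	while (!f->intended_destroy)     /* spurious wake-ups fall through */
		g_cond_wait(&f->cond, &f->mutex);
	/* ... the real code would call __gsource_remove_and_destroy() here ... */
	f->done = TRUE;
	g_cond_signal(&f->cond);         /* echo completion to the destroyer */
	g_mutex_unlock(&f->mutex);
	return NULL;
}

int
main(void)
{
	guarded_finalizer f = { 0 };
	g_mutex_init(&f.mutex);
	g_cond_init(&f.cond);
	GThread *t = g_thread_new("finalizer", finalizer_worker, &f);

	g_mutex_lock(&f.mutex);
	f.intended_destroy = TRUE;       /* mark before waking */
	g_cond_signal(&f.cond);
	while (!f.done)                  /* wait until destruction completed */
		g_cond_wait(&f.cond, &f.mutex);
	g_mutex_unlock(&f.mutex);

	g_thread_join(t);
	return 0;
}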
From ab602cc8bceed075aebacd1a344bb03fa68b219c Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 25 Mar 2021 10:36:18 +0900
Subject: [PATCH 03/16] Modified the log output from gsource_finalize.

Change-Id: I491f012afb0aba638cb40b3f4f47840dfe36dac4
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_egl_thread.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index 4b29924..6147716 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -298,8 +298,9 @@ __thread_func_tdm_finalize(tpl_gsource *gsource)
 
 	wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
 
-	TPL_LOG_T("WL_EGL", "tdm_destroy| wl_egl_display(%p) tdm_client(%p)",
-			  wl_egl_display, wl_egl_display->tdm_client);
+	TPL_LOG_T("WL_EGL",
+			  "tdm_destroy| wl_egl_display(%p) tdm_client(%p) tpl_gsource(%p)",
+			  wl_egl_display, wl_egl_display->tdm_client, gsource);
 
 	if (wl_egl_display->tdm_client) {
 		tdm_client_destroy(wl_egl_display->tdm_client);
@@ -1508,8 +1509,8 @@ __thread_func_surf_finalize(tpl_gsource *gsource)
 
 	_thread_wl_egl_surface_fini(wl_egl_surface);
 
-	TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%p)",
-			  gsource, wl_egl_surface);
+	TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)",
+			  wl_egl_surface, gsource);
 }
 
 static tpl_gsource_functions surf_funcs = {
--
2.7.4

From 6f4c654f11ec32009cdef1adac07c4b1b7cf43cb Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 25 Mar 2021 12:46:10 +0900
Subject: [PATCH 04/16] Close release_fence_fd when wl_egl_buffer freed.

- The fenced_release event can be dispatched just before the
  wl_egl_buffer is freed. In that case, the release_fence_fd delivered
  with the fenced_release event may leak, because the wl_egl_buffer can
  no longer be used and is freed without closing the fd.

Change-Id: I7584c518b955c83f632b2d5ea281272f8dd2b166
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_egl_thread.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index 6147716..41bc081 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -2353,6 +2353,8 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
 		*release_fence = wl_egl_buffer->release_fence_fd;
 		TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
 				  wl_egl_surface, wl_egl_buffer, *release_fence);
+
+		wl_egl_buffer->release_fence_fd = -1;
 	} else {
 		*release_fence = -1;
 	}
@@ -3286,6 +3288,11 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
 		wl_egl_buffer->buffer_release = NULL;
 	}
 
+	if (wl_egl_buffer->release_fence_fd != -1) {
+		close(wl_egl_buffer->release_fence_fd);
+		wl_egl_buffer->release_fence_fd = -1;
+	}
+
 	if (wl_egl_buffer->waiting_source) {
 		tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
 		wl_egl_buffer->waiting_source = NULL;
--
2.7.4
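The rule this patch enforces is ordinary fd move semantics: whoever owns release_fence_fd must either hand it off and forget it, or close it. A compact sketch of the two sides, using hypothetical names and showing only the ownership bookkeeping:

#include <unistd.h>

/* Hypothetical reduced buffer: only the fence fd matters here.
 * -1 means "no fd owned". */
struct fence_holder {
	int release_fence_fd;
};

/* Dequeue side: hand the fd to the caller and give up ownership,
 * which is exactly what the added "release_fence_fd = -1" does. */
static int
take_release_fence(struct fence_holder *h)
{
	int fd = h->release_fence_fd;
	h->release_fence_fd = -1;
	return fd;
}

/* Free side: close any fd still owned. This is the leak PATCH 04
 * plugs: a fence delivered just before free was dropped unclosed. */
static void
holder_free(struct fence_holder *h)
{
	if (h->release_fence_fd != -1) {
		close(h->release_fence_fd);
		h->release_fence_fd = -1;
	}
}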
From acba9102f810718c37af4f7fc0d3e91c385b4e1c Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 25 Mar 2021 12:51:06 +0900
Subject: [PATCH 05/16] Fixed wrong use of NULL checking macro.

Change-Id: I9d1d11823a15395dac38360aff25d9c991351f53
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_egl_thread.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index 41bc081..d79e3af 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -2028,7 +2028,7 @@ __tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
 
 	wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
 
-	TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
 
 	TPL_INFO("[SET_PREROTATION_CAPABILITY]",
 			 "wl_egl_surface(%p) prerotation capability set to [%s]",
@@ -2048,7 +2048,7 @@ __tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
 
 	wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
 
-	TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
 
 	TPL_INFO("[SET_POST_INTERVAL]",
 			 "wl_egl_surface(%p) post_interval(%d -> %d)",
--
2.7.4

From 328520a250d372dea039ffa6c5c8a2eef6b44a95 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 25 Mar 2021 13:13:51 +0900
Subject: [PATCH 06/16] Make ready_to_commit TRUE when acquire_fence_fd is not used.

- Missing this line caused a serious problem where wl_surface_commit
  would not work if acquire_fence_fd was not used.

Change-Id: I22b610ad187d90b2d51cb73fa7e4913095997009
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_egl_thread.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index d79e3af..041f163 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -2667,6 +2667,8 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
 
 			ready_to_commit = TPL_FALSE;
 		}
+	} else {
+		ready_to_commit = TPL_TRUE;
 	}
 
 	if (ready_to_commit) {
--
2.7.4
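Reduced to its control flow, the acquire path must decide ready_to_commit on every branch. The sketch below shows the shape after the fix; the fence-wait details are elided and the commit helper's name is an assumption, since the surrounding code is not visible in the hunk.

/* Sketch of the decision PATCH 06 completes. The helper name
 * _thread_wl_surface_commit() is assumed, not taken from the hunk. */
static void
acquire_and_maybe_commit(tpl_wl_egl_surface_t *wl_egl_surface,
                         tpl_wl_egl_buffer_t *wl_egl_buffer)
{
	tpl_bool_t ready_to_commit = TPL_FALSE;

	if (wl_egl_buffer->acquire_fence_fd != -1) {
		/* fence in use: the commit is deferred until the fence
		 * signals (waiting_source path, elided here) */
		ready_to_commit = TPL_FALSE;
	} else {
		/* no acquire fence: nothing to wait for. Without this
		 * branch, ready_to_commit stayed FALSE and the commit
		 * below was never reached. */
		ready_to_commit = TPL_TRUE;
	}

	if (ready_to_commit)
		_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
}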
From 5c34f864f73625738ae6bad939cf59fac4883ca7 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 25 Mar 2021 13:15:53 +0900
Subject: [PATCH 07/16] Package version up to 1.8.3

Change-Id: Ibf44a2061e89f5b0c9841ebe3a2b9c81b424bd07
Signed-off-by: Joonbum Ko
---
 packaging/libtpl-egl.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec
index e3f399c..41e011c 100644
--- a/packaging/libtpl-egl.spec
+++ b/packaging/libtpl-egl.spec
@@ -4,7 +4,7 @@
 #TPL VERSION MACROS
 %define TPL_VERSION_MAJOR 1
 %define TPL_VERSION_MINOR 8
-%define TPL_VERSION_PATCH 2
+%define TPL_VERSION_PATCH 3
 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH}
 
 #TPL WINDOW SYSTEM DEFINITION
--
2.7.4

From 0fe8b5ccdd2633328312eeae6ce2c022001961e9 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 1 Apr 2021 16:24:03 +0900
Subject: [PATCH 08/16] Add missing line to return NULL when alloc failed.

Change-Id: Ifa88e7a36fd19c144d50c9cec7604541d6aabf24
Signed-off-by: Joonbum Ko
---
 src/tpl_utils_gthread.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c
index 47a7041..41c5101 100644
--- a/src/tpl_utils_gthread.c
+++ b/src/tpl_utils_gthread.c
@@ -77,6 +77,8 @@ tpl_gthread_create(const char *thread_name,
 		TPL_ERR("Failed to allocate tpl_gthread");
 		g_main_context_unref(context);
 		g_main_loop_unref(loop);
+
+		return NULL;
 	}
 
 	g_mutex_init(&new_thread->thread_mutex);
@@ -86,7 +88,6 @@ tpl_gthread_create(const char *thread_name,
 	g_mutex_lock(&new_thread->thread_mutex);
 
 	new_thread->loop = loop;
-	TPL_DEBUG("loop(%p)", loop);
 	new_thread->init_func = init_func;
 	new_thread->func_data = func_data;
 	new_thread->thread = g_thread_new(thread_name,
--
2.7.4

From 8b70cf229a31f2d9efd5babd714d3e36d8605552 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 1 Apr 2021 16:49:25 +0900
Subject: [PATCH 09/16] Fix potential overflow issue of buffers array.

Change-Id: I4d32443eb936f5ed0f0706f9e8fe3d3ac5187624
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_egl_thread.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index 041f163..1f2a63e 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -2215,6 +2215,28 @@ _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
 		for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
 			if (wl_egl_surface->buffers[i] == NULL) break;
 
+		/* If this exception is reached,
+		 * it may be a critical memory leak problem. */
+		if (i == BUFFER_ARRAY_SIZE) {
+			tpl_wl_egl_buffer_t *evicted_buffer = NULL;
+			int evicted_idx = 0; /* evict the frontmost buffer */
+
+			evicted_buffer = wl_egl_surface->buffers[evicted_idx];
+
+			TPL_WARN("wl_egl_surface(%p) buffers array is full. evict one.",
+					 wl_egl_surface);
+			TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
+					 evicted_buffer, evicted_buffer->tbm_surface,
+					 status_to_string[evicted_buffer->status]);
+
+			/* [TODO] need to think about whether there will be
+			 * better modifications */
+			wl_egl_surface->buffer_cnt--;
+			wl_egl_surface->buffers[evicted_idx] = NULL;
+
+			i = evicted_idx;
+		}
+
 		wl_egl_surface->buffer_cnt++;
 		wl_egl_surface->buffers[i] = wl_egl_buffer;
 		wl_egl_buffer->idx = i;
--
2.7.4

From 51d6a077c2c6e91e11e53090b2e3a1af2a8a7c82 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 1 Apr 2021 16:52:12 +0900
Subject: [PATCH 10/16] Package version up to 1.8.4

Change-Id: Icdb52e525fe619af52049250dbbc731e0334653d
Signed-off-by: Joonbum Ko
---
 packaging/libtpl-egl.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec
index 41e011c..8a81e7f 100644
--- a/packaging/libtpl-egl.spec
+++ b/packaging/libtpl-egl.spec
@@ -4,7 +4,7 @@
 #TPL VERSION MACROS
 %define TPL_VERSION_MAJOR 1
 %define TPL_VERSION_MINOR 8
-%define TPL_VERSION_PATCH 3
+%define TPL_VERSION_PATCH 4
 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH}
 
 #TPL WINDOW SYSTEM DEFINITION
--
2.7.4

From f528e5c50e88d91703185a9040238f934eacf156 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Wed, 17 Mar 2021 18:47:54 +0900
Subject: [PATCH 11/16] Modified structures to be used in the vulkan backend.
Change-Id: Ia5c4c0843b150f988416c38cefed569b13909272 Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 262 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 201 insertions(+), 61 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index cb4f549..74e7e0a 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -3,31 +3,171 @@ #include "tpl_internal.h" +#include +#include +#include +#include + +#include #include #include #include -#include +#include +#include +#include + +#include + +#include +#include + +#include "tpl_utils_gthread.h" + +#define BUFFER_ARRAY_SIZE 10 + +typedef struct _tpl_wl_vk_surface tpl_wl_vk_display_t; +typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t; +typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t; +typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t; + +struct _tpl_wl_vk_display { + tpl_gsource *disp_source; + tpl_gthread *thread; + tpl_gmutex wl_event_mutex; + + struct wl_display *wl_display; + struct wl_event_queue *ev_queue; + struct wayland_tbm_client *wl_tbm_client; + int last_error; /* errno of the last wl_display error*/ + + tpl_bool_t wl_initialized; + tpl_bool_t tdm_initialized; + + tdm_client *tdm_client; + tpl_gsource *tdm_source; + int tdm_display_fd; + + tpl_bool_t use_wait_vblank; + tpl_bool_t use_explicit_sync; + tpl_bool_t prepared; + + /* device surface capabilities */ + int min_buffer; + int max_buffer; + int present_modes; + + struct tizen_surface_shm *tss; /* used for surface buffer_flush */ + struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */ +}; + +struct _tpl_wl_vk_swapchain { + tpl_wl_vk_surface_t *wl_vk_surface; + + struct { + int width; + int height; + tbm_format format; + int buffer_count; + int present_mode; + } properties; + + tbm_surface_h *swapchain_buffers; + + tpl_util_atomic_uint ref_cnt; +}; + +struct _tpl_wl_vk_surface { + tpl_gsource *surf_source; + + tpl_wl_vk_swapchain_t *swapchain; + + tbm_surface_queue_h tbm_queue; + + struct wl_surface *wl_surface; + struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ + struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */ + + tdm_client_vblank *vblank; -#include "tpl_wayland_egl_thread.h" + /* surface information */ + int render_done_cnt; -typedef struct _tpl_wayland_vk_wsi_display tpl_wayland_vk_wsi_display_t; -typedef struct _tpl_wayland_vk_wsi_surface tpl_wayland_vk_wsi_surface_t; -typedef struct _tpl_wayland_vk_wsi_buffer tpl_wayland_vk_wsi_buffer_t; + tpl_wl_vk_display_t *wl_vk_display; + tpl_surface_t *tpl_surface; + + /* wl_vk_buffer array for buffer tracing */ + tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE]; + int buffer_cnt; /* the number of using wl_vk_buffers */ + tpl_gmutex buffers_mutex; + + tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */ + + tpl_gmutex surf_mutex; + tpl_gcond surf_cond; + + /* for waiting draw done */ + tpl_bool_t is_activated; + tpl_bool_t reset; /* TRUE if queue reseted by external */ + tpl_bool_t vblank_done; +}; -struct _tpl_wayland_vk_wsi_display { - twe_thread *wl_thread; - twe_display_h twe_display; +typedef enum buffer_status { + RELEASED = 0, // 0 + DEQUEUED, // 1 + ENQUEUED, // 2 + ACQUIRED, // 3 + WAITING_SIGNALED, // 4 + WAITING_VBLANK, // 5 + COMMITTED, // 6 +} buffer_status_t; + +static const char *status_to_string[7] = { + "RELEASED", // 0 + "DEQUEUED", // 1 + "ENQUEUED", // 2 + "ACQUIRED", // 3 + "WAITING_SIGNALED", // 4 + "WAITING_VBLANK", // 5 + 
"COMMITTED", // 6 }; -struct _tpl_wayland_vk_wsi_surface { - twe_surface_h twe_surface; - tbm_surface_queue_h tbm_queue; - tbm_surface_h *swapchain_buffers; - int buffer_count; - tpl_bool_t is_activated; - tpl_bool_t reset; - tpl_util_atomic_uint swapchain_reference; +struct _tpl_wl_vk_buffer { + tbm_surface_h tbm_surface; + int bo_name; + + struct wl_proxy *wl_buffer; + int dx, dy; /* position to attach to wl_surface */ + int width, height; /* size to attach to wl_surface */ + + buffer_status_t status; /* for tracing buffer status */ + int idx; /* position index in buffers array of wl_vk_surface */ + + /* for damage region */ + int num_rects; + int *rects; + + /* for checking need_to_commit (frontbuffer mode) */ + tpl_bool_t need_to_commit; + + /* to get release event via zwp_linux_buffer_release_v1 */ + struct zwp_linux_buffer_release_v1 *buffer_release; + + /* each buffers own its release_fence_fd, until it passes ownership + * to it to EGL */ + int32_t release_fence_fd; + + /* each buffers own its acquire_fence_fd. + * If it use zwp_linux_buffer_release_v1 the ownership of this fd + * will be passed to display server + * Otherwise it will be used as a fence waiting for render done + * on tpl thread */ + int32_t acquire_fence_fd; + + tpl_gmutex mutex; + tpl_gcond cond; + + tpl_wl_vk_surface_t *wl_vk_surface; }; static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( @@ -47,7 +187,7 @@ __tpl_wl_vk_wsi_display_is_wl_display(tpl_handle_t native_dpy) static tpl_result_t __tpl_wl_vk_wsi_display_init(tpl_display_t *display) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; TPL_ASSERT(display); @@ -57,10 +197,10 @@ __tpl_wl_vk_wsi_display_init(tpl_display_t *display) return TPL_ERROR_INVALID_PARAMETER; } - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) calloc(1, - sizeof(tpl_wayland_vk_wsi_display_t)); + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) calloc(1, + sizeof(tpl_wl_vk_display_t)); if (!wayland_vk_wsi_display) { - TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_display_t."); + TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t."); return TPL_ERROR_OUT_OF_MEMORY; } @@ -117,11 +257,11 @@ free_display: static void __tpl_wl_vk_wsi_display_fini(tpl_display_t *display) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display; + tpl_wl_vk_display_t *wayland_vk_wsi_display; TPL_ASSERT(display); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; if (wayland_vk_wsi_display) { TPL_LOG_T("WL_VK", @@ -195,13 +335,13 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( tpl_display_t *display, tpl_handle_t window, int *min, int *max) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; @@ -221,13 +361,13 @@ __tpl_wl_vk_wsi_display_query_window_supported_present_modes( tpl_display_t *display, tpl_handle_t window, int *modes) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(display); TPL_ASSERT(window); - 
wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; @@ -247,23 +387,23 @@ __tpl_wl_vk_wsi_display_query_window_supported_present_modes( static tpl_result_t __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; twe_surface_h twe_surface = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); TPL_ASSERT(surface->native_handle); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) calloc(1, - sizeof(tpl_wayland_vk_wsi_surface_t)); + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) calloc(1, + sizeof(tpl_wl_vk_surface_t)); if (!wayland_vk_wsi_surface) { - TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_surface_t."); + TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t."); return TPL_ERROR_OUT_OF_MEMORY; } wayland_vk_wsi_display = - (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + (tpl_wl_vk_display_t *)surface->display->backend.data; if (!wayland_vk_wsi_display) { TPL_ERR("Invalid parameter. wayland_vk_wsi_display(%p)", wayland_vk_wsi_display); @@ -291,7 +431,7 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) wayland_vk_wsi_surface->swapchain_buffers = NULL; TPL_LOG_T("WL_VK", - "[INIT]tpl_surface(%p) tpl_wayland_vk_wsi_surface(%p) twe_surface(%p)", + "[INIT]tpl_surface(%p) tpl_wl_vk_surface(%p) twe_surface(%p)", surface, wayland_vk_wsi_surface, twe_surface); return TPL_ERROR_NONE; @@ -300,16 +440,16 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) static void __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; if (wayland_vk_wsi_surface == NULL) return; - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; if (wayland_vk_wsi_display == NULL) return; @@ -351,8 +491,8 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display->native_handle); TPL_ASSERT(tbm_surface); - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + (tpl_wl_vk_surface_t *) surface->backend.data; tbm_surface_queue_error_e tsq_err; if (!tbm_surface_internal_is_valid(tbm_surface)) { @@ -404,8 +544,8 @@ __tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + (tpl_wl_vk_surface_t *)surface->backend.data; return !(wayland_vk_wsi_surface->reset); } @@ -414,10 +554,10 @@ static tpl_result_t __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, 
tbm_surface_h tbm_surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; if (!wayland_vk_wsi_surface) { TPL_ERR("Invalid backend surface. surface(%p) wayland_vk_wsi_surface(%p)", surface, wayland_vk_wsi_surface); @@ -454,10 +594,10 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display); tbm_surface_h tbm_surface = NULL; - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = - (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + (tpl_wl_vk_surface_t *)surface->backend.data; + tpl_wl_vk_display_t *wayland_vk_wsi_display = + (tpl_wl_vk_display_t *)surface->display->backend.data; tbm_surface_queue_error_e tsq_err = 0; tpl_result_t lock_res = TPL_ERROR_NONE; tpl_result_t res = TPL_ERROR_NONE; @@ -527,8 +667,8 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, tbm_surface_h **buffers, int *buffer_count) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; int i; tpl_result_t ret = TPL_ERROR_NONE; @@ -539,8 +679,8 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, TPL_ASSERT(buffers); TPL_ASSERT(buffer_count); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)surface->display->backend.data; if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, @@ -596,13 +736,13 @@ __cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue, void *data) { tpl_surface_t *surface = NULL; - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; tpl_bool_t is_activated = TPL_FALSE; surface = (tpl_surface_t *)data; TPL_CHECK_ON_NULL_RETURN(surface); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface); /* When queue_reset_callback is called, if is_activated is different from @@ -634,18 +774,18 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, tbm_format format, int width, int height, int buffer_count, int present_mode) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; 
TPL_ASSERT(wayland_vk_wsi_surface); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; TPL_ASSERT(wayland_vk_wsi_display); @@ -721,8 +861,8 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; unsigned int ref; @@ -731,8 +871,8 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) TPL_ASSERT(surface->display); TPL_ASSERT(surface->display->backend.data); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) surface->display->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference); -- 2.7.4 From 368965fc67d639933dc8849dde6dd15fd2bc1056 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 29 Mar 2021 11:07:47 +0900 Subject: [PATCH 12/16] tpl_wl_vk_thread: Modified wl_vk_display to use tpl_gthread_util Change-Id: I3fb5c37a1a2850a95d1218dc607f6c190e94da1c Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 748 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 618 insertions(+), 130 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 74e7e0a..3a846b9 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -25,8 +25,9 @@ #include "tpl_utils_gthread.h" #define BUFFER_ARRAY_SIZE 10 +#define VK_CLIENT_QUEUE_SIZE 3 -typedef struct _tpl_wl_vk_surface tpl_wl_vk_display_t; +typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t; typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t; typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t; typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t; @@ -57,7 +58,6 @@ struct _tpl_wl_vk_display { int max_buffer; int present_modes; - struct tizen_surface_shm *tss; /* used for surface buffer_flush */ struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */ }; @@ -86,7 +86,6 @@ struct _tpl_wl_vk_surface { struct wl_surface *wl_surface; struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ - struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */ tdm_client_vblank *vblank; @@ -173,120 +172,624 @@ struct _tpl_wl_vk_buffer { static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( tpl_surface_t *surface); -static TPL_INLINE tpl_bool_t -__tpl_wl_vk_wsi_display_is_wl_display(tpl_handle_t native_dpy) +static tpl_bool_t +_check_native_handle_is_wl_display(tpl_handle_t native_dpy) { - if (!native_dpy) return TPL_FALSE; + struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy; + + if (!wl_vk_native_dpy) { + TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy); + return TPL_FALSE; + } + + /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value + is a memory address pointing the structure of wl_display_interface. 
*/ + if (wl_vk_native_dpy == &wl_display_interface) + return TPL_TRUE; + + if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name, + strlen(wl_display_interface.name)) == 0) { + return TPL_TRUE; + } + + return TPL_FALSE; +} + +static tpl_bool_t +__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_vk_display_t *wl_vk_display = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; + + TPL_IGNORE(message); + + wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + if (!wl_vk_display) { + TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource); + TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); + return TPL_FALSE; + } + + tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client); + + /* If an error occurs in tdm_client_handle_events, it cannot be recovered. + * When tdm_source is no longer available due to an unexpected situation, + * wl_egl_thread must remove it from the thread and destroy it. + * In that case, tdm_vblank can no longer be used for surfaces and displays + * that used this tdm_source. */ + if (tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)", + tdm_err); + TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); + + tpl_gsource_destroy(gsource, TPL_FALSE); + + wl_vk_display->tdm_source = NULL; + + return TPL_FALSE; + } + + return TPL_TRUE; +} + +static void +__thread_func_tdm_finalize(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = NULL; + + wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + TPL_LOG_T("WL_VK", + "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)", + wl_vk_display, wl_vk_display->tdm_client, gsource); + + if (wl_vk_display->tdm_client) { + tdm_client_destroy(wl_vk_display->tdm_client); + wl_vk_display->tdm_client = NULL; + wl_vk_display->tdm_display_fd = -1; + } + + wl_vk_display->tdm_initialized = TPL_FALSE; +} + +static tpl_gsource_functions tdm_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_tdm_dispatch, + .finalize = __thread_func_tdm_finalize, +}; + +tpl_result_t +_thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display) +{ + tdm_client *tdm_client = NULL; + int tdm_display_fd = -1; + tdm_error tdm_err = TDM_ERROR_NONE; + + tdm_client = tdm_client_create(&tdm_err); + if (!tdm_client || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err); + return TPL_ERROR_INVALID_OPERATION; + } + + tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd); + if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err); + tdm_client_destroy(tdm_client); + return TPL_ERROR_INVALID_OPERATION; + } + + wl_vk_display->tdm_display_fd = tdm_display_fd; + wl_vk_display->tdm_client = tdm_client; + wl_vk_display->tdm_source = NULL; + wl_vk_display->tdm_initialized = TPL_TRUE; + + TPL_INFO("[TDM_CLIENT_INIT]", + "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_vk_display, tdm_client, tdm_display_fd); + + return TPL_ERROR_NONE; +} + +#define IMPL_TIZEN_SURFACE_SHM_VERSION 2 + +static void +__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, + uint32_t name, const char *interface, + uint32_t version) +{ + tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data; + + if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) { + char *env = tpl_getenv("TPL_EFS"); + if (env && !atoi(env)) { + wl_vk_display->use_explicit_sync = TPL_FALSE; + 
} else { + wl_vk_display->explicit_sync = + wl_registry_bind(wl_registry, name, + &zwp_linux_explicit_synchronization_v1_interface, 1); + wl_vk_display->use_explicit_sync = TPL_TRUE; + TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface"); + } + } +} + +static void +__cb_wl_resistry_global_remove_callback(void *data, + struct wl_registry *wl_registry, + uint32_t name) +{ +} + +static const struct wl_registry_listener registry_listener = { + __cb_wl_resistry_global_callback, + __cb_wl_resistry_global_remove_callback +}; + +static void +_wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display, + const char *func_name) +{ + int dpy_err; + char buf[1024]; + strerror_r(errno, buf, sizeof(buf)); + + if (wl_vk_display->last_error == errno) + return; + + TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf); + + dpy_err = wl_display_get_error(wl_vk_display->wl_display); + if (dpy_err == EPROTO) { + const struct wl_interface *err_interface; + uint32_t err_proxy_id, err_code; + err_code = wl_display_get_protocol_error(wl_vk_display->wl_display, + &err_interface, + &err_proxy_id); + TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d", + err_interface->name, err_code, err_proxy_id); + } + + wl_vk_display->last_error = errno; +} + +tpl_result_t +_thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display) +{ + struct wl_registry *registry = NULL; + struct wl_event_queue *queue = NULL; + struct wl_display *display_wrapper = NULL; + struct wl_proxy *wl_tbm = NULL; + struct wayland_tbm_client *wl_tbm_client = NULL; + int ret; + tpl_result_t result = TPL_ERROR_NONE; + + queue = wl_display_create_queue(wl_vk_display->wl_display); + if (!queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_vk_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display); + if (!wl_vk_display->ev_queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_vk_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display); + if (!display_wrapper) { + TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)", + wl_vk_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue); + + registry = wl_display_get_registry(display_wrapper); + if (!registry) { + TPL_ERR("Failed to create wl_registry"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_wrapper_destroy(display_wrapper); + display_wrapper = NULL; + + wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display); + if (!wl_tbm_client) { + TPL_ERR("Failed to initialize wl_tbm_client."); + result = TPL_ERROR_INVALID_CONNECTION; + goto fini; + } + + wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client); + if (!wl_tbm) { + TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client); + result = TPL_ERROR_INVALID_CONNECTION; + goto fini; + } + + wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue); + wl_vk_display->wl_tbm_client = wl_tbm_client; + + if (wl_registry_add_listener(registry, ®istry_listener, + wl_vk_display)) { + TPL_ERR("Failed to wl_registry_add_listener"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue); + if (ret == -1) { + _wl_display_print_err(wl_vk_display, "roundtrip_queue"); + result = 
TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + if (wl_vk_display->explicit_sync) { + wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync, + wl_vk_display->ev_queue); + TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", + wl_vk_display->explicit_sync); + } + + wl_vk_display->wl_initialized = TPL_TRUE; + + TPL_INFO("[WAYLAND_INIT]", + "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)", + wl_vk_display, wl_vk_display->wl_display, + wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue); + TPL_INFO("[WAYLAND_INIT]", + "explicit_sync(%p)", + wl_vk_display->explicit_sync); + +fini: + if (display_wrapper) + wl_proxy_wrapper_destroy(display_wrapper); + if (registry) + wl_registry_destroy(registry); + if (queue) + wl_event_queue_destroy(queue); + + return result; +} + +void +_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display) +{ + /* If wl_vk_display is in prepared state, cancel it */ + if (wl_vk_display->prepared) { + wl_display_cancel_read(wl_vk_display->wl_display); + wl_vk_display->prepared = TPL_FALSE; + } + + if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display, + wl_vk_display->ev_queue) == -1) { + _wl_display_print_err(wl_vk_display, "dispatch_queue_pending"); + } - if (twe_check_native_handle_is_wl_display(native_dpy)) + if (wl_vk_display->explicit_sync) { + TPL_INFO("[EXPLICIT_SYNC_DESTROY]", + "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.", + wl_vk_display, wl_vk_display->explicit_sync); + zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync); + wl_vk_display->explicit_sync = NULL; + } + + if (wl_vk_display->wl_tbm_client) { + struct wl_proxy *wl_tbm = NULL; + + wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm( + wl_vk_display->wl_tbm_client); + if (wl_tbm) { + wl_proxy_set_queue(wl_tbm, NULL); + } + + TPL_INFO("[WL_TBM_DEINIT]", + "wl_vk_display(%p) wl_tbm_client(%p)", + wl_vk_display, wl_vk_display->wl_tbm_client); + wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client); + wl_vk_display->wl_tbm_client = NULL; + } + + wl_event_queue_destroy(wl_vk_display->ev_queue); + + wl_vk_display->wl_initialized = TPL_FALSE; + + TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)", + wl_vk_display, wl_vk_display->wl_display); +} + +static void* +_thread_init(void *data) +{ + tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data; + + if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) { + TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)", + wl_vk_display, wl_vk_display->wl_display); + } + + if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) { + TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED"); + } + + return wl_vk_display; +} + +static tpl_bool_t +__thread_func_disp_prepare(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + /* If this wl_vk_display is already prepared, + * do nothing in this function. */ + if (wl_vk_display->prepared) + return TPL_FALSE; + + /* If there is a last_error, there is no need to poll, + * so skip directly to dispatch. 
+ * prepare -> dispatch */ + if (wl_vk_display->last_error) return TPL_TRUE; + while (wl_display_prepare_read_queue(wl_vk_display->wl_display, + wl_vk_display->ev_queue) != 0) { + if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display, + wl_vk_display->ev_queue) == -1) { + _wl_display_print_err(wl_vk_display, "dispatch_queue_pending"); + } + } + + wl_vk_display->prepared = TPL_TRUE; + + wl_display_flush(wl_vk_display->wl_display); + return TPL_FALSE; } +static tpl_bool_t +__thread_func_disp_check(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + tpl_bool_t ret = TPL_FALSE; + + if (!wl_vk_display->prepared) + return ret; + + /* If prepared, but last_error is set, + * cancel_read is executed and FALSE is returned. + * That can lead to G_SOURCE_REMOVE by calling disp_prepare again + * and skipping disp_check from prepare to disp_dispatch. + * check -> prepare -> dispatch -> G_SOURCE_REMOVE */ + if (wl_vk_display->prepared && wl_vk_display->last_error) { + wl_display_cancel_read(wl_vk_display->wl_display); + return ret; + } + + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_read_events(wl_vk_display->wl_display) == -1) + _wl_display_print_err(wl_vk_display, "read_event"); + ret = TPL_TRUE; + } else { + wl_display_cancel_read(wl_vk_display->wl_display); + ret = TPL_FALSE; + } + + wl_vk_display->prepared = TPL_FALSE; + + return ret; +} + +static tpl_bool_t +__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + TPL_IGNORE(message); + + /* If there is last_error, SOURCE_REMOVE should be returned + * to remove the gsource from the main loop. + * This is because wl_vk_display is not valid since last_error was set.*/ + if (wl_vk_display->last_error) { + return TPL_FALSE; + } + + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display, + wl_vk_display->ev_queue) == -1) { + _wl_display_print_err(wl_vk_display, "dispatch_queue_pending"); + } + } + + wl_display_flush(wl_vk_display->wl_display); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + + return TPL_TRUE; +} + +static void +__thread_func_disp_finalize(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + if (wl_vk_display->wl_initialized) + _thread_wl_display_fini(wl_vk_display); + + TPL_LOG_T("WL_EGL", "finalize| wl_vk_display(%p) tpl_gsource(%p)", + wl_vk_display, gsource); + + return; +} + + +static tpl_gsource_functions disp_funcs = { + .prepare = __thread_func_disp_prepare, + .check = __thread_func_disp_check, + .dispatch = __thread_func_disp_dispatch, + .finalize = __thread_func_disp_finalize, +}; + static tpl_result_t __tpl_wl_vk_wsi_display_init(tpl_display_t *display) { - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; - TPL_ASSERT(display); + tpl_wl_vk_display_t *wl_vk_display = NULL; + /* Do not allow default display in wayland */ if (!display->native_handle) { TPL_ERR("Invalid native handle for display."); return TPL_ERROR_INVALID_PARAMETER; } - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) calloc(1, - sizeof(tpl_wl_vk_display_t)); - if (!wayland_vk_wsi_display) { + if (!_check_native_handle_is_wl_display(display->native_handle)) { + TPL_ERR("native_handle(%p) is not wl_display", display->native_handle); + return 
TPL_ERROR_INVALID_PARAMETER; + } + + wl_vk_display = (tpl_wl_vk_display_t *) calloc(1, + sizeof(tpl_wl_vk_display_t)); + if (!wl_vk_display) { TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t."); return TPL_ERROR_OUT_OF_MEMORY; } - display->backend.data = wayland_vk_wsi_display; + display->backend.data = wl_vk_display; + display->bufmgr_fd = -1; - if (twe_check_native_handle_is_wl_display(display->native_handle)) { - wayland_vk_wsi_display->wl_thread = twe_thread_create(); - if (!wayland_vk_wsi_display->wl_thread) { - TPL_ERR("Failed to create twe_thread."); - goto free_display; - } + wl_vk_display->tdm_initialized = TPL_FALSE; + wl_vk_display->wl_initialized = TPL_FALSE; + + wl_vk_display->ev_queue = NULL; + wl_vk_display->wl_display = (struct wl_display *)display->native_handle; + wl_vk_display->last_error = 0; + wl_vk_display->use_explicit_sync = TPL_FALSE; // default disabled + wl_vk_display->prepared = TPL_FALSE; + + /* Wayland Interfaces */ + wl_vk_display->explicit_sync = NULL; + wl_vk_display->wl_tbm_client = NULL; - wayland_vk_wsi_display->twe_display = - twe_display_add(wayland_vk_wsi_display->wl_thread, - display->native_handle, - display->backend.type); - if (!wayland_vk_wsi_display->twe_display) { - TPL_ERR("Failed to add native_display(%p) to thread(%p)", - display->native_handle, - wayland_vk_wsi_display->wl_thread); - goto free_display; + /* Vulkan specific surface capabilities */ + wl_vk_display->min_buffer = 2; + wl_vk_display->max_buffer = VK_CLIENT_QUEUE_SIZE; + wl_vk_display->present_modes = TPL_DISPLAY_PRESENT_MODE_FIFO; + + wl_vk_display->use_wait_vblank = TPL_TRUE; // default enabled + { + char *env = tpl_getenv("TPL_WAIT_VBLANK"); + if (env && !atoi(env)) { + wl_vk_display->use_wait_vblank = TPL_FALSE; } + } - } else { - TPL_ERR("Invalid native handle for display."); + tpl_gmutex_init(&wl_vk_display->wl_event_mutex); + + /* Create gthread */ + wl_vk_display->thread = tpl_gthread_create("wl_egl_thread", + (tpl_gthread_func)_thread_init, + (void *)wl_vk_display); + if (!wl_vk_display->thread) { + TPL_ERR("Failed to create wl_egl_thread"); goto free_display; } - TPL_LOG_T("WL_VK", - "[INIT DISPLAY] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", - wayland_vk_wsi_display, - wayland_vk_wsi_display->wl_thread, - wayland_vk_wsi_display->twe_display); + wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread, + (void *)wl_vk_display, + wl_display_get_fd(wl_vk_display->wl_display), + &disp_funcs, SOURCE_TYPE_NORMAL); + if (!wl_vk_display->disp_source) { + TPL_ERR("Failed to add native_display(%p) to thread(%p)", + display->native_handle, + wl_vk_display->thread); + goto free_display; + } + + wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread, + (void *)wl_vk_display, + wl_vk_display->tdm_display_fd, + &tdm_funcs, SOURCE_TYPE_NORMAL); + if (!wl_vk_display->tdm_source) { + TPL_ERR("Failed to create tdm_gsource\n"); + goto free_display; + } + + TPL_INFO("[DISPLAY_INIT]", + "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_vk_display, + wl_vk_display->thread, + wl_vk_display->wl_display); + + TPL_INFO("[DISPLAY_INIT]", + "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)", + wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE", + wl_vk_display->use_explicit_sync ? 
"TRUE" : "FALSE"); return TPL_ERROR_NONE; free_display: - if (wayland_vk_wsi_display) { - if (wayland_vk_wsi_display->twe_display) - twe_display_del(wayland_vk_wsi_display->twe_display); - if (wayland_vk_wsi_display->wl_thread) - twe_thread_destroy(wayland_vk_wsi_display->wl_thread); - - wayland_vk_wsi_display->wl_thread = NULL; - wayland_vk_wsi_display->twe_display = NULL; + if (wl_vk_display->thread) { + if (wl_vk_display->tdm_source) + tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); + if (wl_vk_display->disp_source) + tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); - free(wayland_vk_wsi_display); - display->backend.data = NULL; + tpl_gthread_destroy(wl_vk_display->thread); } + wl_vk_display->thread = NULL; + free(wl_vk_display); + + display->backend.data = NULL; return TPL_ERROR_INVALID_OPERATION; } static void __tpl_wl_vk_wsi_display_fini(tpl_display_t *display) { - tpl_wl_vk_display_t *wayland_vk_wsi_display; + tpl_wl_vk_display_t *wl_vk_display; TPL_ASSERT(display); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; - if (wayland_vk_wsi_display) { - - TPL_LOG_T("WL_VK", - "[FINI] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", - wayland_vk_wsi_display, - wayland_vk_wsi_display->wl_thread, - wayland_vk_wsi_display->twe_display); - - if (wayland_vk_wsi_display->twe_display) { - tpl_result_t ret = TPL_ERROR_NONE; - ret = twe_display_del(wayland_vk_wsi_display->twe_display); - if (ret != TPL_ERROR_NONE) - TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)", - wayland_vk_wsi_display->twe_display, - wayland_vk_wsi_display->wl_thread); - wayland_vk_wsi_display->twe_display = NULL; + wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data; + if (wl_vk_display) { + TPL_INFO("[DISPLAY_FINI]", + "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_vk_display, + wl_vk_display->thread, + wl_vk_display->wl_display); + + if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) { + tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); + wl_vk_display->tdm_source = NULL; + } + + if (wl_vk_display->disp_source) { + tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); + wl_vk_display->disp_source = NULL; } - if (wayland_vk_wsi_display->wl_thread) { - twe_thread_destroy(wayland_vk_wsi_display->wl_thread); - wayland_vk_wsi_display->wl_thread = NULL; + if (wl_vk_display->thread) { + tpl_gthread_destroy(wl_vk_display->thread); + wl_vk_display->thread = NULL; } - free(wayland_vk_wsi_display); + tpl_gmutex_clear(&wl_vk_display->wl_event_mutex); + + free(wl_vk_display); } + display->backend.data = NULL; } @@ -335,23 +838,16 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( tpl_display_t *display, tpl_handle_t window, int *min, int *max) { - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; + wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); - if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; - - res = twe_display_get_buffer_count(wayland_vk_wsi_display->twe_display, - min, max); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to query buffer count. 
twe_display(%p)", - wayland_vk_wsi_display->twe_display); - return res; - } + if (min) *min = wl_vk_display->min_buffer; + if (max) *max = wl_vk_display->max_buffer; return TPL_ERROR_NONE; } @@ -359,26 +855,18 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( static tpl_result_t __tpl_wl_vk_wsi_display_query_window_supported_present_modes( tpl_display_t *display, - tpl_handle_t window, int *modes) + tpl_handle_t window, int *present_modes) { - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; - - if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; + wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); - if (modes) { - res = twe_display_get_present_mode(wayland_vk_wsi_display->twe_display, - modes); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to query present modes. twe_display(%p)", - wayland_vk_wsi_display->twe_display); - return res; - } + if (present_modes) { + *present_modes = wl_vk_display->present_modes; } return TPL_ERROR_NONE; @@ -388,7 +876,7 @@ static tpl_result_t __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; twe_surface_h twe_surface = NULL; TPL_ASSERT(surface); @@ -402,11 +890,11 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) return TPL_ERROR_OUT_OF_MEMORY; } - wayland_vk_wsi_display = + wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - if (!wayland_vk_wsi_display) { - TPL_ERR("Invalid parameter. wayland_vk_wsi_display(%p)", - wayland_vk_wsi_display); + if (!wl_vk_display) { + TPL_ERR("Invalid parameter. 
wl_vk_display(%p)", + wl_vk_display); free(wayland_vk_wsi_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -414,13 +902,13 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) surface->backend.data = (void *)wayland_vk_wsi_surface; wayland_vk_wsi_surface->tbm_queue = NULL; - twe_surface = twe_surface_add(wayland_vk_wsi_display->wl_thread, - wayland_vk_wsi_display->twe_display, + twe_surface = twe_surface_add(wl_vk_display->thread, + wl_vk_display->twe_display, surface->native_handle, surface->format, surface->num_buffers); if (!twe_surface) { TPL_ERR("Failed to add native_surface(%p) to thread(%p)", - surface->native_handle, wayland_vk_wsi_display->wl_thread); + surface->native_handle, wl_vk_display->thread); free(wayland_vk_wsi_surface); surface->backend.data = NULL; return TPL_ERROR_OUT_OF_MEMORY; @@ -441,7 +929,7 @@ static void __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->display); @@ -449,9 +937,9 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; if (wayland_vk_wsi_surface == NULL) return; - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - if (wayland_vk_wsi_display == NULL) return; + if (wl_vk_display == NULL) return; if (wayland_vk_wsi_surface->tbm_queue) __tpl_wl_vk_wsi_surface_destroy_swapchain(surface); @@ -470,7 +958,7 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) != TPL_ERROR_NONE) { TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", wayland_vk_wsi_surface->twe_surface, - wayland_vk_wsi_display->wl_thread); + wl_vk_display->thread); } wayland_vk_wsi_surface->twe_surface = NULL; @@ -596,7 +1084,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface = NULL; tpl_wl_vk_surface_t *wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - tpl_wl_vk_display_t *wayland_vk_wsi_display = + tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; tbm_surface_queue_error_e tsq_err = 0; tpl_result_t lock_res = TPL_ERROR_NONE; @@ -607,7 +1095,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_OBJECT_UNLOCK(surface); TRACE_BEGIN("WAIT_DEQUEUEABLE"); - lock_res = twe_display_lock(wayland_vk_wsi_display->twe_display); + lock_res = twe_display_lock(wl_vk_display->twe_display); res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface, timeout_ns); TRACE_END(); @@ -617,13 +1105,13 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")", timeout_ns); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } else if (res != TPL_ERROR_NONE) { TPL_ERR("Invalid operation. twe_surface(%p) timeout_ns(%" PRIu64 ")", wayland_vk_wsi_surface->twe_surface, timeout_ns); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } @@ -631,7 +1119,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. 
Do not process dequeue.", wayland_vk_wsi_surface->tbm_queue); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } @@ -642,7 +1130,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d", wayland_vk_wsi_surface->tbm_queue, tsq_err); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } @@ -657,7 +1145,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return tbm_surface; } @@ -668,7 +1156,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, int *buffer_count) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; int i; tpl_result_t ret = TPL_ERROR_NONE; @@ -680,15 +1168,15 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, TPL_ASSERT(buffer_count); wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)surface->display->backend.data; + wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { + if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, NULL, buffer_count); if (ret != TPL_ERROR_NONE) { TPL_ERR("Failed to get buffer_count. 
twe_surface(%p)", wayland_vk_wsi_surface->twe_surface); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return ret; } @@ -697,7 +1185,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, sizeof(tbm_surface_h)); if (!wayland_vk_wsi_surface->swapchain_buffers) { TPL_ERR("Failed to allocate memory for buffers."); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_OUT_OF_MEMORY; } @@ -709,7 +1197,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, wayland_vk_wsi_surface, wayland_vk_wsi_surface->twe_surface); free(wayland_vk_wsi_surface->swapchain_buffers); wayland_vk_wsi_surface->swapchain_buffers = NULL; - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return ret; } @@ -725,7 +1213,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, *buffers = wayland_vk_wsi_surface->swapchain_buffers; - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); } return TPL_ERROR_NONE; @@ -775,7 +1263,7 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, int height, int buffer_count, int present_mode) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(surface); @@ -785,9 +1273,9 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; TPL_ASSERT(wayland_vk_wsi_surface); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - TPL_ASSERT(wayland_vk_wsi_display); + TPL_ASSERT(wl_vk_display); if (wayland_vk_wsi_surface->tbm_queue) { int old_width = tbm_surface_queue_get_width(wayland_vk_wsi_surface->tbm_queue); @@ -862,7 +1350,7 @@ static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; unsigned int ref; @@ -872,15 +1360,15 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) TPL_ASSERT(surface->display->backend.data); wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { + if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference); if (ref > 0) { TPL_LOG_T("WL_VK", "This swapchain is still valid. 
| twe_surface(%p)", wayland_vk_wsi_surface->twe_surface); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } @@ -888,7 +1376,7 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) if (wayland_vk_wsi_surface->reset) { TPL_LOG_T("WL_VK", "Since reset is in the TRUE state, it will not be destroyed."); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } @@ -909,13 +1397,13 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) if (res != TPL_ERROR_NONE) { TPL_ERR("Failed to destroy swapchain. twe_surface(%p)", wayland_vk_wsi_surface->twe_surface); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return res; } wayland_vk_wsi_surface->tbm_queue = NULL; - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); } return TPL_ERROR_NONE; @@ -926,7 +1414,7 @@ __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) { if (!native_dpy) return TPL_FALSE; - if (twe_check_native_handle_is_wl_display(native_dpy)) + if (_check_native_handle_is_wl_display(native_dpy)) return TPL_TRUE; return TPL_FALSE; -- 2.7.4 From 5fefb2956af2ed1f6d8f4f048d86febd82e59946 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 1 Apr 2021 16:22:02 +0900 Subject: [PATCH 13/16] Implement tpl_wl_vk_surface using tpl_gthread_utils. Change-Id: Ibb58ef10fa02fb6220453c5c18b7760a7a4d9994 Signed-off-by: Joonbum Ko Re-implement tpl_wl_vk_surface using tpl_gthread_utils Change-Id: I59ce5fb2092f60956ac1a2322f701b4a610016fe Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 690 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 540 insertions(+), 150 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 3a846b9..fceee7e 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -64,6 +64,8 @@ struct _tpl_wl_vk_display { struct _tpl_wl_vk_swapchain { tpl_wl_vk_surface_t *wl_vk_surface; + tbm_surface_queue_h tbm_queue; + struct { int width; int height; @@ -82,8 +84,6 @@ struct _tpl_wl_vk_surface { tpl_wl_vk_swapchain_t *swapchain; - tbm_surface_queue_h tbm_queue; - struct wl_surface *wl_surface; struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ @@ -872,55 +872,341 @@ __tpl_wl_vk_wsi_display_query_window_supported_present_modes( return TPL_ERROR_NONE; } +static void +_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tpl_bool_t need_to_release = TPL_FALSE; + tpl_bool_t need_to_cancel = TPL_FALSE; + buffer_status_t status = RELEASED; + int idx = 0; + + while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) { + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + wl_vk_buffer = wl_vk_surface->buffers[idx]; + + if (wl_vk_buffer) { + wl_vk_surface->buffers[idx] = NULL; + wl_vk_surface->buffer_cnt--; + } else { + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + idx++; + continue; + } + + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + status 
= wl_vk_buffer->status;
+
+		TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
+				  idx, wl_vk_buffer,
+				  wl_vk_buffer->tbm_surface,
+				  status_to_string[status]);
+
+		if (status >= ENQUEUED) {
+			tpl_bool_t need_to_wait  = TPL_FALSE;
+			tpl_result_t wait_result = TPL_ERROR_NONE;
+
+			if (!wl_vk_display->use_explicit_sync &&
+				status < WAITING_VBLANK)
+				need_to_wait = TPL_TRUE;
+
+			if (wl_vk_display->use_explicit_sync &&
+				status < COMMITTED)
+				need_to_wait = TPL_TRUE;
+
+			if (need_to_wait) {
+				tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+				wait_result = tpl_cond_timed_wait(&wl_vk_buffer->cond,
+												  &wl_vk_buffer->mutex,
+												  16); /* 16ms */
+				tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
+
+				status = wl_vk_buffer->status;
+
+				if (wait_result == TPL_ERROR_TIME_OUT)
+					TPL_WARN("timeout occurred while waiting for the signal. wl_vk_buffer(%p)",
+							 wl_vk_buffer);
+			}
+		}
+
+		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
+		/* It has been acquired but has not yet been released, so this
+		 * buffer must be released. */
+		need_to_release = (status >= ACQUIRED && status <= COMMITTED);
+
+		/* After dequeue, it has not been enqueued yet
+		 * so cancel_dequeue must be performed. */
+		need_to_cancel = (status == DEQUEUED);
+
+		swapchain = wl_vk_surface->swapchain;
+		if (swapchain && swapchain->tbm_queue) {
+			if (need_to_release) {
+				tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
+													wl_vk_buffer->tbm_surface);
+				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+							wl_vk_buffer->tbm_surface, tsq_err);
+			}
+
+			if (need_to_cancel) {
+				tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
+														   wl_vk_buffer->tbm_surface);
+				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+					TPL_ERR("Failed to cancel dequeue. tbm_surface(%p) tsq_err(%d)",
+							wl_vk_buffer->tbm_surface, tsq_err);
+			}
+		}
+
+		wl_vk_buffer->status = RELEASED;
+
+		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+		if (need_to_release || need_to_cancel)
+			tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);
+
+		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+
+		idx++;
+	}
+}
+
+static tdm_client_vblank*
+_thread_create_tdm_client_vblank(tdm_client *tdm_client)
+{
+	tdm_client_vblank *vblank = NULL;
+	tdm_client_output *tdm_output = NULL;
+	tdm_error tdm_err = TDM_ERROR_NONE;
+
+	if (!tdm_client) {
+		TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
+		return NULL;
+	}
+
+	tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
+	if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
+		TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
+		return NULL;
+	}
+
+	vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
+	if (!vblank || tdm_err != TDM_ERROR_NONE) {
+		TPL_ERR("Failed to create vblank. 
tdm_err(%d)", tdm_err); + return NULL; + } + + tdm_client_vblank_set_enable_fake(vblank, 1); + tdm_client_vblank_set_sync(vblank, 0); + + return vblank; +} + +static void +_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + + /* tbm_surface_queue will be created at swapchain_create */ + + wl_vk_surface->vblank = _thread_create_tdm_client_vblank( + wl_vk_display->tdm_client); + if (wl_vk_surface->vblank) { + TPL_INFO("[VBLANK_INIT]", + "wl_vk_surface(%p) tdm_client(%p) vblank(%p)", + wl_vk_surface, wl_vk_display->tdm_client, + wl_vk_surface->vblank); + } + + if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) { + wl_vk_surface->surface_sync = + zwp_linux_explicit_synchronization_v1_get_synchronization( + wl_vk_display->explicit_sync, wl_vk_surface->wl_surface); + if (wl_vk_surface->surface_sync) { + TPL_INFO("[EXPLICIT_SYNC_INIT]", + "wl_vk_surface(%p) surface_sync(%p)", + wl_vk_surface, wl_vk_surface->surface_sync); + } else { + TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)", + wl_vk_surface); + wl_vk_display->use_explicit_sync = TPL_FALSE; + } + } + + wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc(); +} + +static void +_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + + TPL_INFO("[SURFACE_FINI]", + "wl_vk_surface(%p) wl_surface(%p)", + wl_vk_surface, wl_vk_surface->wl_surface); + + if (wl_vk_surface->vblank_waiting_buffers) { + __tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL); + wl_vk_surface->vblank_waiting_buffers = NULL; + } + + if (wl_vk_surface->surface_sync) { + TPL_INFO("[SURFACE_SYNC_DESTROY]", + "wl_vk_surface(%p) surface_sync(%p)", + wl_vk_surface, wl_vk_surface->surface_sync); + zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync); + wl_vk_surface->surface_sync = NULL; + } + + if (wl_vk_surface->vblank) { + TPL_INFO("[VBLANK_DESTROY]", + "wl_vk_surface(%p) vblank(%p)", + wl_vk_surface, wl_vk_surface->vblank); + tdm_client_vblank_destroy(wl_vk_surface->vblank); + wl_vk_surface->vblank = NULL; + } + + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); +} + +static tpl_bool_t +__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + + wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource); + + if (message == 1) { /* Initialize surface */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) initialize message received!", + wl_vk_surface); + _thread_wl_vk_surface_init(wl_vk_surface); + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } else if (message == 2) { /* Create tbm_surface_queue */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) queue creation message received!", + wl_vk_surface); + + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } else if (message == 3) { /* Acquirable message */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) acquirable message received!", + wl_vk_surface); + _thread_surface_queue_acquire(wl_vk_surface); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } else if (message == 4) { /* swapchain destroy */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!", + wl_vk_surface); + + tpl_gcond_signal(&wl_vk_surface->surf_cond); + 
tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } + + return TPL_TRUE; +} + +static void +__thread_func_surf_finalize(tpl_gsource *gsource) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + + wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource); + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); + + _thread_wl_vk_surface_fini(wl_vk_surface); + + TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)", + wl_vk_surface, gsource); +} + +static tpl_gsource_functions surf_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_surf_dispatch, + .finalize = __thread_func_surf_finalize, +}; + + static tpl_result_t __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wl_vk_display = NULL; - twe_surface_h twe_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; + tpl_gsource *surf_source = NULL; TPL_ASSERT(surface); + TPL_ASSERT(surface->display); TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); TPL_ASSERT(surface->native_handle); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) calloc(1, + wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); + + wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1, sizeof(tpl_wl_vk_surface_t)); - if (!wayland_vk_wsi_surface) { + if (!wl_vk_surface) { TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t."); return TPL_ERROR_OUT_OF_MEMORY; } - wl_vk_display = - (tpl_wl_vk_display_t *)surface->display->backend.data; - if (!wl_vk_display) { - TPL_ERR("Invalid parameter. wl_vk_display(%p)", - wl_vk_display); - free(wayland_vk_wsi_surface); - return TPL_ERROR_INVALID_PARAMETER; + surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface, + -1, &surf_funcs, SOURCE_TYPE_NORMAL); + if (!surf_source) { + TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)", + wl_vk_surface); + free(wl_vk_surface); + surface->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; } - surface->backend.data = (void *)wayland_vk_wsi_surface; - wayland_vk_wsi_surface->tbm_queue = NULL; + surface->backend.data = (void *)wl_vk_surface; + surface->width = -1; + surface->height = -1; - twe_surface = twe_surface_add(wl_vk_display->thread, - wl_vk_display->twe_display, - surface->native_handle, - surface->format, surface->num_buffers); - if (!twe_surface) { - TPL_ERR("Failed to add native_surface(%p) to thread(%p)", - surface->native_handle, wl_vk_display->thread); - free(wayland_vk_wsi_surface); - surface->backend.data = NULL; - return TPL_ERROR_OUT_OF_MEMORY; + wl_vk_surface->surf_source = surf_source; + wl_vk_surface->swapchain = NULL; + + wl_vk_surface->wl_vk_display = wl_vk_display; + wl_vk_surface->wl_surface = (struct wl_surface *)surface->native_handle; + + wl_vk_surface->reset = TPL_FALSE; + wl_vk_surface->is_activated = TPL_FALSE; + wl_vk_surface->vblank_done = TPL_FALSE; + + wl_vk_surface->render_done_cnt = 0; + + wl_vk_surface->vblank = NULL; + wl_vk_surface->surface_sync = NULL; + + { + int i = 0; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) + wl_vk_surface->buffers[i] = NULL; + wl_vk_surface->buffer_cnt = 0; } - wayland_vk_wsi_surface->twe_surface = twe_surface; - wayland_vk_wsi_surface->is_activated = TPL_FALSE; - wayland_vk_wsi_surface->swapchain_buffers = NULL; + tpl_gmutex_init(&wl_vk_surface->surf_mutex); + tpl_gcond_init(&wl_vk_surface->surf_cond); - TPL_LOG_T("WL_VK", - 
"[INIT]tpl_surface(%p) tpl_wl_vk_surface(%p) twe_surface(%p)", - surface, wayland_vk_wsi_surface, twe_surface); + tpl_gmutex_init(&wl_vk_surface->buffers_mutex); + + /* Initialize in thread */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + tpl_gsource_send_message(wl_vk_surface->surf_source, 1); + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + + TPL_INFO("[SURFACE_INIT]", + "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)", + surface, wl_vk_surface, wl_vk_surface->surf_source); return TPL_ERROR_NONE; } @@ -928,42 +1214,48 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) static void __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - if (wayland_vk_wsi_surface == NULL) return; + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - if (wl_vk_display == NULL) return; + TPL_CHECK_ON_NULL_RETURN(wl_vk_display); - if (wayland_vk_wsi_surface->tbm_queue) - __tpl_wl_vk_wsi_surface_destroy_swapchain(surface); + TPL_INFO("[SURFACE_FINI][BEGIN]", + "wl_vk_surface(%p) wl_surface(%p)", + wl_vk_surface, wl_vk_surface->wl_surface); - if (wayland_vk_wsi_surface->swapchain_buffers) { - free(wayland_vk_wsi_surface->swapchain_buffers); - wayland_vk_wsi_surface->swapchain_buffers = NULL; - } + if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) { + /* finalize swapchain */ - TPL_LOG_T("WL_VK", - "[FINI] wayland_vk_wsi_surface(%p) native_surface(%p) twe_surface(%p)", - wayland_vk_wsi_surface, surface->native_handle, - wayland_vk_wsi_surface->twe_surface); - - if (twe_surface_del(wayland_vk_wsi_surface->twe_surface) - != TPL_ERROR_NONE) { - TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", - wayland_vk_wsi_surface->twe_surface, - wl_vk_display->thread); } - wayland_vk_wsi_surface->twe_surface = NULL; + wl_vk_surface->swapchain = NULL; + + if (wl_vk_surface->surf_source) + tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE); + wl_vk_surface->surf_source = NULL; - free(wayland_vk_wsi_surface); + _print_buffer_lists(wl_vk_surface); + + wl_vk_surface->wl_surface = NULL; + wl_vk_surface->wl_vk_display = NULL; + wl_vk_surface->tpl_surface = NULL; + + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + tpl_gmutex_clear(&wl_vk_surface->surf_mutex); + tpl_gcond_clear(&wl_vk_surface->surf_cond); + + TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface); + + free(wl_vk_surface); surface->backend.data = NULL; } @@ -979,7 +1271,7 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display->native_handle); TPL_ASSERT(tbm_surface); - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; tbm_surface_queue_error_e tsq_err; @@ -999,7 +1291,7 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, num_rects, rects); } } - tsq_err = tbm_surface_queue_enqueue(wayland_vk_wsi_surface->tbm_queue, + tsq_err = tbm_surface_queue_enqueue(wl_vk_surface->tbm_queue, tbm_surface); if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) { 
tbm_surface_internal_unref(tbm_surface); @@ -1010,7 +1302,7 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, if (sync_fence != -1) { tpl_result_t res = TPL_ERROR_NONE; - res = twe_surface_set_sync_fd(wayland_vk_wsi_surface->twe_surface, + res = twe_surface_set_sync_fd(wl_vk_surface->twe_surface, tbm_surface, sync_fence); if (res != TPL_ERROR_NONE) { TPL_WARN("Failed to set sync_fd(%d). Fallback to async mode.", @@ -1032,23 +1324,23 @@ __tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - return !(wayland_vk_wsi_surface->reset); + return !(wl_vk_surface->reset); } static tpl_result_t __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - if (!wayland_vk_wsi_surface) { - TPL_ERR("Invalid backend surface. surface(%p) wayland_vk_wsi_surface(%p)", - surface, wayland_vk_wsi_surface); + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + if (!wl_vk_surface) { + TPL_ERR("Invalid backend surface. surface(%p) wl_vk_surface(%p)", + surface, wl_vk_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -1059,7 +1351,7 @@ __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); - tsq_err = tbm_surface_queue_cancel_dequeue(wayland_vk_wsi_surface->tbm_queue, + tsq_err = tbm_surface_queue_cancel_dequeue(wl_vk_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface); @@ -1082,7 +1374,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display); tbm_surface_h tbm_surface = NULL; - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; @@ -1096,7 +1388,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_OBJECT_UNLOCK(surface); TRACE_BEGIN("WAIT_DEQUEUEABLE"); lock_res = twe_display_lock(wl_vk_display->twe_display); - res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface, + res = twe_surface_wait_dequeueable(wl_vk_surface->twe_surface, timeout_ns); TRACE_END(); TPL_OBJECT_LOCK(surface); @@ -1109,26 +1401,26 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, return NULL; } else if (res != TPL_ERROR_NONE) { TPL_ERR("Invalid operation. twe_surface(%p) timeout_ns(%" PRIu64 ")", - wayland_vk_wsi_surface->twe_surface, timeout_ns); + wl_vk_surface->twe_surface, timeout_ns); if (lock_res == TPL_ERROR_NONE) twe_display_unlock(wl_vk_display->twe_display); return NULL; } - if (wayland_vk_wsi_surface->reset) { + if (wl_vk_surface->reset) { TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. 
Do not process dequeue.", - wayland_vk_wsi_surface->tbm_queue); + wl_vk_surface->tbm_queue); if (lock_res == TPL_ERROR_NONE) twe_display_unlock(wl_vk_display->twe_display); return NULL; } - tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue, + tsq_err = tbm_surface_queue_dequeue(wl_vk_surface->tbm_queue, &tbm_surface); if (!tbm_surface) { TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d", - wayland_vk_wsi_surface->tbm_queue, tsq_err); + wl_vk_surface->tbm_queue, tsq_err); if (lock_res == TPL_ERROR_NONE) twe_display_unlock(wl_vk_display->twe_display); return NULL; @@ -1141,7 +1433,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, } TPL_LOG_T("WL_VK", "[DEQ] tbm_queue(%p) tbm_surface(%p) bo(%d)", - wayland_vk_wsi_surface->tbm_queue, tbm_surface, + wl_vk_surface->tbm_queue, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); if (lock_res == TPL_ERROR_NONE) @@ -1155,7 +1447,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, tbm_surface_h **buffers, int *buffer_count) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; int i; tpl_result_t ret = TPL_ERROR_NONE; @@ -1167,51 +1459,51 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, TPL_ASSERT(buffers); TPL_ASSERT(buffer_count); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { - ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, + ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface, NULL, buffer_count); if (ret != TPL_ERROR_NONE) { TPL_ERR("Failed to get buffer_count. twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); twe_display_unlock(wl_vk_display->twe_display); return ret; } - wayland_vk_wsi_surface->swapchain_buffers = (tbm_surface_h *)calloc( + wl_vk_surface->swapchain_buffers = (tbm_surface_h *)calloc( *buffer_count, sizeof(tbm_surface_h)); - if (!wayland_vk_wsi_surface->swapchain_buffers) { + if (!wl_vk_surface->swapchain_buffers) { TPL_ERR("Failed to allocate memory for buffers."); twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_OUT_OF_MEMORY; } - ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, - wayland_vk_wsi_surface->swapchain_buffers, + ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface, + wl_vk_surface->swapchain_buffers, buffer_count); if (ret != TPL_ERROR_NONE) { - TPL_ERR("Failed to get swapchain_buffers. wayland_vk_wsi_surface(%p) twe_surface(%p)", - wayland_vk_wsi_surface, wayland_vk_wsi_surface->twe_surface); - free(wayland_vk_wsi_surface->swapchain_buffers); - wayland_vk_wsi_surface->swapchain_buffers = NULL; + TPL_ERR("Failed to get swapchain_buffers. 
wl_vk_surface(%p) twe_surface(%p)", + wl_vk_surface, wl_vk_surface->twe_surface); + free(wl_vk_surface->swapchain_buffers); + wl_vk_surface->swapchain_buffers = NULL; twe_display_unlock(wl_vk_display->twe_display); return ret; } for (i = 0; i < *buffer_count; i++) { - if (wayland_vk_wsi_surface->swapchain_buffers[i]) { + if (wl_vk_surface->swapchain_buffers[i]) { TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)", - i, wayland_vk_wsi_surface->swapchain_buffers[i], + i, wl_vk_surface->swapchain_buffers[i], tbm_bo_export(tbm_surface_internal_get_bo( - wayland_vk_wsi_surface->swapchain_buffers[i], 0))); - tbm_surface_internal_ref(wayland_vk_wsi_surface->swapchain_buffers[i]); + wl_vk_surface->swapchain_buffers[i], 0))); + tbm_surface_internal_ref(wl_vk_surface->swapchain_buffers[i]); } } - *buffers = wayland_vk_wsi_surface->swapchain_buffers; + *buffers = wl_vk_surface->swapchain_buffers; twe_display_unlock(wl_vk_display->twe_display); } @@ -1224,34 +1516,34 @@ __cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue, void *data) { tpl_surface_t *surface = NULL; - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_bool_t is_activated = TPL_FALSE; surface = (tpl_surface_t *)data; TPL_CHECK_ON_NULL_RETURN(surface); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface); + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); /* When queue_reset_callback is called, if is_activated is different from * its previous state change the reset flag to TPL_TRUE to get a new buffer * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ - is_activated = twe_surface_check_activated(wayland_vk_wsi_surface->twe_surface); + is_activated = twe_surface_check_activated(wl_vk_surface->twe_surface); - if (wayland_vk_wsi_surface->is_activated != is_activated) { + if (wl_vk_surface->is_activated != is_activated) { if (is_activated) { TPL_LOG_T("WL_VK", - "[ACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", - wayland_vk_wsi_surface, surface_queue); + "[ACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)", + wl_vk_surface, surface_queue); } else { TPL_LOG_T("WL_VK", - "[DEACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", - wayland_vk_wsi_surface, surface_queue); + "[DEACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)", + wl_vk_surface, surface_queue); } - wayland_vk_wsi_surface->is_activated = is_activated; + wl_vk_surface->is_activated = is_activated; } - wayland_vk_wsi_surface->reset = TPL_TRUE; + wl_vk_surface->reset = TPL_TRUE; if (surface->reset_cb) surface->reset_cb(surface->reset_data); @@ -1262,7 +1554,7 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, tbm_format format, int width, int height, int buffer_count, int present_mode) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; @@ -1270,78 +1562,78 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, TPL_ASSERT(surface->backend.data); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - TPL_ASSERT(wayland_vk_wsi_surface); + wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + TPL_ASSERT(wl_vk_surface); wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; TPL_ASSERT(wl_vk_display); - if 
(wayland_vk_wsi_surface->tbm_queue) { - int old_width = tbm_surface_queue_get_width(wayland_vk_wsi_surface->tbm_queue); - int old_height = tbm_surface_queue_get_height(wayland_vk_wsi_surface->tbm_queue); + if (wl_vk_surface->tbm_queue) { + int old_width = tbm_surface_queue_get_width(wl_vk_surface->tbm_queue); + int old_height = tbm_surface_queue_get_height(wl_vk_surface->tbm_queue); if (old_width != width || old_height != height) { - tbm_surface_queue_reset(wayland_vk_wsi_surface->tbm_queue, + tbm_surface_queue_reset(wl_vk_surface->tbm_queue, width, height, format); TPL_LOG_T("WL_VK", - "[RESIZE] wayland_vk_wsi_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)", - wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue, + "[RESIZE] wl_vk_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)", + wl_vk_surface, wl_vk_surface->tbm_queue, old_width, old_height, width, height); } - if (wayland_vk_wsi_surface->swapchain_buffers) { + if (wl_vk_surface->swapchain_buffers) { int i; - for (i = 0; i < wayland_vk_wsi_surface->buffer_count; i++) { - if (wayland_vk_wsi_surface->swapchain_buffers[i]) { - TPL_DEBUG("unref tbm_surface(%p)", wayland_vk_wsi_surface->swapchain_buffers[i]); - tbm_surface_internal_unref(wayland_vk_wsi_surface->swapchain_buffers[i]); - wayland_vk_wsi_surface->swapchain_buffers[i] = NULL; + for (i = 0; i < wl_vk_surface->buffer_count; i++) { + if (wl_vk_surface->swapchain_buffers[i]) { + TPL_DEBUG("unref tbm_surface(%p)", wl_vk_surface->swapchain_buffers[i]); + tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]); + wl_vk_surface->swapchain_buffers[i] = NULL; } } - free(wayland_vk_wsi_surface->swapchain_buffers); - wayland_vk_wsi_surface->swapchain_buffers = NULL; + free(wl_vk_surface->swapchain_buffers); + wl_vk_surface->swapchain_buffers = NULL; } - wayland_vk_wsi_surface->buffer_count = - tbm_surface_queue_get_size(wayland_vk_wsi_surface->tbm_queue); - wayland_vk_wsi_surface->reset = TPL_FALSE; + wl_vk_surface->buffer_count = + tbm_surface_queue_get_size(wl_vk_surface->tbm_queue); + wl_vk_surface->reset = TPL_FALSE; - __tpl_util_atomic_inc(&wayland_vk_wsi_surface->swapchain_reference); + __tpl_util_atomic_inc(&wl_vk_surface->swapchain_reference); - TPL_LOG_T("WL_VK", "[REUSE] wayland_vk_wsi_surface(%p) tbm_queue(%p) size(%d)", - wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue, - wayland_vk_wsi_surface->buffer_count); + TPL_LOG_T("WL_VK", "[REUSE] wl_vk_surface(%p) tbm_queue(%p) size(%d)", + wl_vk_surface, wl_vk_surface->tbm_queue, + wl_vk_surface->buffer_count); return TPL_ERROR_NONE; } - res = twe_surface_create_swapchain(wayland_vk_wsi_surface->twe_surface, + res = twe_surface_create_swapchain(wl_vk_surface->twe_surface, width, height, format, buffer_count, present_mode); if (res != TPL_ERROR_NONE) { TPL_ERR("Failed to create swapchain. 
twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); return res; } - wayland_vk_wsi_surface->tbm_queue = twe_surface_get_tbm_queue( - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->tbm_queue = twe_surface_get_tbm_queue( + wl_vk_surface->twe_surface); /* Set reset_callback to tbm_queue */ - if (tbm_surface_queue_add_reset_cb(wayland_vk_wsi_surface->tbm_queue, + if (tbm_surface_queue_add_reset_cb(wl_vk_surface->tbm_queue, __cb_tbm_queue_reset_callback, (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("TBM surface queue add reset cb failed!"); - twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); - wayland_vk_wsi_surface->tbm_queue = NULL; + twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); + wl_vk_surface->tbm_queue = NULL; return TPL_ERROR_INVALID_OPERATION; } - wayland_vk_wsi_surface->buffer_count = buffer_count; - wayland_vk_wsi_surface->reset = TPL_FALSE; + wl_vk_surface->buffer_count = buffer_count; + wl_vk_surface->reset = TPL_FALSE; - __tpl_util_atomic_set(&wayland_vk_wsi_surface->swapchain_reference, 1); + __tpl_util_atomic_set(&wl_vk_surface->swapchain_reference, 1); return TPL_ERROR_NONE; } @@ -1349,7 +1641,7 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; unsigned int ref; @@ -1359,49 +1651,49 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) TPL_ASSERT(surface->display); TPL_ASSERT(surface->display->backend.data); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { - ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference); + ref = __tpl_util_atomic_dec(&wl_vk_surface->swapchain_reference); if (ref > 0) { TPL_LOG_T("WL_VK", "This swapchain is still valid. 
| twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } - if (wayland_vk_wsi_surface->reset) { + if (wl_vk_surface->reset) { TPL_LOG_T("WL_VK", "Since reset is in the TRUE state, it will not be destroyed."); twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } - if (wayland_vk_wsi_surface->swapchain_buffers) { + if (wl_vk_surface->swapchain_buffers) { int i; - for (i = 0; i < wayland_vk_wsi_surface->buffer_count; i++) { + for (i = 0; i < wl_vk_surface->buffer_count; i++) { TPL_DEBUG("Stop tracking tbm_surface(%p)", - wayland_vk_wsi_surface->swapchain_buffers[i]); - tbm_surface_internal_unref(wayland_vk_wsi_surface->swapchain_buffers[i]); - wayland_vk_wsi_surface->swapchain_buffers[i] = NULL; + wl_vk_surface->swapchain_buffers[i]); + tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]); + wl_vk_surface->swapchain_buffers[i] = NULL; } - free(wayland_vk_wsi_surface->swapchain_buffers); - wayland_vk_wsi_surface->swapchain_buffers = NULL; + free(wl_vk_surface->swapchain_buffers); + wl_vk_surface->swapchain_buffers = NULL; } - res = twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); + res = twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); if (res != TPL_ERROR_NONE) { TPL_ERR("Failed to destroy swapchain. twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); twe_display_unlock(wl_vk_display->twe_display); return res; } - wayland_vk_wsi_surface->tbm_queue = NULL; + wl_vk_surface->tbm_queue = NULL; twe_display_unlock(wl_vk_display->twe_display); } @@ -1458,3 +1750,101 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) backend->create_swapchain = __tpl_wl_vk_wsi_surface_create_swapchain; backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain; } + +static void +__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) +{ + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) { + wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL; + wl_egl_surface->buffer_cnt--; + + wl_egl_buffer->idx = -1; + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + + wl_display_flush(wl_egl_display->wl_display); + + if (wl_egl_buffer->wl_buffer) { + wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer); + wl_egl_buffer->wl_buffer = NULL; + } + + if (wl_egl_buffer->buffer_release) { + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + wl_egl_buffer->buffer_release = NULL; + } + + if (wl_egl_buffer->release_fence_fd != -1) { + close(wl_egl_buffer->release_fence_fd); + wl_egl_buffer->release_fence_fd = -1; + } + + if (wl_egl_buffer->waiting_source) { + tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); + wl_egl_buffer->waiting_source = NULL; + } + + if (wl_egl_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send commit_sync signal to fd(%d)", + wl_egl_buffer->commit_sync_fd); + close(wl_egl_buffer->commit_sync_fd); + wl_egl_buffer->commit_sync_fd = -1; 
+ } + + if (wl_egl_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + wl_egl_buffer->presentation_sync_fd); + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; + } + + if (wl_egl_buffer->rects) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } + + wl_egl_buffer->tbm_surface = NULL; + wl_egl_buffer->bo_name = -1; + + free(wl_egl_buffer); +} + +static int +_get_tbm_surface_bo_name(tbm_surface_h tbm_surface) +{ + return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); +} + +static void +_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) +{ + int idx = 0; + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", + wl_egl_surface, wl_egl_surface->buffer_cnt); + for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { + tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; + if (wl_egl_buffer) { + TPL_INFO("[INFO]", + "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", + idx, wl_egl_buffer, wl_egl_buffer->tbm_surface, + wl_egl_buffer->bo_name, + status_to_string[wl_egl_buffer->status]); + } + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); +} -- 2.7.4 From 2b3e45de48873051c66365b2df997411b4019a21 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 9 Apr 2021 14:24:31 +0900 Subject: [PATCH 14/16] Implement swapchain create/destroy/get_swapchain_buffers. Change-Id: Ic6b07b8247230409808db35ec308dbee2df5861c Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 844 ++++++++++++++++++++++++++++++------------------- 1 file changed, 511 insertions(+), 333 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index fceee7e..caa2ce2 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -169,8 +169,16 @@ struct _tpl_wl_vk_buffer { tpl_wl_vk_surface_t *wl_vk_surface; }; -static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( - tpl_surface_t *surface); +static void +_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface); +static int +_get_tbm_surface_bo_name(tbm_surface_h tbm_surface); +static void +__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer); +static tpl_result_t +_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); +static void +_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); static tpl_bool_t _check_native_handle_is_wl_display(tpl_handle_t native_dpy) @@ -214,7 +222,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) /* If an error occurs in tdm_client_handle_events, it cannot be recovered. * When tdm_source is no longer available due to an unexpected situation, - * wl_egl_thread must remove it from the thread and destroy it. + * wl_vk_thread must remove it from the thread and destroy it. * In that case, tdm_vblank can no longer be used for surfaces and displays * that used this tdm_source. 
*/ if (tdm_err != TDM_ERROR_NONE) { @@ -434,7 +442,7 @@ _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display) if (wl_vk_display->explicit_sync) { wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync, wl_vk_display->ev_queue); - TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", + TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.", wl_vk_display->explicit_sync); } @@ -626,7 +634,7 @@ __thread_func_disp_finalize(tpl_gsource *gsource) if (wl_vk_display->wl_initialized) _thread_wl_display_fini(wl_vk_display); - TPL_LOG_T("WL_EGL", "finalize| wl_vk_display(%p) tpl_gsource(%p)", + TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)", wl_vk_display, gsource); return; @@ -697,11 +705,11 @@ __tpl_wl_vk_wsi_display_init(tpl_display_t *display) tpl_gmutex_init(&wl_vk_display->wl_event_mutex); /* Create gthread */ - wl_vk_display->thread = tpl_gthread_create("wl_egl_thread", + wl_vk_display->thread = tpl_gthread_create("wl_vk_thread", (tpl_gthread_func)_thread_init, (void *)wl_vk_display); if (!wl_vk_display->thread) { - TPL_ERR("Failed to create wl_egl_thread"); + TPL_ERR("Failed to create wl_vk_thread"); goto free_display; } @@ -1092,7 +1100,11 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_vk_surface->surf_mutex); TPL_DEBUG("wl_vk_surface(%p) queue creation message received!", wl_vk_surface); - + if (_thread_swapchain_create_tbm_queue(wl_vk_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)", + wl_vk_surface); + } tpl_gcond_signal(&wl_vk_surface->surf_cond); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } else if (message == 3) { /* Acquirable message */ @@ -1105,7 +1117,7 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_vk_surface->surf_mutex); TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!", wl_vk_surface); - + _thread_swapchain_destroy_tbm_queue(wl_vk_surface); tpl_gcond_signal(&wl_vk_surface->surf_cond); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } @@ -1259,28 +1271,453 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) surface->backend.data = NULL; } +static void +__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, + void *data) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_surface_t *surface = NULL; + tpl_bool_t is_activated = TPL_FALSE; + int width, height; + + wl_vk_surface = (tpl_wl_vk_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); + + wl_vk_display = wl_vk_surface->wl_vk_display; + TPL_CHECK_ON_NULL_RETURN(wl_vk_display); + + surface = wl_vk_surface->tpl_surface; + TPL_CHECK_ON_NULL_RETURN(surface); + + swapchain = wl_vk_surface->swapchain; + TPL_CHECK_ON_NULL_RETURN(swapchain); + + /* When the queue is resized, change the reset flag to TPL_TRUE to reflect + * the changed window size at the next frame. */ + width = tbm_surface_queue_get_width(tbm_queue); + height = tbm_surface_queue_get_height(tbm_queue); + if (surface->width != width || surface->height != height) { + TPL_INFO("[QUEUE_RESIZE]", + "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)", + wl_vk_surface, tbm_queue, + surface->width, surface->height, width, height); + } + + /* When queue_reset_callback is called, if is_activated is different from + * its previous state change the reset flag to TPL_TRUE to get a new buffer + * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. 
 */
+	is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
+														   swapchain->tbm_queue);
+	if (wl_vk_surface->is_activated != is_activated) {
+		if (is_activated) {
+			TPL_INFO("[ACTIVATED]",
+					 "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
+					 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
+		} else {
+			TPL_INFO("[DEACTIVATED]",
+					 "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
+					 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
+		}
+	}
+
+	wl_vk_surface->reset = TPL_TRUE;
+
+	if (surface->reset_cb)
+		surface->reset_cb(surface->reset_data);
+}
+
+static void
+__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
+								   void *data)
+{
+	TPL_IGNORE(tbm_queue);
+
+	tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
+	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
+
+	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+
+	tpl_gsource_send_message(wl_vk_surface->surf_source, 3);
+
+	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+}
+
 static tpl_result_t
-__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
-									   tbm_surface_h tbm_surface,
-									   int num_rects, const int *rects,
-									   tbm_fd sync_fence)
+_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+	TPL_ASSERT(wl_vk_surface);
+
+	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
+	tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
+	tbm_surface_queue_h tbm_queue = NULL;
+	tbm_bufmgr bufmgr = NULL;
+	unsigned int capability;
+
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+
+	if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
+		TPL_ERR("buffer count(%d) must not be less than min_buffer(%d)",
+				swapchain->properties.buffer_count,
+				wl_vk_display->min_buffer);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
+		TPL_ERR("buffer count(%d) must not exceed max_buffer(%d)",
+				swapchain->properties.buffer_count,
+				wl_vk_display->max_buffer);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
+		TPL_ERR("Unsupported present_mode(%d)",
+				swapchain->properties.present_mode);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (swapchain->tbm_queue) {
+		int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
+		int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
+
+		if (swapchain->swapchain_buffers) {
+			int i;
+			for (i = 0; i < swapchain->properties.buffer_count; i++) {
+				if (swapchain->swapchain_buffers[i]) {
+					TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]);
+					tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
+					swapchain->swapchain_buffers[i] = NULL;
+				}
+			}
+
+			free(swapchain->swapchain_buffers);
+			swapchain->swapchain_buffers = NULL;
+		}
+
+		if (old_width != swapchain->properties.width ||
+			old_height != swapchain->properties.height) {
+			tbm_surface_queue_reset(swapchain->tbm_queue,
+									swapchain->properties.width,
+									swapchain->properties.height,
+									swapchain->properties.format);
+			TPL_INFO("[RESIZE]",
+					 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
+					 wl_vk_surface, swapchain, swapchain->tbm_queue,
+					 old_width, old_height,
+					 swapchain->properties.width,
+					 swapchain->properties.height);
+		}
+
+		swapchain->properties.buffer_count =
+			tbm_surface_queue_get_size(swapchain->tbm_queue);
+
+		wl_vk_surface->reset = TPL_FALSE;
+
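+		/* Reuse path: each create_swapchain call on a live swapchain takes
+		 * one more reference here; destroy_swapchain decrements it and only
+		 * tears the tbm_queue down once ref_cnt reaches zero. */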
__tpl_util_atomic_inc(&swapchain->ref_cnt); + + TPL_INFO("[SWAPCHAIN_REUSE]", + "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)", + wl_vk_surface, swapchain, swapchain->tbm_queue, + swapchain->properties.buffer_count); + + return TPL_ERROR_NONE; + } + + bufmgr = tbm_bufmgr_init(-1); + capability = tbm_bufmgr_get_capability(bufmgr); + tbm_bufmgr_deinit(bufmgr); + + if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) { + tbm_queue = wayland_tbm_client_create_surface_queue_tiled( + wl_vk_display->wl_tbm_client, + wl_vk_surface->wl_surface, + swapchain->properties.buffer_count, + swapchain->properties.width, + swapchain->properties.height, + TBM_FORMAT_ARGB8888); + } else { + tbm_queue = wayland_tbm_client_create_surface_queue( + wl_vk_display->wl_tbm_client, + wl_vk_surface->wl_surface, + swapchain->properties.buffer_count, + swapchain->properties.width, + swapchain->properties.height, + TBM_FORMAT_ARGB8888); + } + + if (!tbm_queue) { + TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)", + wl_vk_surface); + return TPL_ERROR_OUT_OF_MEMORY; + } + + if (tbm_surface_queue_set_modes( + tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) != + TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + if (tbm_surface_queue_add_reset_cb( + tbm_queue, + __cb_tbm_queue_reset_callback, + (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + if (tbm_surface_queue_add_acquirable_cb( + tbm_queue, + __cb_tbm_queue_acquirable_callback, + (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + swapchain->tbm_queue = tbm_queue; + + TPL_INFO("[TBM_QUEUE_CREATED]", + "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)", + wl_vk_surface, swapchain, tbm_queue); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, + tbm_format format, int width, + int height, int buffer_count, int present_mode) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_result_t res = TPL_ERROR_NONE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER); + + wl_vk_display = (tpl_wl_vk_display_t *) + surface->display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); + + swapchain = wl_vk_surface->swapchain; + + if (swapchain == NULL) { + swapchain = + (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t)); + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY); + swapchain->tbm_queue = NULL; + } + + swapchain->properties.buffer_count = buffer_count; + swapchain->properties.width = width; + swapchain->properties.height = height; + swapchain->properties.present_mode = present_mode; + swapchain->wl_vk_surface = wl_vk_surface; + + wl_vk_surface->swapchain = swapchain; + + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + /* send swapchain create tbm_queue message */ + 
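+	/* Message protocol handled by __thread_func_surf_dispatch:
+	 * 1 = surface init, 2 = tbm_queue create, 3 = acquirable,
+	 * 4 = swapchain destroy. Blocking requests (1, 2 and 4) follow the
+	 * same lock -> send_message -> gcond_wait -> unlock handshake. */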
tpl_gsource_send_message(wl_vk_surface->surf_source, 2); + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + + TPL_CHECK_ON_FALSE_ASSERT_FAIL( + swapchain->tbm_queue != NULL, + "[CRITICAL FAIL] Failed to create tbm_surface_queue"); + + wl_vk_surface->reset = TPL_FALSE; + + __tpl_util_atomic_set(&swapchain->ref_cnt, 1); + + return TPL_ERROR_NONE; +} + +static void +_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) +{ + TPL_ASSERT(wl_vk_surface); + + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + + TPL_CHECK_ON_NULL_RETURN(swapchain); + + if (swapchain->tbm_queue) { + TPL_INFO("[TBM_QUEUE_DESTROY]", + "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)", + wl_vk_surface, swapchain, swapchain->tbm_queue); + tbm_surface_queue_destroy(swapchain->tbm_queue); + swapchain->tbm_queue = NULL; + } +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; + tpl_result_t res = TPL_ERROR_NONE; + unsigned int ref; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + + wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER); + + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); + + swapchain = wl_vk_surface->swapchain; + if (!swapchain) { + TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.", + wl_vk_surface); + return TPL_ERROR_INVALID_OPERATION; + } + + if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) { + TPL_INFO("[DESTROY_SWAPCHAIN]", + "wl_vk_surface(%p) swapchain(%p) still valid.", + wl_vk_surface, swapchain); + return TPL_ERROR_NONE; + } + + TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]", + "wl_vk_surface(%p) swapchain(%p)", + wl_vk_surface, wl_vk_surface->swapchain); + + if (swapchain->swapchain_buffers) { + for (int i = 0; i < swapchain->properties.buffer_count; i++) { + if (swapchain->swapchain_buffers[i]) { + TPL_DEBUG("Stop tracking tbm_surface(%p)", + swapchain->swapchain_buffers[i]); + tbm_surface_internal_unref(swapchain->swapchain_buffers[i]); + swapchain->swapchain_buffers[i] = NULL; + } + } + + free(swapchain->swapchain_buffers); + swapchain->swapchain_buffers = NULL; + } + + _tpl_wl_vk_surface_buffer_clear(wl_vk_surface); + + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + tpl_gsource_send_message(wl_vk_surface->surf_source, 4); + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + + _print_buffer_lists(wl_vk_surface); + + free(swapchain); + wl_vk_surface->swapchain = NULL; + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, + tbm_surface_h **buffers, + int *buffer_count) +{ TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); TPL_ASSERT(surface->display); - TPL_ASSERT(surface->display->native_handle); - TPL_ASSERT(tbm_surface); + TPL_ASSERT(surface->display->backend.data); tpl_wl_vk_surface_t *wl_vk_surface = - (tpl_wl_vk_surface_t *) surface->backend.data; - tbm_surface_queue_error_e tsq_err; + (tpl_wl_vk_surface_t *)surface->backend.data; + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)surface->display->backend.data; + tpl_wl_vk_swapchain_t *swapchain = 
wl_vk_surface->swapchain; + tpl_result_t ret = TPL_ERROR_NONE; + int i; - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", - tbm_surface); - return TPL_ERROR_INVALID_PARAMETER; + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER); + + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + + if (!buffers) { + *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + return TPL_ERROR_NONE; + } + + swapchain->swapchain_buffers = (tbm_surface_h *)calloc( + *buffer_count, + sizeof(tbm_surface_h)); + if (!swapchain->swapchain_buffers) { + TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)", + *buffer_count); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + return TPL_ERROR_OUT_OF_MEMORY; + } + + ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client, + swapchain->tbm_queue, + swapchain->swapchain_buffers, + buffer_count); + if (!ret) { + TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)", + wl_vk_display->wl_tbm_client, swapchain->tbm_queue); + free(swapchain->swapchain_buffers); + swapchain->swapchain_buffers = NULL; + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + return TPL_ERROR_INVALID_OPERATION; + } + + for (i = 0; i < *buffer_count; i++) { + if (swapchain->swapchain_buffers[i]) { + TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)", + i, swapchain->swapchain_buffers[i], + _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i])); + tbm_surface_internal_ref(swapchain->swapchain_buffers[i]); + } } + *buffers = swapchain->swapchain_buffers; + + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, + tbm_fd sync_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wl_vk_surface_t *wl_vk_surface = + (tpl_wl_vk_surface_t *) surface->backend.data; + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + int bo_name = -1; + + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface), + TPL_ERROR_INVALID_PARAMETER); + + bo_name = _get_tbm_surface_bo_name(tbm_surface); + /* If there are received region information, * save it to buf_info in tbm_surface user_data using below API. 
*/ if (num_rects && rects) { @@ -1442,265 +1879,6 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, return tbm_surface; } -static tpl_result_t -__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, - tbm_surface_h **buffers, - int *buffer_count) -{ - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tpl_wl_vk_display_t *wl_vk_display = NULL; - int i; - tpl_result_t ret = TPL_ERROR_NONE; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->display->backend.data); - TPL_ASSERT(buffers); - TPL_ASSERT(buffer_count); - - wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - - if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { - ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface, - NULL, buffer_count); - if (ret != TPL_ERROR_NONE) { - TPL_ERR("Failed to get buffer_count. twe_surface(%p)", - wl_vk_surface->twe_surface); - twe_display_unlock(wl_vk_display->twe_display); - return ret; - } - - wl_vk_surface->swapchain_buffers = (tbm_surface_h *)calloc( - *buffer_count, - sizeof(tbm_surface_h)); - if (!wl_vk_surface->swapchain_buffers) { - TPL_ERR("Failed to allocate memory for buffers."); - twe_display_unlock(wl_vk_display->twe_display); - return TPL_ERROR_OUT_OF_MEMORY; - } - - ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface, - wl_vk_surface->swapchain_buffers, - buffer_count); - if (ret != TPL_ERROR_NONE) { - TPL_ERR("Failed to get swapchain_buffers. wl_vk_surface(%p) twe_surface(%p)", - wl_vk_surface, wl_vk_surface->twe_surface); - free(wl_vk_surface->swapchain_buffers); - wl_vk_surface->swapchain_buffers = NULL; - twe_display_unlock(wl_vk_display->twe_display); - return ret; - } - - for (i = 0; i < *buffer_count; i++) { - if (wl_vk_surface->swapchain_buffers[i]) { - TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)", - i, wl_vk_surface->swapchain_buffers[i], - tbm_bo_export(tbm_surface_internal_get_bo( - wl_vk_surface->swapchain_buffers[i], 0))); - tbm_surface_internal_ref(wl_vk_surface->swapchain_buffers[i]); - } - } - - *buffers = wl_vk_surface->swapchain_buffers; - - twe_display_unlock(wl_vk_display->twe_display); - } - - return TPL_ERROR_NONE; -} - -static void -__cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue, - void *data) -{ - tpl_surface_t *surface = NULL; - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tpl_bool_t is_activated = TPL_FALSE; - - surface = (tpl_surface_t *)data; - TPL_CHECK_ON_NULL_RETURN(surface); - - wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); - - /* When queue_reset_callback is called, if is_activated is different from - * its previous state change the reset flag to TPL_TRUE to get a new buffer - * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. 
*/ - is_activated = twe_surface_check_activated(wl_vk_surface->twe_surface); - - if (wl_vk_surface->is_activated != is_activated) { - if (is_activated) { - TPL_LOG_T("WL_VK", - "[ACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)", - wl_vk_surface, surface_queue); - } else { - TPL_LOG_T("WL_VK", - "[DEACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)", - wl_vk_surface, surface_queue); - } - wl_vk_surface->is_activated = is_activated; - } - - wl_vk_surface->reset = TPL_TRUE; - - if (surface->reset_cb) - surface->reset_cb(surface->reset_data); -} - -static tpl_result_t -__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, - tbm_format format, int width, - int height, int buffer_count, int present_mode) -{ - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tpl_wl_vk_display_t *wl_vk_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(surface->display); - - wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - TPL_ASSERT(wl_vk_surface); - - wl_vk_display = (tpl_wl_vk_display_t *) - surface->display->backend.data; - TPL_ASSERT(wl_vk_display); - - if (wl_vk_surface->tbm_queue) { - int old_width = tbm_surface_queue_get_width(wl_vk_surface->tbm_queue); - int old_height = tbm_surface_queue_get_height(wl_vk_surface->tbm_queue); - - if (old_width != width || old_height != height) { - tbm_surface_queue_reset(wl_vk_surface->tbm_queue, - width, height, format); - TPL_LOG_T("WL_VK", - "[RESIZE] wl_vk_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)", - wl_vk_surface, wl_vk_surface->tbm_queue, - old_width, old_height, width, height); - } - - if (wl_vk_surface->swapchain_buffers) { - int i; - for (i = 0; i < wl_vk_surface->buffer_count; i++) { - if (wl_vk_surface->swapchain_buffers[i]) { - TPL_DEBUG("unref tbm_surface(%p)", wl_vk_surface->swapchain_buffers[i]); - tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]); - wl_vk_surface->swapchain_buffers[i] = NULL; - } - } - - free(wl_vk_surface->swapchain_buffers); - wl_vk_surface->swapchain_buffers = NULL; - } - - wl_vk_surface->buffer_count = - tbm_surface_queue_get_size(wl_vk_surface->tbm_queue); - wl_vk_surface->reset = TPL_FALSE; - - __tpl_util_atomic_inc(&wl_vk_surface->swapchain_reference); - - TPL_LOG_T("WL_VK", "[REUSE] wl_vk_surface(%p) tbm_queue(%p) size(%d)", - wl_vk_surface, wl_vk_surface->tbm_queue, - wl_vk_surface->buffer_count); - return TPL_ERROR_NONE; - } - - res = twe_surface_create_swapchain(wl_vk_surface->twe_surface, - width, height, format, - buffer_count, present_mode); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to create swapchain. 
twe_surface(%p)", - wl_vk_surface->twe_surface); - return res; - } - - wl_vk_surface->tbm_queue = twe_surface_get_tbm_queue( - wl_vk_surface->twe_surface); - - /* Set reset_callback to tbm_queue */ - if (tbm_surface_queue_add_reset_cb(wl_vk_surface->tbm_queue, - __cb_tbm_queue_reset_callback, - (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("TBM surface queue add reset cb failed!"); - twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); - wl_vk_surface->tbm_queue = NULL; - return TPL_ERROR_INVALID_OPERATION; - } - - wl_vk_surface->buffer_count = buffer_count; - wl_vk_surface->reset = TPL_FALSE; - - __tpl_util_atomic_set(&wl_vk_surface->swapchain_reference, 1); - - return TPL_ERROR_NONE; -} - -static tpl_result_t -__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) -{ - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tpl_wl_vk_display_t *wl_vk_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; - unsigned int ref; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->display->backend.data); - - wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - - if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { - ref = __tpl_util_atomic_dec(&wl_vk_surface->swapchain_reference); - if (ref > 0) { - TPL_LOG_T("WL_VK", - "This swapchain is still valid. | twe_surface(%p)", - wl_vk_surface->twe_surface); - twe_display_unlock(wl_vk_display->twe_display); - return TPL_ERROR_NONE; - } - - - if (wl_vk_surface->reset) { - TPL_LOG_T("WL_VK", - "Since reset is in the TRUE state, it will not be destroyed."); - twe_display_unlock(wl_vk_display->twe_display); - return TPL_ERROR_NONE; - } - - if (wl_vk_surface->swapchain_buffers) { - int i; - for (i = 0; i < wl_vk_surface->buffer_count; i++) { - TPL_DEBUG("Stop tracking tbm_surface(%p)", - wl_vk_surface->swapchain_buffers[i]); - tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]); - wl_vk_surface->swapchain_buffers[i] = NULL; - } - - free(wl_vk_surface->swapchain_buffers); - wl_vk_surface->swapchain_buffers = NULL; - } - - res = twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to destroy swapchain. 
twe_surface(%p)", - wl_vk_surface->twe_surface); - twe_display_unlock(wl_vk_display->twe_display); - return res; - } - - wl_vk_surface->tbm_queue = NULL; - - twe_display_unlock(wl_vk_display->twe_display); - } - - return TPL_ERROR_NONE; -} - tpl_bool_t __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) { @@ -1752,74 +1930,74 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) } static void -__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) +__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer) { - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; - TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", - wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); + TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface); - tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) { - wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL; - wl_egl_surface->buffer_cnt--; + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) { + wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL; + wl_vk_surface->buffer_cnt--; - wl_egl_buffer->idx = -1; + wl_vk_buffer->idx = -1; } - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); - wl_display_flush(wl_egl_display->wl_display); + wl_display_flush(wl_vk_display->wl_display); - if (wl_egl_buffer->wl_buffer) { - wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, - (void *)wl_egl_buffer->wl_buffer); - wl_egl_buffer->wl_buffer = NULL; + if (wl_vk_buffer->wl_buffer) { + wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client, + (void *)wl_vk_buffer->wl_buffer); + wl_vk_buffer->wl_buffer = NULL; } - if (wl_egl_buffer->buffer_release) { - zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); - wl_egl_buffer->buffer_release = NULL; + if (wl_vk_buffer->buffer_release) { + zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); + wl_vk_buffer->buffer_release = NULL; } - if (wl_egl_buffer->release_fence_fd != -1) { - close(wl_egl_buffer->release_fence_fd); - wl_egl_buffer->release_fence_fd = -1; + if (wl_vk_buffer->release_fence_fd != -1) { + close(wl_vk_buffer->release_fence_fd); + wl_vk_buffer->release_fence_fd = -1; } - if (wl_egl_buffer->waiting_source) { - tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); - wl_egl_buffer->waiting_source = NULL; + if (wl_vk_buffer->waiting_source) { + tpl_gsource_destroy(wl_vk_buffer->waiting_source, TPL_FALSE); + wl_vk_buffer->waiting_source = NULL; } - if (wl_egl_buffer->commit_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (wl_vk_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_vk_buffer->commit_sync_fd); if (ret == -1) TPL_ERR("Failed to send commit_sync signal to fd(%d)", - wl_egl_buffer->commit_sync_fd); - close(wl_egl_buffer->commit_sync_fd); - wl_egl_buffer->commit_sync_fd = -1; + wl_vk_buffer->commit_sync_fd); + close(wl_vk_buffer->commit_sync_fd); + wl_vk_buffer->commit_sync_fd = -1; } - if 
(wl_egl_buffer->presentation_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (wl_vk_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_vk_buffer->presentation_sync_fd); if (ret == -1) TPL_ERR("Failed to send presentation_sync signal to fd(%d)", - wl_egl_buffer->presentation_sync_fd); - close(wl_egl_buffer->presentation_sync_fd); - wl_egl_buffer->presentation_sync_fd = -1; + wl_vk_buffer->presentation_sync_fd); + close(wl_vk_buffer->presentation_sync_fd); + wl_vk_buffer->presentation_sync_fd = -1; } - if (wl_egl_buffer->rects) { - free(wl_egl_buffer->rects); - wl_egl_buffer->rects = NULL; - wl_egl_buffer->num_rects = 0; + if (wl_vk_buffer->rects) { + free(wl_vk_buffer->rects); + wl_vk_buffer->rects = NULL; + wl_vk_buffer->num_rects = 0; } - wl_egl_buffer->tbm_surface = NULL; - wl_egl_buffer->bo_name = -1; + wl_vk_buffer->tbm_surface = NULL; + wl_vk_buffer->bo_name = -1; - free(wl_egl_buffer); + free(wl_vk_buffer); } static int @@ -1829,22 +2007,22 @@ _get_tbm_surface_bo_name(tbm_surface_h tbm_surface) } static void -_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) +_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface) { int idx = 0; - tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", - wl_egl_surface, wl_egl_surface->buffer_cnt); + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)", + wl_vk_surface, wl_vk_surface->buffer_cnt); for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { - tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; - if (wl_egl_buffer) { + tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx]; + if (wl_vk_buffer) { TPL_INFO("[INFO]", - "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", - idx, wl_egl_buffer, wl_egl_buffer->tbm_surface, - wl_egl_buffer->bo_name, - status_to_string[wl_egl_buffer->status]); + "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", + idx, wl_vk_buffer, wl_vk_buffer->tbm_surface, + wl_vk_buffer->bo_name, + status_to_string[wl_vk_buffer->status]); } } - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); } -- 2.7.4 From ed179b9b4bff5dfca418f48b678d5cde68684d51 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 26 Apr 2021 19:35:31 +0900 Subject: [PATCH 15/16] Implement DEQ/CANCEL/ACQ/ENQ buffer. 
Change-Id: If00f9b4e7c2aedefc9b20b76ac7abb99b6e6202d Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 562 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 376 insertions(+), 186 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index caa2ce2..31dc351 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -27,6 +27,9 @@ #define BUFFER_ARRAY_SIZE 10 #define VK_CLIENT_QUEUE_SIZE 3 +static int wl_vk_buffer_key; +#define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key) + typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t; typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t; typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t; @@ -179,6 +182,8 @@ static tpl_result_t _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); static void _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); +static tpl_result_t +_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface); static tpl_bool_t _check_native_handle_is_wl_display(tpl_handle_t native_dpy) @@ -1111,7 +1116,11 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_vk_surface->surf_mutex); TPL_DEBUG("wl_vk_surface(%p) acquirable message received!", wl_vk_surface); - _thread_surface_queue_acquire(wl_vk_surface); + if (_thread_surface_queue_acquire(wl_vk_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)", + wl_vk_surface); + } tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } else if (message == 4) { /* swapchain destroy */ tpl_gmutex_lock(&wl_vk_surface->surf_mutex); @@ -1271,6 +1280,18 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) surface->backend.data = NULL; } +static tpl_bool_t +__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wl_vk_surface_t *wl_vk_surface = + (tpl_wl_vk_surface_t *)surface->backend.data; + + return !(wl_vk_surface->reset); +} + static void __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, void *data) @@ -1696,189 +1717,429 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, return TPL_ERROR_NONE; } -static tpl_result_t -__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface, - int num_rects, const int *rects, - tbm_fd sync_fence) +static void +__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer) { - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; - tpl_wl_vk_surface_t *wl_vk_surface = - (tpl_wl_vk_surface_t *) surface->backend.data; - tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - int bo_name = -1; + TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface); - TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); - TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER); - TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface), - TPL_ERROR_INVALID_PARAMETER); + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) { + wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL; + wl_vk_surface->buffer_cnt--; - bo_name = 
_get_tbm_surface_bo_name(tbm_surface); + wl_vk_buffer->idx = -1; + } + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); - /* If there are received region information, - * save it to buf_info in tbm_surface user_data using below API. */ - if (num_rects && rects) { - tpl_result_t ret = TPL_ERROR_NONE; - ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects); - if (ret != TPL_ERROR_NONE) { - TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)", - num_rects, rects); - } + wl_display_flush(wl_vk_display->wl_display); + + if (wl_vk_buffer->wl_buffer) { + wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client, + (void *)wl_vk_buffer->wl_buffer); + wl_vk_buffer->wl_buffer = NULL; } - tsq_err = tbm_surface_queue_enqueue(wl_vk_surface->tbm_queue, - tbm_surface); - if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) { - tbm_surface_internal_unref(tbm_surface); - } else { - TPL_ERR("Failed to enqeueue tbm_surface. | tsq_err = %d", tsq_err); - return TPL_ERROR_INVALID_OPERATION; + + if (wl_vk_buffer->buffer_release) { + zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); + wl_vk_buffer->buffer_release = NULL; } - if (sync_fence != -1) { - tpl_result_t res = TPL_ERROR_NONE; - res = twe_surface_set_sync_fd(wl_vk_surface->twe_surface, - tbm_surface, sync_fence); - if (res != TPL_ERROR_NONE) { - TPL_WARN("Failed to set sync_fd(%d). Fallback to async mode.", - sync_fence); - } + if (wl_vk_buffer->release_fence_fd != -1) { + close(wl_vk_buffer->release_fence_fd); + wl_vk_buffer->release_fence_fd = -1; } - TPL_LOG_T("WL_VK", "[ENQ] tbm_surface(%p) bo(%d) sync_fence(%d)", - tbm_surface, - tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)), - sync_fence); + if (wl_vk_buffer->rects) { + free(wl_vk_buffer->rects); + wl_vk_buffer->rects = NULL; + wl_vk_buffer->num_rects = 0; + } - return TPL_ERROR_NONE; + wl_vk_buffer->tbm_surface = NULL; + wl_vk_buffer->bo_name = -1; + + free(wl_vk_buffer); } -static tpl_bool_t -__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) +static tpl_wl_vk_buffer_t * +_get_wl_vk_buffer(tbm_surface_h tbm_surface) { - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - - tpl_wl_vk_surface_t *wl_vk_surface = - (tpl_wl_vk_surface_t *)surface->backend.data; - - return !(wl_vk_surface->reset); + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER, + (void **)&wl_vk_buffer); + return wl_vk_buffer; } -static tpl_result_t -__tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface) +static tpl_wl_vk_buffer_t * +_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface, + tbm_surface_h tbm_surface) { - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; - wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - if (!wl_vk_surface) { - TPL_ERR("Invalid backend surface. surface(%p) wl_vk_surface(%p)", - surface, wl_vk_surface); - return TPL_ERROR_INVALID_PARAMETER; - } + wl_vk_buffer = _get_wl_vk_buffer(tbm_surface); - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Invalid buffer. 
tbm_surface(%p)", tbm_surface); - return TPL_ERROR_INVALID_PARAMETER; - } + if (!wl_vk_buffer) { + wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t)); + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL); - tbm_surface_internal_unref(tbm_surface); + tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER, + (tbm_data_free)__cb_wl_vk_buffer_free); + tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER, + wl_vk_buffer); - tsq_err = tbm_surface_queue_cancel_dequeue(wl_vk_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface); - return TPL_ERROR_INVALID_OPERATION; + wl_vk_buffer->wl_buffer = NULL; + wl_vk_buffer->tbm_surface = tbm_surface; + wl_vk_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface); + wl_vk_buffer->wl_vk_surface = wl_vk_surface; + + wl_vk_buffer->status = RELEASED; + + wl_vk_buffer->acquire_fence_fd = -1; + wl_vk_buffer->release_fence_fd = -1; + + wl_vk_buffer->dx = 0; + wl_vk_buffer->dy = 0; + wl_vk_buffer->width = tbm_surface_get_width(tbm_surface); + wl_vk_buffer->height = tbm_surface_get_height(tbm_surface); + + wl_vk_buffer->rects = NULL; + wl_vk_buffer->num_rects = 0; + + tpl_gmutex_init(&wl_vk_buffer->mutex); + tpl_gcond_init(&wl_vk_buffer->cond); + + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + { + int i; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) + if (wl_vk_surface->buffers[i] == NULL) break; + + /* If this exception is reached, + * it may be a critical memory leak problem. */ + if (i == BUFFER_ARRAY_SIZE) { + tpl_wl_vk_buffer_t *evicted_buffer = NULL; + int evicted_idx = 0; /* evict the frontmost buffer */ + + evicted_buffer = wl_vk_surface->buffers[evicted_idx]; + + TPL_WARN("wl_vk_surface(%p) buffers array is full. 
evict one.", + wl_vk_surface); + TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)", + evicted_buffer, evicted_buffer->tbm_surface, + status_to_string[evicted_buffer->status]); + + /* [TODO] need to think about whether there will be + * better modifications */ + wl_vk_surface->buffer_cnt--; + wl_vk_surface->buffers[evicted_idx] = NULL; + + i = evicted_idx; + } + + wl_vk_surface->buffer_cnt++; + wl_vk_surface->buffers[i] = wl_vk_buffer; + wl_vk_buffer->idx = i; + } + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); + + TPL_INFO("[WL_VK_BUFFER_CREATE]", + "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_surface, wl_vk_buffer, tbm_surface, + wl_vk_buffer->bo_name); } - TPL_LOG_T("WL_VK", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)", - surface, tbm_surface); + wl_vk_buffer->need_to_commit = TPL_FALSE; + wl_vk_buffer->buffer_release = NULL; - return TPL_ERROR_NONE; + return wl_vk_buffer; } static tbm_surface_h __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, - uint64_t timeout_ns, - tbm_fd *sync_fence) + uint64_t timeout_ns, + int32_t *release_fence) { TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->backend.data); + TPL_OBJECT_CHECK_RETURN(surface, NULL); - tbm_surface_h tbm_surface = NULL; tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - tbm_surface_queue_error_e tsq_err = 0; - tpl_result_t lock_res = TPL_ERROR_NONE; - tpl_result_t res = TPL_ERROR_NONE; + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + + tpl_result_t res = TPL_ERROR_NONE; - if (sync_fence) - *sync_fence = -1; + tbm_surface_h tbm_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL); + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL); TPL_OBJECT_UNLOCK(surface); TRACE_BEGIN("WAIT_DEQUEUEABLE"); - lock_res = twe_display_lock(wl_vk_display->twe_display); - res = twe_surface_wait_dequeueable(wl_vk_surface->twe_surface, - timeout_ns); + if (timeout_ns != UINT64_MAX) { + tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( + swapchain->tbm_queue, timeout_ns/1000); + } else { + tsq_err = tbm_surface_queue_can_dequeue( + swapchain->tbm_queue, 1); + } TRACE_END(); TPL_OBJECT_LOCK(surface); if (res == TPL_ERROR_TIME_OUT) { TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")", timeout_ns); - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); return NULL; } else if (res != TPL_ERROR_NONE) { - TPL_ERR("Invalid operation. twe_surface(%p) timeout_ns(%" PRIu64 ")", - wl_vk_surface->twe_surface, timeout_ns); - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); + TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p)", + wl_vk_surface, swapchain->tbm_queue); return NULL; } + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + if (wl_vk_surface->reset) { - TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. 
Do not process dequeue.", - wl_vk_surface->tbm_queue); - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); + TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.", + swapchain, swapchain->tbm_queue); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); return NULL; } - - tsq_err = tbm_surface_queue_dequeue(wl_vk_surface->tbm_queue, + tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue, &tbm_surface); if (!tbm_surface) { - TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d", - wl_vk_surface->tbm_queue, tsq_err); - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); + TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d", + swapchain->tbm_queue, wl_vk_surface, tsq_err); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); return NULL; } tbm_surface_internal_ref(tbm_surface); - if (sync_fence) { - *sync_fence = twe_surface_create_sync_fd(tbm_surface); + wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer"); + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + wl_vk_buffer->status = DEQUEUED; + + if (release_fence) { + if (wl_vk_surface->surface_sync) { + *release_fence = wl_vk_buffer->release_fence_fd; + TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)", + wl_vk_surface, wl_vk_buffer, *release_fence); + wl_vk_buffer->release_fence_fd = -1; + } else { + *release_fence = -1; + } } - TPL_LOG_T("WL_VK", "[DEQ] tbm_queue(%p) tbm_surface(%p) bo(%d)", - wl_vk_surface->tbm_queue, tbm_surface, - tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + wl_vk_surface->reset = TPL_FALSE; - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); + TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name, + release_fence ? 
*release_fence : -1); + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); return tbm_surface; } +static tpl_result_t +__tpl_wl_vk_wsi_surface_cancel_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wl_vk_surface_t *wl_vk_surface = + (tpl_wl_vk_surface_t *)surface->backend.data; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface), + TPL_ERROR_INVALID_PARAMETER); + + swapchain = wl_vk_surface->swapchain; + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, + TPL_ERROR_INVALID_PARAMETER); + + wl_vk_buffer = _get_wl_vk_buffer(tbm_surface); + if (wl_vk_buffer) { + tpl_gmutex_lock(&wl_vk_buffer->mutex); + wl_vk_buffer->status = RELEASED; + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + } + + tbm_surface_internal_unref(tbm_surface); + + TPL_INFO("[CANCEL BUFFER]", + "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)", + wl_vk_surface, swapchain, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_OPERATION; + } + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, + int32_t acquire_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->backend.data); + TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); + + tpl_wl_vk_surface_t *wl_vk_surface = + (tpl_wl_vk_surface_t *) surface->backend.data; + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + int bo_name = -1; + + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface), + TPL_ERROR_INVALID_PARAMETER); + + wl_vk_buffer = _get_wl_vk_buffer(tbm_surface); + bo_name = wl_vk_buffer->bo_name; + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + /* If there are received region information, save it to wl_vk_buffer */ + if (num_rects && rects) { + if (wl_vk_buffer->rects != NULL) { + free(wl_vk_buffer->rects); + wl_vk_buffer->rects = NULL; + wl_vk_buffer->num_rects = 0; + } + + wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects)); + wl_vk_buffer->num_rects = num_rects; + + if (wl_vk_buffer->rects) { + memcpy((char *)wl_vk_buffer->rects, (char *)rects, + sizeof(int) * 4 * num_rects); + } else { + TPL_ERR("Failed to allocate memory for rects info."); + } + } + + if (wl_vk_buffer->acquire_fence_fd != -1) + close(wl_vk_buffer->acquire_fence_fd); + + wl_vk_buffer->acquire_fence_fd = acquire_fence; + + wl_vk_buffer->status = ENQUEUED; + TPL_LOG_T("WL_VK", + "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)", + wl_vk_buffer, tbm_surface, bo_name, acquire_fence); + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue, + tbm_surface); + if 
(tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + tbm_surface_internal_unref(tbm_surface); + TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d", + tbm_surface, wl_vk_surface, tsq_err); + return TPL_ERROR_INVALID_OPERATION; + } + + tbm_surface_internal_unref(tbm_surface); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tbm_surface_h tbm_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tpl_bool_t ready_to_commit = TPL_TRUE; + + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + + while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) { + tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue, + &tbm_surface); + if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to acquire from tbm_queue(%p)", + swapchain->tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + tbm_surface_internal_ref(tbm_surface); + + wl_vk_buffer = _get_wl_vk_buffer(tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL, + "wl_vk_buffer should not be NULL"); + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + wl_vk_buffer->status = ACQUIRED; + + TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + if (wl_vk_buffer->wl_buffer == NULL) { + wl_vk_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + wl_vk_display->wl_tbm_client, tbm_surface); + + if (!wl_vk_buffer->wl_buffer) { + TPL_WARN("Failed to create wl_buffer. 
wl_tbm_client(%p) tbm_surface(%p)", + wl_vk_display->wl_tbm_client, tbm_surface); + } else { + TPL_LOG_T("WL_EGL", + "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface); + } + } + + if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done) + ready_to_commit = TPL_TRUE; + else { + wl_vk_buffer->status = WAITING_VBLANK; + __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer); + ready_to_commit = TPL_FALSE; + } + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + if (ready_to_commit) + _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer); + } + + return TPL_ERROR_NONE; +} + tpl_bool_t __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) { @@ -1920,7 +2181,7 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) backend->fini = __tpl_wl_vk_wsi_surface_fini; backend->validate = __tpl_wl_vk_wsi_surface_validate; backend->cancel_dequeued_buffer = - __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer; + __tpl_wl_vk_wsi_surface_cancel_buffer; backend->dequeue_buffer = __tpl_wl_vk_wsi_surface_dequeue_buffer; backend->enqueue_buffer = __tpl_wl_vk_wsi_surface_enqueue_buffer; backend->get_swapchain_buffers = @@ -1929,77 +2190,6 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain; } -static void -__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer) -{ - tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; - tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; - - TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", - wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface); - - tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); - if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) { - wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL; - wl_vk_surface->buffer_cnt--; - - wl_vk_buffer->idx = -1; - } - tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); - - wl_display_flush(wl_vk_display->wl_display); - - if (wl_vk_buffer->wl_buffer) { - wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client, - (void *)wl_vk_buffer->wl_buffer); - wl_vk_buffer->wl_buffer = NULL; - } - - if (wl_vk_buffer->buffer_release) { - zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); - wl_vk_buffer->buffer_release = NULL; - } - - if (wl_vk_buffer->release_fence_fd != -1) { - close(wl_vk_buffer->release_fence_fd); - wl_vk_buffer->release_fence_fd = -1; - } - - if (wl_vk_buffer->waiting_source) { - tpl_gsource_destroy(wl_vk_buffer->waiting_source, TPL_FALSE); - wl_vk_buffer->waiting_source = NULL; - } - - if (wl_vk_buffer->commit_sync_fd != -1) { - int ret = _write_to_eventfd(wl_vk_buffer->commit_sync_fd); - if (ret == -1) - TPL_ERR("Failed to send commit_sync signal to fd(%d)", - wl_vk_buffer->commit_sync_fd); - close(wl_vk_buffer->commit_sync_fd); - wl_vk_buffer->commit_sync_fd = -1; - } - - if (wl_vk_buffer->presentation_sync_fd != -1) { - int ret = _write_to_eventfd(wl_vk_buffer->presentation_sync_fd); - if (ret == -1) - TPL_ERR("Failed to send presentation_sync signal to fd(%d)", - wl_vk_buffer->presentation_sync_fd); - close(wl_vk_buffer->presentation_sync_fd); - wl_vk_buffer->presentation_sync_fd = -1; - } - - if (wl_vk_buffer->rects) { - free(wl_vk_buffer->rects); - wl_vk_buffer->rects = NULL; - wl_vk_buffer->num_rects = 0; - } - - wl_vk_buffer->tbm_surface = NULL; - wl_vk_buffer->bo_name = -1; 
- - free(wl_vk_buffer); -} - static int _get_tbm_surface_bo_name(tbm_surface_h tbm_surface) { -- 2.7.4 From 63f9a32027d784ca55abeb9c30e67acae980d9ee Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 27 Apr 2021 15:22:56 +0900 Subject: [PATCH 16/16] Implement buffer commit/release Change-Id: I7f6685b20da489603e9661333420a09980f93182 Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 352 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 348 insertions(+), 4 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 31dc351..9bfb4ed 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -184,6 +184,9 @@ static void _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); static tpl_result_t _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface); +static void +_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface, + tpl_wl_vk_buffer_t *wl_vk_buffer); static tpl_bool_t _check_native_handle_is_wl_display(tpl_handle_t native_dpy) @@ -1520,7 +1523,6 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_wl_vk_swapchain_t *swapchain = NULL; - tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(surface); TPL_ASSERT(surface->display); @@ -1590,8 +1592,6 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) tpl_wl_vk_swapchain_t *swapchain = NULL; tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; - unsigned int ref; TPL_ASSERT(surface); TPL_ASSERT(surface->display); @@ -2117,7 +2117,7 @@ _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface) TPL_WARN("Failed to create wl_buffer. 
wl_tbm_client(%p) tbm_surface(%p)", wl_vk_display->wl_tbm_client, tbm_surface); } else { - TPL_LOG_T("WL_EGL", + TPL_LOG_T("WL_VK", "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface); } @@ -2140,6 +2140,350 @@ _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface) return TPL_ERROR_NONE; } +static void +__cb_buffer_fenced_release(void *data, + struct zwp_linux_buffer_release_v1 *release, + int32_t fence) +{ + tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer); + + tbm_surface = wl_vk_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_swapchain_t *swapchain = NULL; + + if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) { + TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface); + tbm_surface_internal_unref(tbm_surface); + return; + } + + swapchain = wl_vk_surface->swapchain; + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + if (wl_vk_buffer->status == COMMITTED) { + tbm_surface_queue_error_e tsq_err; + + zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); + wl_vk_buffer->buffer_release = NULL; + + wl_vk_buffer->release_fence_fd = fence; + wl_vk_buffer->status = RELEASED; + + TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", + wl_vk_buffer->bo_name, + fence); + TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)", + wl_vk_buffer->bo_name); + + TPL_LOG_T("WL_VK", + "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_vk_buffer, tbm_surface, + wl_vk_buffer->bo_name, + fence); + + tsq_err = tbm_surface_queue_release(swapchain->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + tbm_surface_internal_unref(tbm_surface); + } + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } +} + +static void +__cb_buffer_immediate_release(void *data, + struct zwp_linux_buffer_release_v1 *release) +{ + tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer); + + tbm_surface = wl_vk_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_swapchain_t *swapchain = NULL; + + if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) { + TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface); + tbm_surface_internal_unref(tbm_surface); + return; + } + + swapchain = wl_vk_surface->swapchain; + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + if (wl_vk_buffer->status == COMMITTED) { + tbm_surface_queue_error_e tsq_err; + + zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); + wl_vk_buffer->buffer_release = NULL; + + wl_vk_buffer->release_fence_fd = -1; + wl_vk_buffer->status = RELEASED; + + TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + TPL_LOG_T("WL_VK", + "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + tsq_err = tbm_surface_queue_release(swapchain->tbm_queue, + tbm_surface); + if (tsq_err != 
TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + tbm_surface_internal_unref(tbm_surface); + } + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } +} + +static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = { + __cb_buffer_fenced_release, + __cb_buffer_immediate_release, +}; + +static void +__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) +{ + tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer) + + tbm_surface = wl_vk_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; + + if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) { + TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface); + tbm_surface_internal_unref(tbm_surface); + return; + } + + swapchain = wl_vk_surface->swapchain; + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + if (wl_vk_buffer->status == COMMITTED) { + + tsq_err = tbm_surface_queue_release(swapchain->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + wl_vk_buffer->status = RELEASED; + + TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name); + TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)", + wl_vk_buffer->bo_name); + + TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_buffer->wl_buffer, tbm_surface, + wl_vk_buffer->bo_name); + + tbm_surface_internal_unref(tbm_surface); + } + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } +} + +static const struct wl_buffer_listener wl_buffer_release_listener = { + (void *)__cb_wl_buffer_release, +}; + +static void +__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, + unsigned int sequence, unsigned int tv_sec, + unsigned int tv_usec, void *user_data) +{ + tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + + TRACE_ASYNC_END((int)wl_vk_surface, "WAIT_VBLANK"); + TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface); + + if (error == TDM_ERROR_TIMEOUT) + TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)", + wl_vk_surface); + + wl_vk_surface->vblank_done = TPL_TRUE; + + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front( + wl_vk_surface->vblank_waiting_buffers, + NULL); + if (wl_vk_buffer) + _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); +} + +static tpl_result_t +_thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tdm_error tdm_err = TDM_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + + if (wl_vk_surface->vblank == NULL) { + wl_vk_surface->vblank = + _thread_create_tdm_client_vblank(wl_vk_display->tdm_client); + if (!wl_vk_surface->vblank) { + TPL_WARN("Failed to create vblank. 
wl_vk_surface(%p)", + wl_vk_surface); + return TPL_ERROR_OUT_OF_MEMORY; + } + } + + tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank, + wl_vk_surface->post_interval, + __cb_tdm_client_vblank, + (void *)wl_vk_surface); + + if (tdm_err == TDM_ERROR_NONE) { + wl_vk_surface->vblank_done = TPL_FALSE; + TRACE_ASYNC_BEGIN((int)wl_vk_surface, "WAIT_VBLANK"); + } else { + TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err); + return TPL_ERROR_INVALID_OPERATION; + } + + return TPL_ERROR_NONE; +} + +static void +_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface, + tpl_wl_vk_buffer_t *wl_vk_buffer) +{ + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + struct wl_surface *wl_surface = wl_vk_surface->wl_surface; + uint32_t version; + + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL, + "wl_vk_buffer sould be not NULL"); + + if (wl_vk_buffer->wl_buffer == NULL) { + wl_vk_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + wl_vk_display->wl_tbm_client, + wl_vk_buffer->tbm_surface); + } + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL, + "[FATAL] Failed to create wl_buffer"); + + version = wl_proxy_get_version((struct wl_proxy *)wl_surface); + + wl_surface_attach(wl_surface, (void *)wl_vk_buffer->wl_buffer, + wl_vk_buffer->dx, wl_vk_buffer->dy); + + if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) { + if (version < 4) { + wl_surface_damage(wl_surface, + wl_vk_buffer->dx, wl_vk_buffer->dy, + wl_vk_buffer->width, wl_vk_buffer->height); + } else { + wl_surface_damage_buffer(wl_surface, + 0, 0, + wl_vk_buffer->width, wl_vk_buffer->height); + } + } else { + int i; + for (i = 0; i < wl_vk_buffer->num_rects; i++) { + int inverted_y = + wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] + + wl_vk_buffer->rects[i * 4 + 3]); + if (version < 4) { + wl_surface_damage(wl_surface, + wl_vk_buffer->rects[i * 4 + 0], + inverted_y, + wl_vk_buffer->rects[i * 4 + 2], + wl_vk_buffer->rects[i * 4 + 3]); + } else { + wl_surface_damage_buffer(wl_surface, + wl_vk_buffer->rects[i * 4 + 0], + inverted_y, + wl_vk_buffer->rects[i * 4 + 2], + wl_vk_buffer->rects[i * 4 + 3]); + } + } + } + + if (wl_vk_display->use_explicit_sync && + wl_vk_surface->surface_sync) { + + zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync, + wl_vk_buffer->acquire_fence_fd); + TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)", + wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd); + close(wl_vk_buffer->acquire_fence_fd); + wl_vk_buffer->acquire_fence_fd = -1; + + wl_vk_buffer->buffer_release = + zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync); + if (!wl_vk_buffer->buffer_release) { + TPL_ERR("Failed to get buffer_release. 
wl_vk_surface(%p)", wl_vk_surface); + } else { + zwp_linux_buffer_release_v1_add_listener( + wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer); + TPL_DEBUG("add explicit_sync_release_listener."); + } + } else { + wl_buffer_add_listener((void *)wl_vk_buffer->wl_buffer, + &wl_buffer_release_listener, wl_vk_buffer); + } + + wl_surface_commit(wl_surface); + + wl_display_flush(wl_vk_display->wl_display); + + TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)", + wl_vk_buffer->bo_name); + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + wl_vk_buffer->need_to_commit = TPL_FALSE; + wl_vk_buffer->status = COMMITTED; + + tpl_gcond_signal(&wl_vk_buffer->cond); + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + TPL_LOG_T("WL_VK", + "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface, + wl_vk_buffer->bo_name); + + if (wl_vk_display->use_wait_vblank && + _thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE) + TPL_ERR("Failed to set wait vblank."); +} + tpl_bool_t __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) { -- 2.7.4