From 51d6a077c2c6e91e11e53090b2e3a1af2a8a7c82 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 1 Apr 2021 16:52:12 +0900 Subject: [PATCH 01/16] Package version up to 1.8.4 Change-Id: Icdb52e525fe619af52049250dbbc731e0334653d Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index 41e011c..8a81e7f 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 8 -%define TPL_VERSION_PATCH 3 +%define TPL_VERSION_PATCH 4 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From f528e5c50e88d91703185a9040238f934eacf156 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 17 Mar 2021 18:47:54 +0900 Subject: [PATCH 02/16] Modified structures to be used in the vulkan backend. Change-Id: Ia5c4c0843b150f988416c38cefed569b13909272 Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 262 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 201 insertions(+), 61 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index cb4f549..74e7e0a 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -3,31 +3,171 @@ #include "tpl_internal.h" +#include +#include +#include +#include + +#include #include #include #include -#include +#include +#include +#include + +#include + +#include +#include + +#include "tpl_utils_gthread.h" + +#define BUFFER_ARRAY_SIZE 10 + +typedef struct _tpl_wl_vk_surface tpl_wl_vk_display_t; +typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t; +typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t; +typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t; + +struct _tpl_wl_vk_display { + tpl_gsource *disp_source; + tpl_gthread *thread; + tpl_gmutex wl_event_mutex; + + struct wl_display *wl_display; + struct wl_event_queue 
*ev_queue; + struct wayland_tbm_client *wl_tbm_client; + int last_error; /* errno of the last wl_display error*/ + + tpl_bool_t wl_initialized; + tpl_bool_t tdm_initialized; + + tdm_client *tdm_client; + tpl_gsource *tdm_source; + int tdm_display_fd; + + tpl_bool_t use_wait_vblank; + tpl_bool_t use_explicit_sync; + tpl_bool_t prepared; + + /* device surface capabilities */ + int min_buffer; + int max_buffer; + int present_modes; + + struct tizen_surface_shm *tss; /* used for surface buffer_flush */ + struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */ +}; + +struct _tpl_wl_vk_swapchain { + tpl_wl_vk_surface_t *wl_vk_surface; + + struct { + int width; + int height; + tbm_format format; + int buffer_count; + int present_mode; + } properties; + + tbm_surface_h *swapchain_buffers; + + tpl_util_atomic_uint ref_cnt; +}; + +struct _tpl_wl_vk_surface { + tpl_gsource *surf_source; + + tpl_wl_vk_swapchain_t *swapchain; + + tbm_surface_queue_h tbm_queue; + + struct wl_surface *wl_surface; + struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ + struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */ + + tdm_client_vblank *vblank; -#include "tpl_wayland_egl_thread.h" + /* surface information */ + int render_done_cnt; -typedef struct _tpl_wayland_vk_wsi_display tpl_wayland_vk_wsi_display_t; -typedef struct _tpl_wayland_vk_wsi_surface tpl_wayland_vk_wsi_surface_t; -typedef struct _tpl_wayland_vk_wsi_buffer tpl_wayland_vk_wsi_buffer_t; + tpl_wl_vk_display_t *wl_vk_display; + tpl_surface_t *tpl_surface; + + /* wl_vk_buffer array for buffer tracing */ + tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE]; + int buffer_cnt; /* the number of using wl_vk_buffers */ + tpl_gmutex buffers_mutex; + + tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */ + + tpl_gmutex surf_mutex; + tpl_gcond surf_cond; + + /* for waiting draw done */ + tpl_bool_t is_activated; + tpl_bool_t 
reset; /* TRUE if queue reseted by external */ + tpl_bool_t vblank_done; +}; -struct _tpl_wayland_vk_wsi_display { - twe_thread *wl_thread; - twe_display_h twe_display; +typedef enum buffer_status { + RELEASED = 0, // 0 + DEQUEUED, // 1 + ENQUEUED, // 2 + ACQUIRED, // 3 + WAITING_SIGNALED, // 4 + WAITING_VBLANK, // 5 + COMMITTED, // 6 +} buffer_status_t; + +static const char *status_to_string[7] = { + "RELEASED", // 0 + "DEQUEUED", // 1 + "ENQUEUED", // 2 + "ACQUIRED", // 3 + "WAITING_SIGNALED", // 4 + "WAITING_VBLANK", // 5 + "COMMITTED", // 6 }; -struct _tpl_wayland_vk_wsi_surface { - twe_surface_h twe_surface; - tbm_surface_queue_h tbm_queue; - tbm_surface_h *swapchain_buffers; - int buffer_count; - tpl_bool_t is_activated; - tpl_bool_t reset; - tpl_util_atomic_uint swapchain_reference; +struct _tpl_wl_vk_buffer { + tbm_surface_h tbm_surface; + int bo_name; + + struct wl_proxy *wl_buffer; + int dx, dy; /* position to attach to wl_surface */ + int width, height; /* size to attach to wl_surface */ + + buffer_status_t status; /* for tracing buffer status */ + int idx; /* position index in buffers array of wl_vk_surface */ + + /* for damage region */ + int num_rects; + int *rects; + + /* for checking need_to_commit (frontbuffer mode) */ + tpl_bool_t need_to_commit; + + /* to get release event via zwp_linux_buffer_release_v1 */ + struct zwp_linux_buffer_release_v1 *buffer_release; + + /* each buffers own its release_fence_fd, until it passes ownership + * to it to EGL */ + int32_t release_fence_fd; + + /* each buffers own its acquire_fence_fd. 
+ * If it use zwp_linux_buffer_release_v1 the ownership of this fd + * will be passed to display server + * Otherwise it will be used as a fence waiting for render done + * on tpl thread */ + int32_t acquire_fence_fd; + + tpl_gmutex mutex; + tpl_gcond cond; + + tpl_wl_vk_surface_t *wl_vk_surface; }; static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( @@ -47,7 +187,7 @@ __tpl_wl_vk_wsi_display_is_wl_display(tpl_handle_t native_dpy) static tpl_result_t __tpl_wl_vk_wsi_display_init(tpl_display_t *display) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; TPL_ASSERT(display); @@ -57,10 +197,10 @@ __tpl_wl_vk_wsi_display_init(tpl_display_t *display) return TPL_ERROR_INVALID_PARAMETER; } - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) calloc(1, - sizeof(tpl_wayland_vk_wsi_display_t)); + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) calloc(1, + sizeof(tpl_wl_vk_display_t)); if (!wayland_vk_wsi_display) { - TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_display_t."); + TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t."); return TPL_ERROR_OUT_OF_MEMORY; } @@ -117,11 +257,11 @@ free_display: static void __tpl_wl_vk_wsi_display_fini(tpl_display_t *display) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display; + tpl_wl_vk_display_t *wayland_vk_wsi_display; TPL_ASSERT(display); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; if (wayland_vk_wsi_display) { TPL_LOG_T("WL_VK", @@ -195,13 +335,13 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( tpl_display_t *display, tpl_handle_t window, int *min, int *max) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = 
(tpl_wayland_vk_wsi_display_t *)display->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; @@ -221,13 +361,13 @@ __tpl_wl_vk_wsi_display_query_window_supported_present_modes( tpl_display_t *display, tpl_handle_t window, int *modes) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; @@ -247,23 +387,23 @@ __tpl_wl_vk_wsi_display_query_window_supported_present_modes( static tpl_result_t __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; twe_surface_h twe_surface = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); TPL_ASSERT(surface->native_handle); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) calloc(1, - sizeof(tpl_wayland_vk_wsi_surface_t)); + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) calloc(1, + sizeof(tpl_wl_vk_surface_t)); if (!wayland_vk_wsi_surface) { - TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_surface_t."); + TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t."); return TPL_ERROR_OUT_OF_MEMORY; } wayland_vk_wsi_display = - (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + (tpl_wl_vk_display_t *)surface->display->backend.data; if (!wayland_vk_wsi_display) { TPL_ERR("Invalid parameter. 
wayland_vk_wsi_display(%p)", wayland_vk_wsi_display); @@ -291,7 +431,7 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) wayland_vk_wsi_surface->swapchain_buffers = NULL; TPL_LOG_T("WL_VK", - "[INIT]tpl_surface(%p) tpl_wayland_vk_wsi_surface(%p) twe_surface(%p)", + "[INIT]tpl_surface(%p) tpl_wl_vk_surface(%p) twe_surface(%p)", surface, wayland_vk_wsi_surface, twe_surface); return TPL_ERROR_NONE; @@ -300,16 +440,16 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) static void __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; if (wayland_vk_wsi_surface == NULL) return; - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; if (wayland_vk_wsi_display == NULL) return; @@ -351,8 +491,8 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display->native_handle); TPL_ASSERT(tbm_surface); - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + (tpl_wl_vk_surface_t *) surface->backend.data; tbm_surface_queue_error_e tsq_err; if (!tbm_surface_internal_is_valid(tbm_surface)) { @@ -404,8 +544,8 @@ __tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + (tpl_wl_vk_surface_t 
*)surface->backend.data; return !(wayland_vk_wsi_surface->reset); } @@ -414,10 +554,10 @@ static tpl_result_t __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; if (!wayland_vk_wsi_surface) { TPL_ERR("Invalid backend surface. surface(%p) wayland_vk_wsi_surface(%p)", surface, wayland_vk_wsi_surface); @@ -454,10 +594,10 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display); tbm_surface_h tbm_surface = NULL; - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = - (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + (tpl_wl_vk_surface_t *)surface->backend.data; + tpl_wl_vk_display_t *wayland_vk_wsi_display = + (tpl_wl_vk_display_t *)surface->display->backend.data; tbm_surface_queue_error_e tsq_err = 0; tpl_result_t lock_res = TPL_ERROR_NONE; tpl_result_t res = TPL_ERROR_NONE; @@ -527,8 +667,8 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, tbm_surface_h **buffers, int *buffer_count) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; int i; tpl_result_t ret = TPL_ERROR_NONE; @@ -539,8 +679,8 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, TPL_ASSERT(buffers); TPL_ASSERT(buffer_count); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t 
*)surface->backend.data; - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)surface->display->backend.data; if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, @@ -596,13 +736,13 @@ __cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue, void *data) { tpl_surface_t *surface = NULL; - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; tpl_bool_t is_activated = TPL_FALSE; surface = (tpl_surface_t *)data; TPL_CHECK_ON_NULL_RETURN(surface); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface); /* When queue_reset_callback is called, if is_activated is different from @@ -634,18 +774,18 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, tbm_format format, int width, int height, int buffer_count, int present_mode) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; TPL_ASSERT(wayland_vk_wsi_surface); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; TPL_ASSERT(wayland_vk_wsi_display); @@ -721,8 +861,8 @@ 
__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; unsigned int ref; @@ -731,8 +871,8 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) TPL_ASSERT(surface->display); TPL_ASSERT(surface->display->backend.data); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) surface->display->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference); -- 2.7.4 From 368965fc67d639933dc8849dde6dd15fd2bc1056 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 29 Mar 2021 11:07:47 +0900 Subject: [PATCH 03/16] tpl_wl_vk_thread: Modified wl_vk_display to use tpl_gthread_util Change-Id: I3fb5c37a1a2850a95d1218dc607f6c190e94da1c Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 748 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 618 insertions(+), 130 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 74e7e0a..3a846b9 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -25,8 +25,9 @@ #include "tpl_utils_gthread.h" #define BUFFER_ARRAY_SIZE 10 +#define VK_CLIENT_QUEUE_SIZE 3 -typedef struct _tpl_wl_vk_surface tpl_wl_vk_display_t; +typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t; typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t; typedef struct _tpl_wl_vk_swapchain 
tpl_wl_vk_swapchain_t; typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t; @@ -57,7 +58,6 @@ struct _tpl_wl_vk_display { int max_buffer; int present_modes; - struct tizen_surface_shm *tss; /* used for surface buffer_flush */ struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */ }; @@ -86,7 +86,6 @@ struct _tpl_wl_vk_surface { struct wl_surface *wl_surface; struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ - struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */ tdm_client_vblank *vblank; @@ -173,120 +172,624 @@ struct _tpl_wl_vk_buffer { static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( tpl_surface_t *surface); -static TPL_INLINE tpl_bool_t -__tpl_wl_vk_wsi_display_is_wl_display(tpl_handle_t native_dpy) +static tpl_bool_t +_check_native_handle_is_wl_display(tpl_handle_t native_dpy) { - if (!native_dpy) return TPL_FALSE; + struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy; + + if (!wl_vk_native_dpy) { + TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy); + return TPL_FALSE; + } + + /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value + is a memory address pointing the structure of wl_display_interface. 
*/ + if (wl_vk_native_dpy == &wl_display_interface) + return TPL_TRUE; + + if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name, + strlen(wl_display_interface.name)) == 0) { + return TPL_TRUE; + } + + return TPL_FALSE; +} + +static tpl_bool_t +__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_vk_display_t *wl_vk_display = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; + + TPL_IGNORE(message); + + wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + if (!wl_vk_display) { + TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource); + TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); + return TPL_FALSE; + } + + tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client); + + /* If an error occurs in tdm_client_handle_events, it cannot be recovered. + * When tdm_source is no longer available due to an unexpected situation, + * wl_egl_thread must remove it from the thread and destroy it. + * In that case, tdm_vblank can no longer be used for surfaces and displays + * that used this tdm_source. */ + if (tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Error occured in tdm_client_handle_events. 
tdm_err(%d)", + tdm_err); + TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); + + tpl_gsource_destroy(gsource, TPL_FALSE); + + wl_vk_display->tdm_source = NULL; + + return TPL_FALSE; + } + + return TPL_TRUE; +} + +static void +__thread_func_tdm_finalize(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = NULL; + + wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + TPL_LOG_T("WL_VK", + "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)", + wl_vk_display, wl_vk_display->tdm_client, gsource); + + if (wl_vk_display->tdm_client) { + tdm_client_destroy(wl_vk_display->tdm_client); + wl_vk_display->tdm_client = NULL; + wl_vk_display->tdm_display_fd = -1; + } + + wl_vk_display->tdm_initialized = TPL_FALSE; +} + +static tpl_gsource_functions tdm_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_tdm_dispatch, + .finalize = __thread_func_tdm_finalize, +}; + +tpl_result_t +_thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display) +{ + tdm_client *tdm_client = NULL; + int tdm_display_fd = -1; + tdm_error tdm_err = TDM_ERROR_NONE; + + tdm_client = tdm_client_create(&tdm_err); + if (!tdm_client || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err); + return TPL_ERROR_INVALID_OPERATION; + } + + tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd); + if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err); + tdm_client_destroy(tdm_client); + return TPL_ERROR_INVALID_OPERATION; + } + + wl_vk_display->tdm_display_fd = tdm_display_fd; + wl_vk_display->tdm_client = tdm_client; + wl_vk_display->tdm_source = NULL; + wl_vk_display->tdm_initialized = TPL_TRUE; + + TPL_INFO("[TDM_CLIENT_INIT]", + "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_vk_display, tdm_client, tdm_display_fd); + + return TPL_ERROR_NONE; +} + +#define IMPL_TIZEN_SURFACE_SHM_VERSION 2 + +static void 
+__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, + uint32_t name, const char *interface, + uint32_t version) +{ + tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data; + + if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) { + char *env = tpl_getenv("TPL_EFS"); + if (env && !atoi(env)) { + wl_vk_display->use_explicit_sync = TPL_FALSE; + } else { + wl_vk_display->explicit_sync = + wl_registry_bind(wl_registry, name, + &zwp_linux_explicit_synchronization_v1_interface, 1); + wl_vk_display->use_explicit_sync = TPL_TRUE; + TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface"); + } + } +} + +static void +__cb_wl_resistry_global_remove_callback(void *data, + struct wl_registry *wl_registry, + uint32_t name) +{ +} + +static const struct wl_registry_listener registry_listener = { + __cb_wl_resistry_global_callback, + __cb_wl_resistry_global_remove_callback +}; + +static void +_wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display, + const char *func_name) +{ + int dpy_err; + char buf[1024]; + strerror_r(errno, buf, sizeof(buf)); + + if (wl_vk_display->last_error == errno) + return; + + TPL_ERR("falied to %s. 
error:%d(%s)", func_name, errno, buf); + + dpy_err = wl_display_get_error(wl_vk_display->wl_display); + if (dpy_err == EPROTO) { + const struct wl_interface *err_interface; + uint32_t err_proxy_id, err_code; + err_code = wl_display_get_protocol_error(wl_vk_display->wl_display, + &err_interface, + &err_proxy_id); + TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d", + err_interface->name, err_code, err_proxy_id); + } + + wl_vk_display->last_error = errno; +} + +tpl_result_t +_thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display) +{ + struct wl_registry *registry = NULL; + struct wl_event_queue *queue = NULL; + struct wl_display *display_wrapper = NULL; + struct wl_proxy *wl_tbm = NULL; + struct wayland_tbm_client *wl_tbm_client = NULL; + int ret; + tpl_result_t result = TPL_ERROR_NONE; + + queue = wl_display_create_queue(wl_vk_display->wl_display); + if (!queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_vk_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display); + if (!wl_vk_display->ev_queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_vk_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display); + if (!display_wrapper) { + TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)", + wl_vk_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue); + + registry = wl_display_get_registry(display_wrapper); + if (!registry) { + TPL_ERR("Failed to create wl_registry"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_wrapper_destroy(display_wrapper); + display_wrapper = NULL; + + wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display); + if (!wl_tbm_client) { + TPL_ERR("Failed to initialize 
wl_tbm_client."); + result = TPL_ERROR_INVALID_CONNECTION; + goto fini; + } + + wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client); + if (!wl_tbm) { + TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client); + result = TPL_ERROR_INVALID_CONNECTION; + goto fini; + } + + wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue); + wl_vk_display->wl_tbm_client = wl_tbm_client; + + if (wl_registry_add_listener(registry, ®istry_listener, + wl_vk_display)) { + TPL_ERR("Failed to wl_registry_add_listener"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue); + if (ret == -1) { + _wl_display_print_err(wl_vk_display, "roundtrip_queue"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + if (wl_vk_display->explicit_sync) { + wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync, + wl_vk_display->ev_queue); + TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", + wl_vk_display->explicit_sync); + } + + wl_vk_display->wl_initialized = TPL_TRUE; + + TPL_INFO("[WAYLAND_INIT]", + "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)", + wl_vk_display, wl_vk_display->wl_display, + wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue); + TPL_INFO("[WAYLAND_INIT]", + "explicit_sync(%p)", + wl_vk_display->explicit_sync); + +fini: + if (display_wrapper) + wl_proxy_wrapper_destroy(display_wrapper); + if (registry) + wl_registry_destroy(registry); + if (queue) + wl_event_queue_destroy(queue); + + return result; +} + +void +_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display) +{ + /* If wl_vk_display is in prepared state, cancel it */ + if (wl_vk_display->prepared) { + wl_display_cancel_read(wl_vk_display->wl_display); + wl_vk_display->prepared = TPL_FALSE; + } + + if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display, + wl_vk_display->ev_queue) == -1) { + _wl_display_print_err(wl_vk_display, 
"dispatch_queue_pending"); + } - if (twe_check_native_handle_is_wl_display(native_dpy)) + if (wl_vk_display->explicit_sync) { + TPL_INFO("[EXPLICIT_SYNC_DESTROY]", + "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.", + wl_vk_display, wl_vk_display->explicit_sync); + zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync); + wl_vk_display->explicit_sync = NULL; + } + + if (wl_vk_display->wl_tbm_client) { + struct wl_proxy *wl_tbm = NULL; + + wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm( + wl_vk_display->wl_tbm_client); + if (wl_tbm) { + wl_proxy_set_queue(wl_tbm, NULL); + } + + TPL_INFO("[WL_TBM_DEINIT]", + "wl_vk_display(%p) wl_tbm_client(%p)", + wl_vk_display, wl_vk_display->wl_tbm_client); + wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client); + wl_vk_display->wl_tbm_client = NULL; + } + + wl_event_queue_destroy(wl_vk_display->ev_queue); + + wl_vk_display->wl_initialized = TPL_FALSE; + + TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)", + wl_vk_display, wl_vk_display->wl_display); +} + +static void* +_thread_init(void *data) +{ + tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data; + + if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) { + TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)", + wl_vk_display, wl_vk_display->wl_display); + } + + if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) { + TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED"); + } + + return wl_vk_display; +} + +static tpl_bool_t +__thread_func_disp_prepare(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + /* If this wl_vk_display is already prepared, + * do nothing in this function. */ + if (wl_vk_display->prepared) + return TPL_FALSE; + + /* If there is a last_error, there is no need to poll, + * so skip directly to dispatch. 
+ * prepare -> dispatch */ + if (wl_vk_display->last_error) return TPL_TRUE; + while (wl_display_prepare_read_queue(wl_vk_display->wl_display, + wl_vk_display->ev_queue) != 0) { + if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display, + wl_vk_display->ev_queue) == -1) { + _wl_display_print_err(wl_vk_display, "dispatch_queue_pending"); + } + } + + wl_vk_display->prepared = TPL_TRUE; + + wl_display_flush(wl_vk_display->wl_display); + return TPL_FALSE; } +static tpl_bool_t +__thread_func_disp_check(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + tpl_bool_t ret = TPL_FALSE; + + if (!wl_vk_display->prepared) + return ret; + + /* If prepared, but last_error is set, + * cancel_read is executed and FALSE is returned. + * That can lead to G_SOURCE_REMOVE by calling disp_prepare again + * and skipping disp_check from prepare to disp_dispatch. + * check -> prepare -> dispatch -> G_SOURCE_REMOVE */ + if (wl_vk_display->prepared && wl_vk_display->last_error) { + wl_display_cancel_read(wl_vk_display->wl_display); + return ret; + } + + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_read_events(wl_vk_display->wl_display) == -1) + _wl_display_print_err(wl_vk_display, "read_event"); + ret = TPL_TRUE; + } else { + wl_display_cancel_read(wl_vk_display->wl_display); + ret = TPL_FALSE; + } + + wl_vk_display->prepared = TPL_FALSE; + + return ret; +} + +static tpl_bool_t +__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + TPL_IGNORE(message); + + /* If there is last_error, SOURCE_REMOVE should be returned + * to remove the gsource from the main loop. 
+ * This is because wl_vk_display is not valid since last_error was set.*/ + if (wl_vk_display->last_error) { + return TPL_FALSE; + } + + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display, + wl_vk_display->ev_queue) == -1) { + _wl_display_print_err(wl_vk_display, "dispatch_queue_pending"); + } + } + + wl_display_flush(wl_vk_display->wl_display); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + + return TPL_TRUE; +} + +static void +__thread_func_disp_finalize(tpl_gsource *gsource) +{ + tpl_wl_vk_display_t *wl_vk_display = + (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource); + + if (wl_vk_display->wl_initialized) + _thread_wl_display_fini(wl_vk_display); + + TPL_LOG_T("WL_EGL", "finalize| wl_vk_display(%p) tpl_gsource(%p)", + wl_vk_display, gsource); + + return; +} + + +static tpl_gsource_functions disp_funcs = { + .prepare = __thread_func_disp_prepare, + .check = __thread_func_disp_check, + .dispatch = __thread_func_disp_dispatch, + .finalize = __thread_func_disp_finalize, +}; + static tpl_result_t __tpl_wl_vk_wsi_display_init(tpl_display_t *display) { - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; - TPL_ASSERT(display); + tpl_wl_vk_display_t *wl_vk_display = NULL; + /* Do not allow default display in wayland */ if (!display->native_handle) { TPL_ERR("Invalid native handle for display."); return TPL_ERROR_INVALID_PARAMETER; } - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) calloc(1, - sizeof(tpl_wl_vk_display_t)); - if (!wayland_vk_wsi_display) { + if (!_check_native_handle_is_wl_display(display->native_handle)) { + TPL_ERR("native_handle(%p) is not wl_display", display->native_handle); + return TPL_ERROR_INVALID_PARAMETER; + } + + wl_vk_display = (tpl_wl_vk_display_t *) calloc(1, + sizeof(tpl_wl_vk_display_t)); + if (!wl_vk_display) { TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t."); return TPL_ERROR_OUT_OF_MEMORY; } 
- display->backend.data = wayland_vk_wsi_display; + display->backend.data = wl_vk_display; + display->bufmgr_fd = -1; - if (twe_check_native_handle_is_wl_display(display->native_handle)) { - wayland_vk_wsi_display->wl_thread = twe_thread_create(); - if (!wayland_vk_wsi_display->wl_thread) { - TPL_ERR("Failed to create twe_thread."); - goto free_display; - } + wl_vk_display->tdm_initialized = TPL_FALSE; + wl_vk_display->wl_initialized = TPL_FALSE; + + wl_vk_display->ev_queue = NULL; + wl_vk_display->wl_display = (struct wl_display *)display->native_handle; + wl_vk_display->last_error = 0; + wl_vk_display->use_explicit_sync = TPL_FALSE; // default disabled + wl_vk_display->prepared = TPL_FALSE; + + /* Wayland Interfaces */ + wl_vk_display->explicit_sync = NULL; + wl_vk_display->wl_tbm_client = NULL; - wayland_vk_wsi_display->twe_display = - twe_display_add(wayland_vk_wsi_display->wl_thread, - display->native_handle, - display->backend.type); - if (!wayland_vk_wsi_display->twe_display) { - TPL_ERR("Failed to add native_display(%p) to thread(%p)", - display->native_handle, - wayland_vk_wsi_display->wl_thread); - goto free_display; + /* Vulkan specific surface capabilities */ + wl_vk_display->min_buffer = 2; + wl_vk_display->max_buffer = VK_CLIENT_QUEUE_SIZE; + wl_vk_display->present_modes = TPL_DISPLAY_PRESENT_MODE_FIFO; + + wl_vk_display->use_wait_vblank = TPL_TRUE; // default enabled + { + char *env = tpl_getenv("TPL_WAIT_VBLANK"); + if (env && !atoi(env)) { + wl_vk_display->use_wait_vblank = TPL_FALSE; } + } - } else { - TPL_ERR("Invalid native handle for display."); + tpl_gmutex_init(&wl_vk_display->wl_event_mutex); + + /* Create gthread */ + wl_vk_display->thread = tpl_gthread_create("wl_egl_thread", + (tpl_gthread_func)_thread_init, + (void *)wl_vk_display); + if (!wl_vk_display->thread) { + TPL_ERR("Failed to create wl_egl_thread"); goto free_display; } - TPL_LOG_T("WL_VK", - "[INIT DISPLAY] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", - 
wayland_vk_wsi_display, - wayland_vk_wsi_display->wl_thread, - wayland_vk_wsi_display->twe_display); + wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread, + (void *)wl_vk_display, + wl_display_get_fd(wl_vk_display->wl_display), + &disp_funcs, SOURCE_TYPE_NORMAL); + if (!wl_vk_display->disp_source) { + TPL_ERR("Failed to add native_display(%p) to thread(%p)", + display->native_handle, + wl_vk_display->thread); + goto free_display; + } + + wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread, + (void *)wl_vk_display, + wl_vk_display->tdm_display_fd, + &tdm_funcs, SOURCE_TYPE_NORMAL); + if (!wl_vk_display->tdm_source) { + TPL_ERR("Failed to create tdm_gsource\n"); + goto free_display; + } + + TPL_INFO("[DISPLAY_INIT]", + "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_vk_display, + wl_vk_display->thread, + wl_vk_display->wl_display); + + TPL_INFO("[DISPLAY_INIT]", + "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)", + wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE", + wl_vk_display->use_explicit_sync ? 
"TRUE" : "FALSE"); return TPL_ERROR_NONE; free_display: - if (wayland_vk_wsi_display) { - if (wayland_vk_wsi_display->twe_display) - twe_display_del(wayland_vk_wsi_display->twe_display); - if (wayland_vk_wsi_display->wl_thread) - twe_thread_destroy(wayland_vk_wsi_display->wl_thread); - - wayland_vk_wsi_display->wl_thread = NULL; - wayland_vk_wsi_display->twe_display = NULL; + if (wl_vk_display->thread) { + if (wl_vk_display->tdm_source) + tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); + if (wl_vk_display->disp_source) + tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); - free(wayland_vk_wsi_display); - display->backend.data = NULL; + tpl_gthread_destroy(wl_vk_display->thread); } + wl_vk_display->thread = NULL; + free(wl_vk_display); + + display->backend.data = NULL; return TPL_ERROR_INVALID_OPERATION; } static void __tpl_wl_vk_wsi_display_fini(tpl_display_t *display) { - tpl_wl_vk_display_t *wayland_vk_wsi_display; + tpl_wl_vk_display_t *wl_vk_display; TPL_ASSERT(display); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; - if (wayland_vk_wsi_display) { - - TPL_LOG_T("WL_VK", - "[FINI] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", - wayland_vk_wsi_display, - wayland_vk_wsi_display->wl_thread, - wayland_vk_wsi_display->twe_display); - - if (wayland_vk_wsi_display->twe_display) { - tpl_result_t ret = TPL_ERROR_NONE; - ret = twe_display_del(wayland_vk_wsi_display->twe_display); - if (ret != TPL_ERROR_NONE) - TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)", - wayland_vk_wsi_display->twe_display, - wayland_vk_wsi_display->wl_thread); - wayland_vk_wsi_display->twe_display = NULL; + wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data; + if (wl_vk_display) { + TPL_INFO("[DISPLAY_FINI]", + "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_vk_display, + wl_vk_display->thread, + wl_vk_display->wl_display); + + if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) { + 
tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE); + wl_vk_display->tdm_source = NULL; + } + + if (wl_vk_display->disp_source) { + tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE); + wl_vk_display->disp_source = NULL; } - if (wayland_vk_wsi_display->wl_thread) { - twe_thread_destroy(wayland_vk_wsi_display->wl_thread); - wayland_vk_wsi_display->wl_thread = NULL; + if (wl_vk_display->thread) { + tpl_gthread_destroy(wl_vk_display->thread); + wl_vk_display->thread = NULL; } - free(wayland_vk_wsi_display); + tpl_gmutex_clear(&wl_vk_display->wl_event_mutex); + + free(wl_vk_display); } + display->backend.data = NULL; } @@ -335,23 +838,16 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( tpl_display_t *display, tpl_handle_t window, int *min, int *max) { - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; + wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); - if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; - - res = twe_display_get_buffer_count(wayland_vk_wsi_display->twe_display, - min, max); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to query buffer count. 
twe_display(%p)", - wayland_vk_wsi_display->twe_display); - return res; - } + if (min) *min = wl_vk_display->min_buffer; + if (max) *max = wl_vk_display->max_buffer; return TPL_ERROR_NONE; } @@ -359,26 +855,18 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( static tpl_result_t __tpl_wl_vk_wsi_display_query_window_supported_present_modes( tpl_display_t *display, - tpl_handle_t window, int *modes) + tpl_handle_t window, int *present_modes) { - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; - - if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; + wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); - if (modes) { - res = twe_display_get_present_mode(wayland_vk_wsi_display->twe_display, - modes); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to query present modes. twe_display(%p)", - wayland_vk_wsi_display->twe_display); - return res; - } + if (present_modes) { + *present_modes = wl_vk_display->present_modes; } return TPL_ERROR_NONE; @@ -388,7 +876,7 @@ static tpl_result_t __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; twe_surface_h twe_surface = NULL; TPL_ASSERT(surface); @@ -402,11 +890,11 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) return TPL_ERROR_OUT_OF_MEMORY; } - wayland_vk_wsi_display = + wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - if (!wayland_vk_wsi_display) { - TPL_ERR("Invalid parameter. wayland_vk_wsi_display(%p)", - wayland_vk_wsi_display); + if (!wl_vk_display) { + TPL_ERR("Invalid parameter. 
wl_vk_display(%p)", + wl_vk_display); free(wayland_vk_wsi_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -414,13 +902,13 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) surface->backend.data = (void *)wayland_vk_wsi_surface; wayland_vk_wsi_surface->tbm_queue = NULL; - twe_surface = twe_surface_add(wayland_vk_wsi_display->wl_thread, - wayland_vk_wsi_display->twe_display, + twe_surface = twe_surface_add(wl_vk_display->thread, + wl_vk_display->twe_display, surface->native_handle, surface->format, surface->num_buffers); if (!twe_surface) { TPL_ERR("Failed to add native_surface(%p) to thread(%p)", - surface->native_handle, wayland_vk_wsi_display->wl_thread); + surface->native_handle, wl_vk_display->thread); free(wayland_vk_wsi_surface); surface->backend.data = NULL; return TPL_ERROR_OUT_OF_MEMORY; @@ -441,7 +929,7 @@ static void __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->display); @@ -449,9 +937,9 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; if (wayland_vk_wsi_surface == NULL) return; - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - if (wayland_vk_wsi_display == NULL) return; + if (wl_vk_display == NULL) return; if (wayland_vk_wsi_surface->tbm_queue) __tpl_wl_vk_wsi_surface_destroy_swapchain(surface); @@ -470,7 +958,7 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) != TPL_ERROR_NONE) { TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", wayland_vk_wsi_surface->twe_surface, - wayland_vk_wsi_display->wl_thread); + wl_vk_display->thread); } wayland_vk_wsi_surface->twe_surface = NULL; @@ -596,7 +1084,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, tbm_surface_h 
tbm_surface = NULL; tpl_wl_vk_surface_t *wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - tpl_wl_vk_display_t *wayland_vk_wsi_display = + tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; tbm_surface_queue_error_e tsq_err = 0; tpl_result_t lock_res = TPL_ERROR_NONE; @@ -607,7 +1095,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_OBJECT_UNLOCK(surface); TRACE_BEGIN("WAIT_DEQUEUEABLE"); - lock_res = twe_display_lock(wayland_vk_wsi_display->twe_display); + lock_res = twe_display_lock(wl_vk_display->twe_display); res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface, timeout_ns); TRACE_END(); @@ -617,13 +1105,13 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")", timeout_ns); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } else if (res != TPL_ERROR_NONE) { TPL_ERR("Invalid operation. twe_surface(%p) timeout_ns(%" PRIu64 ")", wayland_vk_wsi_surface->twe_surface, timeout_ns); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } @@ -631,7 +1119,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. 
Do not process dequeue.", wayland_vk_wsi_surface->tbm_queue); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } @@ -642,7 +1130,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d", wayland_vk_wsi_surface->tbm_queue, tsq_err); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return NULL; } @@ -657,7 +1145,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return tbm_surface; } @@ -668,7 +1156,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, int *buffer_count) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; int i; tpl_result_t ret = TPL_ERROR_NONE; @@ -680,15 +1168,15 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, TPL_ASSERT(buffer_count); wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - wayland_vk_wsi_display = (tpl_wl_vk_display_t *)surface->display->backend.data; + wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { + if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, NULL, buffer_count); if (ret != TPL_ERROR_NONE) { TPL_ERR("Failed to get buffer_count. 
twe_surface(%p)", wayland_vk_wsi_surface->twe_surface); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return ret; } @@ -697,7 +1185,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, sizeof(tbm_surface_h)); if (!wayland_vk_wsi_surface->swapchain_buffers) { TPL_ERR("Failed to allocate memory for buffers."); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_OUT_OF_MEMORY; } @@ -709,7 +1197,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, wayland_vk_wsi_surface, wayland_vk_wsi_surface->twe_surface); free(wayland_vk_wsi_surface->swapchain_buffers); wayland_vk_wsi_surface->swapchain_buffers = NULL; - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return ret; } @@ -725,7 +1213,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, *buffers = wayland_vk_wsi_surface->swapchain_buffers; - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); } return TPL_ERROR_NONE; @@ -775,7 +1263,7 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, int height, int buffer_count, int present_mode) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(surface); @@ -785,9 +1273,9 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; TPL_ASSERT(wayland_vk_wsi_surface); - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - TPL_ASSERT(wayland_vk_wsi_display); + TPL_ASSERT(wl_vk_display); if (wayland_vk_wsi_surface->tbm_queue) { int old_width = 
tbm_surface_queue_get_width(wayland_vk_wsi_surface->tbm_queue); @@ -862,7 +1350,7 @@ static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; unsigned int ref; @@ -872,15 +1360,15 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) TPL_ASSERT(surface->display->backend.data); wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; + wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { + if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference); if (ref > 0) { TPL_LOG_T("WL_VK", "This swapchain is still valid. | twe_surface(%p)", wayland_vk_wsi_surface->twe_surface); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } @@ -888,7 +1376,7 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) if (wayland_vk_wsi_surface->reset) { TPL_LOG_T("WL_VK", "Since reset is in the TRUE state, it will not be destroyed."); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } @@ -909,13 +1397,13 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) if (res != TPL_ERROR_NONE) { TPL_ERR("Failed to destroy swapchain. 
twe_surface(%p)", wayland_vk_wsi_surface->twe_surface); - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); return res; } wayland_vk_wsi_surface->tbm_queue = NULL; - twe_display_unlock(wayland_vk_wsi_display->twe_display); + twe_display_unlock(wl_vk_display->twe_display); } return TPL_ERROR_NONE; @@ -926,7 +1414,7 @@ __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) { if (!native_dpy) return TPL_FALSE; - if (twe_check_native_handle_is_wl_display(native_dpy)) + if (_check_native_handle_is_wl_display(native_dpy)) return TPL_TRUE; return TPL_FALSE; -- 2.7.4 From 5fefb2956af2ed1f6d8f4f048d86febd82e59946 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 1 Apr 2021 16:22:02 +0900 Subject: [PATCH 04/16] Implement tpl_wl_vk_surface using tpl_gthread_utils. Change-Id: Ibb58ef10fa02fb6220453c5c18b7760a7a4d9994 Signed-off-by: Joonbum Ko Re-implement tpl_wl_vk_surface using tpl_gthread_utils Change-Id: I59ce5fb2092f60956ac1a2322f701b4a610016fe Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 690 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 540 insertions(+), 150 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 3a846b9..fceee7e 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -64,6 +64,8 @@ struct _tpl_wl_vk_display { struct _tpl_wl_vk_swapchain { tpl_wl_vk_surface_t *wl_vk_surface; + tbm_surface_queue_h tbm_queue; + struct { int width; int height; @@ -82,8 +84,6 @@ struct _tpl_wl_vk_surface { tpl_wl_vk_swapchain_t *swapchain; - tbm_surface_queue_h tbm_queue; - struct wl_surface *wl_surface; struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ @@ -872,55 +872,341 @@ __tpl_wl_vk_wsi_display_query_window_supported_present_modes( return TPL_ERROR_NONE; } +static void +_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tbm_surface_queue_error_e tsq_err = 
TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tpl_bool_t need_to_release = TPL_FALSE; + tpl_bool_t need_to_cancel = TPL_FALSE; + buffer_status_t status = RELEASED; + int idx = 0; + + while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) { + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + wl_vk_buffer = wl_vk_surface->buffers[idx]; + + if (wl_vk_buffer) { + wl_vk_surface->buffers[idx] = NULL; + wl_vk_surface->buffer_cnt--; + } else { + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + idx++; + continue; + } + + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + status = wl_vk_buffer->status; + + TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)", + idx, wl_vk_buffer, + wl_vk_buffer->tbm_surface, + status_to_string[status]); + + if (status >= ENQUEUED) { + tpl_bool_t need_to_wait = TPL_FALSE; + tpl_result_t wait_result = TPL_ERROR_NONE; + + if (!wl_vk_display->use_explicit_sync && + status < WAITING_VBLANK) + need_to_wait = TPL_TRUE; + + if (wl_vk_display->use_explicit_sync && + status < COMMITTED) + need_to_wait = TPL_TRUE; + + if (need_to_wait) { + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond, + &wl_vk_buffer->mutex, + 16); /* 16ms */ + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + + status = wl_vk_buffer->status; + + if (wait_result == TPL_ERROR_TIME_OUT) + TPL_WARN("timeout occurred waiting signaled. wl_vk_buffer(%p)", + wl_vk_buffer); + } + } + + /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */ + /* It has been acquired but has not yet been released, so this + * buffer must be released.
*/ + need_to_release = (status >= ACQUIRED && status <= COMMITTED); + + /* After dequeue, it has not been enqueued yet + * so cancel_dequeue must be performed. */ + need_to_cancel = (status == DEQUEUED); + + if (swapchain && swapchain->tbm_queue) { + if (need_to_release) { + tsq_err = tbm_surface_queue_release(swapchain->tbm_queue, + wl_vk_buffer->tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", + wl_vk_buffer->tbm_surface, tsq_err); + } + + if (need_to_cancel) { + tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue, + wl_vk_buffer->tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)", + wl_vk_buffer->tbm_surface, tsq_err); + } + } + + wl_vk_buffer->status = RELEASED; + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + if (need_to_release || need_to_cancel) + tbm_surface_internal_unref(wl_vk_buffer->tbm_surface); + + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + + idx++; + } +} + +static tdm_client_vblank* +_thread_create_tdm_client_vblank(tdm_client *tdm_client) +{ + tdm_client_vblank *vblank = NULL; + tdm_client_output *tdm_output = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; + + if (!tdm_client) { + TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client); + return NULL; + } + + tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err); + if (!tdm_output || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err); + return NULL; + } + + vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err); + if (!vblank || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to create vblank. 
tdm_err(%d)", tdm_err); + return NULL; + } + + tdm_client_vblank_set_enable_fake(vblank, 1); + tdm_client_vblank_set_sync(vblank, 0); + + return vblank; +} + +static void +_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + + /* tbm_surface_queue will be created at swapchain_create */ + + wl_vk_surface->vblank = _thread_create_tdm_client_vblank( + wl_vk_display->tdm_client); + if (wl_vk_surface->vblank) { + TPL_INFO("[VBLANK_INIT]", + "wl_vk_surface(%p) tdm_client(%p) vblank(%p)", + wl_vk_surface, wl_vk_display->tdm_client, + wl_vk_surface->vblank); + } + + if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) { + wl_vk_surface->surface_sync = + zwp_linux_explicit_synchronization_v1_get_synchronization( + wl_vk_display->explicit_sync, wl_vk_surface->wl_surface); + if (wl_vk_surface->surface_sync) { + TPL_INFO("[EXPLICIT_SYNC_INIT]", + "wl_vk_surface(%p) surface_sync(%p)", + wl_vk_surface, wl_vk_surface->surface_sync); + } else { + TPL_WARN("Failed to create surface_sync. 
| wl_vk_surface(%p)", + wl_vk_surface); + wl_vk_display->use_explicit_sync = TPL_FALSE; + } + } + + wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc(); +} + +static void +_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + + TPL_INFO("[SURFACE_FINI]", + "wl_vk_surface(%p) wl_surface(%p)", + wl_vk_surface, wl_vk_surface->wl_surface); + + if (wl_vk_surface->vblank_waiting_buffers) { + __tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL); + wl_vk_surface->vblank_waiting_buffers = NULL; + } + + if (wl_vk_surface->surface_sync) { + TPL_INFO("[SURFACE_SYNC_DESTROY]", + "wl_vk_surface(%p) surface_sync(%p)", + wl_vk_surface, wl_vk_surface->surface_sync); + zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync); + wl_vk_surface->surface_sync = NULL; + } + + if (wl_vk_surface->vblank) { + TPL_INFO("[VBLANK_DESTROY]", + "wl_vk_surface(%p) vblank(%p)", + wl_vk_surface, wl_vk_surface->vblank); + tdm_client_vblank_destroy(wl_vk_surface->vblank); + wl_vk_surface->vblank = NULL; + } + + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); +} + +static tpl_bool_t +__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + + wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource); + + if (message == 1) { /* Initialize surface */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) initialize message received!", + wl_vk_surface); + _thread_wl_vk_surface_init(wl_vk_surface); + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } else if (message == 2) { /* Create tbm_surface_queue */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) queue creation message received!", + wl_vk_surface); + + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } else if (message == 3) { /* Acquirable 
message */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) acquirable message received!", + wl_vk_surface); + _thread_surface_queue_acquire(wl_vk_surface); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } else if (message == 4) { /* swapchain destroy */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!", + wl_vk_surface); + + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } + + return TPL_TRUE; +} + +static void +__thread_func_surf_finalize(tpl_gsource *gsource) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + + wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource); + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); + + _thread_wl_vk_surface_fini(wl_vk_surface); + + TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)", + wl_vk_surface, gsource); +} + +static tpl_gsource_functions surf_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_surf_dispatch, + .finalize = __thread_func_surf_finalize, +}; + + static tpl_result_t __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wl_vk_display = NULL; - twe_surface_h twe_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; + tpl_gsource *surf_source = NULL; TPL_ASSERT(surface); + TPL_ASSERT(surface->display); TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); TPL_ASSERT(surface->native_handle); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) calloc(1, + wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); + + wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1, sizeof(tpl_wl_vk_surface_t)); - if (!wayland_vk_wsi_surface) { + if (!wl_vk_surface) { TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t."); return 
TPL_ERROR_OUT_OF_MEMORY; } - wl_vk_display = - (tpl_wl_vk_display_t *)surface->display->backend.data; - if (!wl_vk_display) { - TPL_ERR("Invalid parameter. wl_vk_display(%p)", - wl_vk_display); - free(wayland_vk_wsi_surface); - return TPL_ERROR_INVALID_PARAMETER; + surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface, + -1, &surf_funcs, SOURCE_TYPE_NORMAL); + if (!surf_source) { + TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)", + wl_vk_surface); + free(wl_vk_surface); + surface->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; } - surface->backend.data = (void *)wayland_vk_wsi_surface; - wayland_vk_wsi_surface->tbm_queue = NULL; + surface->backend.data = (void *)wl_vk_surface; + surface->width = -1; + surface->height = -1; - twe_surface = twe_surface_add(wl_vk_display->thread, - wl_vk_display->twe_display, - surface->native_handle, - surface->format, surface->num_buffers); - if (!twe_surface) { - TPL_ERR("Failed to add native_surface(%p) to thread(%p)", - surface->native_handle, wl_vk_display->thread); - free(wayland_vk_wsi_surface); - surface->backend.data = NULL; - return TPL_ERROR_OUT_OF_MEMORY; + wl_vk_surface->surf_source = surf_source; + wl_vk_surface->swapchain = NULL; + + wl_vk_surface->wl_vk_display = wl_vk_display; + wl_vk_surface->wl_surface = (struct wl_surface *)surface->native_handle; + + wl_vk_surface->reset = TPL_FALSE; + wl_vk_surface->is_activated = TPL_FALSE; + wl_vk_surface->vblank_done = TPL_FALSE; + + wl_vk_surface->render_done_cnt = 0; + + wl_vk_surface->vblank = NULL; + wl_vk_surface->surface_sync = NULL; + + { + int i = 0; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) + wl_vk_surface->buffers[i] = NULL; + wl_vk_surface->buffer_cnt = 0; } - wayland_vk_wsi_surface->twe_surface = twe_surface; - wayland_vk_wsi_surface->is_activated = TPL_FALSE; - wayland_vk_wsi_surface->swapchain_buffers = NULL; + tpl_gmutex_init(&wl_vk_surface->surf_mutex); + tpl_gcond_init(&wl_vk_surface->surf_cond); - 
TPL_LOG_T("WL_VK", - "[INIT]tpl_surface(%p) tpl_wl_vk_surface(%p) twe_surface(%p)", - surface, wayland_vk_wsi_surface, twe_surface); + tpl_gmutex_init(&wl_vk_surface->buffers_mutex); + + /* Initialize in thread */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + tpl_gsource_send_message(wl_vk_surface->surf_source, 1); + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + + TPL_INFO("[SURFACE_INIT]", + "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)", + surface, wl_vk_surface, wl_vk_surface->surf_source); return TPL_ERROR_NONE; } @@ -928,42 +1214,48 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) static void __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - if (wayland_vk_wsi_surface == NULL) return; + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - if (wl_vk_display == NULL) return; + TPL_CHECK_ON_NULL_RETURN(wl_vk_display); - if (wayland_vk_wsi_surface->tbm_queue) - __tpl_wl_vk_wsi_surface_destroy_swapchain(surface); + TPL_INFO("[SURFACE_FINI][BEGIN]", + "wl_vk_surface(%p) wl_surface(%p)", + wl_vk_surface, wl_vk_surface->wl_surface); - if (wayland_vk_wsi_surface->swapchain_buffers) { - free(wayland_vk_wsi_surface->swapchain_buffers); - wayland_vk_wsi_surface->swapchain_buffers = NULL; - } + if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) { + /* finalize swapchain */ - TPL_LOG_T("WL_VK", - "[FINI] wayland_vk_wsi_surface(%p) native_surface(%p) twe_surface(%p)", - wayland_vk_wsi_surface, surface->native_handle, - wayland_vk_wsi_surface->twe_surface); - - if 
(twe_surface_del(wayland_vk_wsi_surface->twe_surface) - != TPL_ERROR_NONE) { - TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", - wayland_vk_wsi_surface->twe_surface, - wl_vk_display->thread); } - wayland_vk_wsi_surface->twe_surface = NULL; + wl_vk_surface->swapchain = NULL; + + if (wl_vk_surface->surf_source) + tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE); + wl_vk_surface->surf_source = NULL; - free(wayland_vk_wsi_surface); + _print_buffer_lists(wl_vk_surface); + + wl_vk_surface->wl_surface = NULL; + wl_vk_surface->wl_vk_display = NULL; + wl_vk_surface->tpl_surface = NULL; + + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + tpl_gmutex_clear(&wl_vk_surface->surf_mutex); + tpl_gcond_clear(&wl_vk_surface->surf_cond); + + TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface); + + free(wl_vk_surface); surface->backend.data = NULL; } @@ -979,7 +1271,7 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display->native_handle); TPL_ASSERT(tbm_surface); - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; tbm_surface_queue_error_e tsq_err; @@ -999,7 +1291,7 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, num_rects, rects); } } - tsq_err = tbm_surface_queue_enqueue(wayland_vk_wsi_surface->tbm_queue, + tsq_err = tbm_surface_queue_enqueue(wl_vk_surface->tbm_queue, tbm_surface); if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) { tbm_surface_internal_unref(tbm_surface); @@ -1010,7 +1302,7 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, if (sync_fence != -1) { tpl_result_t res = TPL_ERROR_NONE; - res = twe_surface_set_sync_fd(wayland_vk_wsi_surface->twe_surface, + res = twe_surface_set_sync_fd(wl_vk_surface->twe_surface, tbm_surface, sync_fence); if (res != TPL_ERROR_NONE) { TPL_WARN("Failed to set sync_fd(%d). 
Fallback to async mode.", @@ -1032,23 +1324,23 @@ __tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - return !(wayland_vk_wsi_surface->reset); + return !(wl_vk_surface->reset); } static tpl_result_t __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - if (!wayland_vk_wsi_surface) { - TPL_ERR("Invalid backend surface. surface(%p) wayland_vk_wsi_surface(%p)", - surface, wayland_vk_wsi_surface); + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + if (!wl_vk_surface) { + TPL_ERR("Invalid backend surface. 
surface(%p) wl_vk_surface(%p)", + surface, wl_vk_surface); return TPL_ERROR_INVALID_PARAMETER; } @@ -1059,7 +1351,7 @@ __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); - tsq_err = tbm_surface_queue_cancel_dequeue(wayland_vk_wsi_surface->tbm_queue, + tsq_err = tbm_surface_queue_cancel_dequeue(wl_vk_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface); @@ -1082,7 +1374,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display); tbm_surface_h tbm_surface = NULL; - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; @@ -1096,7 +1388,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_OBJECT_UNLOCK(surface); TRACE_BEGIN("WAIT_DEQUEUEABLE"); lock_res = twe_display_lock(wl_vk_display->twe_display); - res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface, + res = twe_surface_wait_dequeueable(wl_vk_surface->twe_surface, timeout_ns); TRACE_END(); TPL_OBJECT_LOCK(surface); @@ -1109,26 +1401,26 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, return NULL; } else if (res != TPL_ERROR_NONE) { TPL_ERR("Invalid operation. twe_surface(%p) timeout_ns(%" PRIu64 ")", - wayland_vk_wsi_surface->twe_surface, timeout_ns); + wl_vk_surface->twe_surface, timeout_ns); if (lock_res == TPL_ERROR_NONE) twe_display_unlock(wl_vk_display->twe_display); return NULL; } - if (wayland_vk_wsi_surface->reset) { + if (wl_vk_surface->reset) { TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. 
Do not process dequeue.", - wayland_vk_wsi_surface->tbm_queue); + wl_vk_surface->tbm_queue); if (lock_res == TPL_ERROR_NONE) twe_display_unlock(wl_vk_display->twe_display); return NULL; } - tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue, + tsq_err = tbm_surface_queue_dequeue(wl_vk_surface->tbm_queue, &tbm_surface); if (!tbm_surface) { TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d", - wayland_vk_wsi_surface->tbm_queue, tsq_err); + wl_vk_surface->tbm_queue, tsq_err); if (lock_res == TPL_ERROR_NONE) twe_display_unlock(wl_vk_display->twe_display); return NULL; @@ -1141,7 +1433,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, } TPL_LOG_T("WL_VK", "[DEQ] tbm_queue(%p) tbm_surface(%p) bo(%d)", - wayland_vk_wsi_surface->tbm_queue, tbm_surface, + wl_vk_surface->tbm_queue, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); if (lock_res == TPL_ERROR_NONE) @@ -1155,7 +1447,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, tbm_surface_h **buffers, int *buffer_count) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; int i; tpl_result_t ret = TPL_ERROR_NONE; @@ -1167,51 +1459,51 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, TPL_ASSERT(buffers); TPL_ASSERT(buffer_count); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { - ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, + ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface, NULL, buffer_count); if (ret != TPL_ERROR_NONE) { TPL_ERR("Failed to get buffer_count. 
twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); twe_display_unlock(wl_vk_display->twe_display); return ret; } - wayland_vk_wsi_surface->swapchain_buffers = (tbm_surface_h *)calloc( + wl_vk_surface->swapchain_buffers = (tbm_surface_h *)calloc( *buffer_count, sizeof(tbm_surface_h)); - if (!wayland_vk_wsi_surface->swapchain_buffers) { + if (!wl_vk_surface->swapchain_buffers) { TPL_ERR("Failed to allocate memory for buffers."); twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_OUT_OF_MEMORY; } - ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, - wayland_vk_wsi_surface->swapchain_buffers, + ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface, + wl_vk_surface->swapchain_buffers, buffer_count); if (ret != TPL_ERROR_NONE) { - TPL_ERR("Failed to get swapchain_buffers. wayland_vk_wsi_surface(%p) twe_surface(%p)", - wayland_vk_wsi_surface, wayland_vk_wsi_surface->twe_surface); - free(wayland_vk_wsi_surface->swapchain_buffers); - wayland_vk_wsi_surface->swapchain_buffers = NULL; + TPL_ERR("Failed to get swapchain_buffers. 
wl_vk_surface(%p) twe_surface(%p)", + wl_vk_surface, wl_vk_surface->twe_surface); + free(wl_vk_surface->swapchain_buffers); + wl_vk_surface->swapchain_buffers = NULL; twe_display_unlock(wl_vk_display->twe_display); return ret; } for (i = 0; i < *buffer_count; i++) { - if (wayland_vk_wsi_surface->swapchain_buffers[i]) { + if (wl_vk_surface->swapchain_buffers[i]) { TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)", - i, wayland_vk_wsi_surface->swapchain_buffers[i], + i, wl_vk_surface->swapchain_buffers[i], tbm_bo_export(tbm_surface_internal_get_bo( - wayland_vk_wsi_surface->swapchain_buffers[i], 0))); - tbm_surface_internal_ref(wayland_vk_wsi_surface->swapchain_buffers[i]); + wl_vk_surface->swapchain_buffers[i], 0))); + tbm_surface_internal_ref(wl_vk_surface->swapchain_buffers[i]); } } - *buffers = wayland_vk_wsi_surface->swapchain_buffers; + *buffers = wl_vk_surface->swapchain_buffers; twe_display_unlock(wl_vk_display->twe_display); } @@ -1224,34 +1516,34 @@ __cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue, void *data) { tpl_surface_t *surface = NULL; - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_bool_t is_activated = TPL_FALSE; surface = (tpl_surface_t *)data; TPL_CHECK_ON_NULL_RETURN(surface); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface); + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); /* When queue_reset_callback is called, if is_activated is different from * its previous state change the reset flag to TPL_TRUE to get a new buffer * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. 
*/ - is_activated = twe_surface_check_activated(wayland_vk_wsi_surface->twe_surface); + is_activated = twe_surface_check_activated(wl_vk_surface->twe_surface); - if (wayland_vk_wsi_surface->is_activated != is_activated) { + if (wl_vk_surface->is_activated != is_activated) { if (is_activated) { TPL_LOG_T("WL_VK", - "[ACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", - wayland_vk_wsi_surface, surface_queue); + "[ACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)", + wl_vk_surface, surface_queue); } else { TPL_LOG_T("WL_VK", - "[DEACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", - wayland_vk_wsi_surface, surface_queue); + "[DEACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)", + wl_vk_surface, surface_queue); } - wayland_vk_wsi_surface->is_activated = is_activated; + wl_vk_surface->is_activated = is_activated; } - wayland_vk_wsi_surface->reset = TPL_TRUE; + wl_vk_surface->reset = TPL_TRUE; if (surface->reset_cb) surface->reset_cb(surface->reset_data); @@ -1262,7 +1554,7 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, tbm_format format, int width, int height, int buffer_count, int present_mode) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; @@ -1270,78 +1562,78 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, TPL_ASSERT(surface->backend.data); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - TPL_ASSERT(wayland_vk_wsi_surface); + wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + TPL_ASSERT(wl_vk_surface); wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; TPL_ASSERT(wl_vk_display); - if (wayland_vk_wsi_surface->tbm_queue) { - int old_width = tbm_surface_queue_get_width(wayland_vk_wsi_surface->tbm_queue); - int old_height = tbm_surface_queue_get_height(wayland_vk_wsi_surface->tbm_queue); + if 
(wl_vk_surface->tbm_queue) { + int old_width = tbm_surface_queue_get_width(wl_vk_surface->tbm_queue); + int old_height = tbm_surface_queue_get_height(wl_vk_surface->tbm_queue); if (old_width != width || old_height != height) { - tbm_surface_queue_reset(wayland_vk_wsi_surface->tbm_queue, + tbm_surface_queue_reset(wl_vk_surface->tbm_queue, width, height, format); TPL_LOG_T("WL_VK", - "[RESIZE] wayland_vk_wsi_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)", - wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue, + "[RESIZE] wl_vk_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)", + wl_vk_surface, wl_vk_surface->tbm_queue, old_width, old_height, width, height); } - if (wayland_vk_wsi_surface->swapchain_buffers) { + if (wl_vk_surface->swapchain_buffers) { int i; - for (i = 0; i < wayland_vk_wsi_surface->buffer_count; i++) { - if (wayland_vk_wsi_surface->swapchain_buffers[i]) { - TPL_DEBUG("unref tbm_surface(%p)", wayland_vk_wsi_surface->swapchain_buffers[i]); - tbm_surface_internal_unref(wayland_vk_wsi_surface->swapchain_buffers[i]); - wayland_vk_wsi_surface->swapchain_buffers[i] = NULL; + for (i = 0; i < wl_vk_surface->buffer_count; i++) { + if (wl_vk_surface->swapchain_buffers[i]) { + TPL_DEBUG("unref tbm_surface(%p)", wl_vk_surface->swapchain_buffers[i]); + tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]); + wl_vk_surface->swapchain_buffers[i] = NULL; } } - free(wayland_vk_wsi_surface->swapchain_buffers); - wayland_vk_wsi_surface->swapchain_buffers = NULL; + free(wl_vk_surface->swapchain_buffers); + wl_vk_surface->swapchain_buffers = NULL; } - wayland_vk_wsi_surface->buffer_count = - tbm_surface_queue_get_size(wayland_vk_wsi_surface->tbm_queue); - wayland_vk_wsi_surface->reset = TPL_FALSE; + wl_vk_surface->buffer_count = + tbm_surface_queue_get_size(wl_vk_surface->tbm_queue); + wl_vk_surface->reset = TPL_FALSE; - __tpl_util_atomic_inc(&wayland_vk_wsi_surface->swapchain_reference); + __tpl_util_atomic_inc(&wl_vk_surface->swapchain_reference); - 
TPL_LOG_T("WL_VK", "[REUSE] wayland_vk_wsi_surface(%p) tbm_queue(%p) size(%d)", - wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue, - wayland_vk_wsi_surface->buffer_count); + TPL_LOG_T("WL_VK", "[REUSE] wl_vk_surface(%p) tbm_queue(%p) size(%d)", + wl_vk_surface, wl_vk_surface->tbm_queue, + wl_vk_surface->buffer_count); return TPL_ERROR_NONE; } - res = twe_surface_create_swapchain(wayland_vk_wsi_surface->twe_surface, + res = twe_surface_create_swapchain(wl_vk_surface->twe_surface, width, height, format, buffer_count, present_mode); if (res != TPL_ERROR_NONE) { TPL_ERR("Failed to create swapchain. twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); return res; } - wayland_vk_wsi_surface->tbm_queue = twe_surface_get_tbm_queue( - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->tbm_queue = twe_surface_get_tbm_queue( + wl_vk_surface->twe_surface); /* Set reset_callback to tbm_queue */ - if (tbm_surface_queue_add_reset_cb(wayland_vk_wsi_surface->tbm_queue, + if (tbm_surface_queue_add_reset_cb(wl_vk_surface->tbm_queue, __cb_tbm_queue_reset_callback, (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("TBM surface queue add reset cb failed!"); - twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); - wayland_vk_wsi_surface->tbm_queue = NULL; + twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); + wl_vk_surface->tbm_queue = NULL; return TPL_ERROR_INVALID_OPERATION; } - wayland_vk_wsi_surface->buffer_count = buffer_count; - wayland_vk_wsi_surface->reset = TPL_FALSE; + wl_vk_surface->buffer_count = buffer_count; + wl_vk_surface->reset = TPL_FALSE; - __tpl_util_atomic_set(&wayland_vk_wsi_surface->swapchain_reference, 1); + __tpl_util_atomic_set(&wl_vk_surface->swapchain_reference, 1); return TPL_ERROR_NONE; } @@ -1349,7 +1641,7 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { - 
tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; unsigned int ref; @@ -1359,49 +1651,49 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) TPL_ASSERT(surface->display); TPL_ASSERT(surface->display->backend.data); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { - ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference); + ref = __tpl_util_atomic_dec(&wl_vk_surface->swapchain_reference); if (ref > 0) { TPL_LOG_T("WL_VK", "This swapchain is still valid. | twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } - if (wayland_vk_wsi_surface->reset) { + if (wl_vk_surface->reset) { TPL_LOG_T("WL_VK", "Since reset is in the TRUE state, it will not be destroyed."); twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } - if (wayland_vk_wsi_surface->swapchain_buffers) { + if (wl_vk_surface->swapchain_buffers) { int i; - for (i = 0; i < wayland_vk_wsi_surface->buffer_count; i++) { + for (i = 0; i < wl_vk_surface->buffer_count; i++) { TPL_DEBUG("Stop tracking tbm_surface(%p)", - wayland_vk_wsi_surface->swapchain_buffers[i]); - tbm_surface_internal_unref(wayland_vk_wsi_surface->swapchain_buffers[i]); - wayland_vk_wsi_surface->swapchain_buffers[i] = NULL; + wl_vk_surface->swapchain_buffers[i]); + tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]); + wl_vk_surface->swapchain_buffers[i] = NULL; } - free(wayland_vk_wsi_surface->swapchain_buffers); - wayland_vk_wsi_surface->swapchain_buffers = NULL; + free(wl_vk_surface->swapchain_buffers); + 
wl_vk_surface->swapchain_buffers = NULL; } - res = twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); + res = twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); if (res != TPL_ERROR_NONE) { TPL_ERR("Failed to destroy swapchain. twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); twe_display_unlock(wl_vk_display->twe_display); return res; } - wayland_vk_wsi_surface->tbm_queue = NULL; + wl_vk_surface->tbm_queue = NULL; twe_display_unlock(wl_vk_display->twe_display); } @@ -1458,3 +1750,101 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) backend->create_swapchain = __tpl_wl_vk_wsi_surface_create_swapchain; backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain; } + +static void +__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) +{ + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) { + wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL; + wl_egl_surface->buffer_cnt--; + + wl_egl_buffer->idx = -1; + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + + wl_display_flush(wl_egl_display->wl_display); + + if (wl_egl_buffer->wl_buffer) { + wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer); + wl_egl_buffer->wl_buffer = NULL; + } + + if (wl_egl_buffer->buffer_release) { + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + wl_egl_buffer->buffer_release = NULL; + } + + if (wl_egl_buffer->release_fence_fd != -1) { + close(wl_egl_buffer->release_fence_fd); + wl_egl_buffer->release_fence_fd = -1; + } + + if 
(wl_egl_buffer->waiting_source) { + tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); + wl_egl_buffer->waiting_source = NULL; + } + + if (wl_egl_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send commit_sync signal to fd(%d)", + wl_egl_buffer->commit_sync_fd); + close(wl_egl_buffer->commit_sync_fd); + wl_egl_buffer->commit_sync_fd = -1; + } + + if (wl_egl_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + wl_egl_buffer->presentation_sync_fd); + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; + } + + if (wl_egl_buffer->rects) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } + + wl_egl_buffer->tbm_surface = NULL; + wl_egl_buffer->bo_name = -1; + + free(wl_egl_buffer); +} + +static int +_get_tbm_surface_bo_name(tbm_surface_h tbm_surface) +{ + return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); +} + +static void +_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) +{ + int idx = 0; + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", + wl_egl_surface, wl_egl_surface->buffer_cnt); + for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { + tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; + if (wl_egl_buffer) { + TPL_INFO("[INFO]", + "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", + idx, wl_egl_buffer, wl_egl_buffer->tbm_surface, + wl_egl_buffer->bo_name, + status_to_string[wl_egl_buffer->status]); + } + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); +} -- 2.7.4 From 2b3e45de48873051c66365b2df997411b4019a21 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 9 Apr 2021 14:24:31 +0900 Subject: [PATCH 05/16] Implement swapchain 
create/destroy/get_swapchain_buffers. Change-Id: Ic6b07b8247230409808db35ec308dbee2df5861c Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 844 ++++++++++++++++++++++++++++++------------------- 1 file changed, 511 insertions(+), 333 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index fceee7e..caa2ce2 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -169,8 +169,16 @@ struct _tpl_wl_vk_buffer { tpl_wl_vk_surface_t *wl_vk_surface; }; -static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( - tpl_surface_t *surface); +static void +_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface); +static int +_get_tbm_surface_bo_name(tbm_surface_h tbm_surface); +static void +__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer); +static tpl_result_t +_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); +static void +_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); static tpl_bool_t _check_native_handle_is_wl_display(tpl_handle_t native_dpy) @@ -214,7 +222,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) /* If an error occurs in tdm_client_handle_events, it cannot be recovered. * When tdm_source is no longer available due to an unexpected situation, - * wl_egl_thread must remove it from the thread and destroy it. + * wl_vk_thread must remove it from the thread and destroy it. * In that case, tdm_vblank can no longer be used for surfaces and displays * that used this tdm_source. 
*/ if (tdm_err != TDM_ERROR_NONE) { @@ -434,7 +442,7 @@ _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display) if (wl_vk_display->explicit_sync) { wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync, wl_vk_display->ev_queue); - TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", + TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.", wl_vk_display->explicit_sync); } @@ -626,7 +634,7 @@ __thread_func_disp_finalize(tpl_gsource *gsource) if (wl_vk_display->wl_initialized) _thread_wl_display_fini(wl_vk_display); - TPL_LOG_T("WL_EGL", "finalize| wl_vk_display(%p) tpl_gsource(%p)", + TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)", wl_vk_display, gsource); return; @@ -697,11 +705,11 @@ __tpl_wl_vk_wsi_display_init(tpl_display_t *display) tpl_gmutex_init(&wl_vk_display->wl_event_mutex); /* Create gthread */ - wl_vk_display->thread = tpl_gthread_create("wl_egl_thread", + wl_vk_display->thread = tpl_gthread_create("wl_vk_thread", (tpl_gthread_func)_thread_init, (void *)wl_vk_display); if (!wl_vk_display->thread) { - TPL_ERR("Failed to create wl_egl_thread"); + TPL_ERR("Failed to create wl_vk_thread"); goto free_display; } @@ -1092,7 +1100,11 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_vk_surface->surf_mutex); TPL_DEBUG("wl_vk_surface(%p) queue creation message received!", wl_vk_surface); - + if (_thread_swapchain_create_tbm_queue(wl_vk_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to create tbm_queue. 
wl_vk_surface(%p)", + wl_vk_surface); + } tpl_gcond_signal(&wl_vk_surface->surf_cond); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } else if (message == 3) { /* Acquirable message */ @@ -1105,7 +1117,7 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_vk_surface->surf_mutex); TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!", wl_vk_surface); - + _thread_swapchain_destroy_tbm_queue(wl_vk_surface); tpl_gcond_signal(&wl_vk_surface->surf_cond); tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } @@ -1259,28 +1271,453 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) surface->backend.data = NULL; } +static void +__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, + void *data) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_surface_t *surface = NULL; + tpl_bool_t is_activated = TPL_FALSE; + int width, height; + + wl_vk_surface = (tpl_wl_vk_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); + + wl_vk_display = wl_vk_surface->wl_vk_display; + TPL_CHECK_ON_NULL_RETURN(wl_vk_display); + + surface = wl_vk_surface->tpl_surface; + TPL_CHECK_ON_NULL_RETURN(surface); + + swapchain = wl_vk_surface->swapchain; + TPL_CHECK_ON_NULL_RETURN(swapchain); + + /* When the queue is resized, change the reset flag to TPL_TRUE to reflect + * the changed window size at the next frame. 
*/ + width = tbm_surface_queue_get_width(tbm_queue); + height = tbm_surface_queue_get_height(tbm_queue); + if (surface->width != width || surface->height != height) { + TPL_INFO("[QUEUE_RESIZE]", + "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)", + wl_vk_surface, tbm_queue, + surface->width, surface->height, width, height); + } + + /* When queue_reset_callback is called, if is_activated is different from + * its previous state change the reset flag to TPL_TRUE to get a new buffer + * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ + is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client, + swapchain->tbm_queue); + if (wl_vk_surface->is_activated != is_activated) { + if (is_activated) { + TPL_INFO("[ACTIVATED]", + "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue); + } else { + TPL_LOG_T("[DEACTIVATED]", + " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue); + } + } + + wl_vk_surface->reset = TPL_TRUE; + + if (surface->reset_cb) + surface->reset_cb(surface->reset_data); +} + +static void +__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue, + void *data) +{ + TPL_IGNORE(tbm_queue); + + tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); + + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + + tpl_gsource_send_message(wl_vk_surface->surf_source, 3); + + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); +} + static tpl_result_t -__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface, - int num_rects, const int *rects, - tbm_fd sync_fence) +_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) +{ + TPL_ASSERT (wl_vk_surface); + + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tbm_surface_queue_h tbm_queue = NULL; 
+ tbm_bufmgr bufmgr = NULL; + unsigned int capability; + + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + + if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) { + TPL_ERR("buffer count(%d) must be higher than (%d)", + swapchain->properties.buffer_count, + wl_vk_display->min_buffer); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) { + TPL_ERR("buffer count(%d) must be lower than (%d)", + swapchain->properties.buffer_count, + wl_vk_display->max_buffer); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) { + TPL_ERR("Unsupported present_mode(%d)", + swapchain->properties.present_mode); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (swapchain->tbm_queue) { + int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue); + int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue); + + if (swapchain->swapchain_buffers) { + int i; + for (i = 0; i < swapchain->properties.buffer_count; i++) { + if (swapchain->swapchain_buffers[i]) { + TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]); + tbm_surface_internal_unref(swapchain->swapchain_buffers[i]); + swapchain->swapchain_buffers[i] = NULL; + } + } + + free(swapchain->swapchain_buffers); + swapchain->swapchain_buffers = NULL; + } + + if (old_width != swapchain->properties.width || + old_height != swapchain->properties.height) { + tbm_surface_queue_reset(swapchain->tbm_queue, + swapchain->properties.width, + swapchain->properties.height, + swapchain->properties.format); + TPL_INFO("[RESIZE]", + "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)", + wl_vk_surface, swapchain, swapchain->tbm_queue, + old_width, old_height, + swapchain->properties.width, + swapchain->properties.height); + } + + swapchain->properties.buffer_count = + 
tbm_surface_queue_get_size(swapchain->tbm_queue); + + wl_vk_surface->reset = TPL_FALSE; + + __tpl_util_atomic_inc(&swapchain->ref_cnt); + + TPL_INFO("[SWAPCHAIN_REUSE]", + "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)", + wl_vk_surface, swapchain, swapchain->tbm_queue, + swapchain->properties.buffer_count); + + return TPL_ERROR_NONE; + } + + bufmgr = tbm_bufmgr_init(-1); + capability = tbm_bufmgr_get_capability(bufmgr); + tbm_bufmgr_deinit(bufmgr); + + if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) { + tbm_queue = wayland_tbm_client_create_surface_queue_tiled( + wl_vk_display->wl_tbm_client, + wl_vk_surface->wl_surface, + swapchain->properties.buffer_count, + swapchain->properties.width, + swapchain->properties.height, + TBM_FORMAT_ARGB8888); + } else { + tbm_queue = wayland_tbm_client_create_surface_queue( + wl_vk_display->wl_tbm_client, + wl_vk_surface->wl_surface, + swapchain->properties.buffer_count, + swapchain->properties.width, + swapchain->properties.height, + TBM_FORMAT_ARGB8888); + } + + if (!tbm_queue) { + TPL_ERR("Failed to create tbm_queue. 
wl_vk_surface(%p)", + wl_vk_surface); + return TPL_ERROR_OUT_OF_MEMORY; + } + + if (tbm_surface_queue_set_modes( + tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) != + TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + if (tbm_surface_queue_add_reset_cb( + tbm_queue, + __cb_tbm_queue_reset_callback, + (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + if (tbm_surface_queue_add_acquirable_cb( + tbm_queue, + __cb_tbm_queue_acquirable_callback, + (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + swapchain->tbm_queue = tbm_queue; + + TPL_INFO("[TBM_QUEUE_CREATED]", + "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)", + wl_vk_surface, swapchain, tbm_queue); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, + tbm_format format, int width, + int height, int buffer_count, int present_mode) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_result_t res = TPL_ERROR_NONE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER); + + wl_vk_display = (tpl_wl_vk_display_t *) + surface->display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); + + swapchain = wl_vk_surface->swapchain; + + if (swapchain == NULL) { + swapchain = + 
(tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t)); + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY); + swapchain->tbm_queue = NULL; + } + + swapchain->properties.buffer_count = buffer_count; + swapchain->properties.width = width; + swapchain->properties.height = height; + swapchain->properties.present_mode = present_mode; + swapchain->wl_vk_surface = wl_vk_surface; + + wl_vk_surface->swapchain = swapchain; + + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + /* send swapchain create tbm_queue message */ + tpl_gsource_send_message(wl_vk_surface->surf_source, 2); + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + + TPL_CHECK_ON_FALSE_ASSERT_FAIL( + swapchain->tbm_queue != NULL, + "[CRITICAL FAIL] Failed to create tbm_surface_queue"); + + wl_vk_surface->reset = TPL_FALSE; + + __tpl_util_atomic_set(&swapchain->ref_cnt, 1); + + return TPL_ERROR_NONE; +} + +static void +_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) +{ + TPL_ASSERT(wl_vk_surface); + + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + + TPL_CHECK_ON_NULL_RETURN(swapchain); + + if (swapchain->tbm_queue) { + TPL_INFO("[TBM_QUEUE_DESTROY]", + "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)", + wl_vk_surface, swapchain, swapchain->tbm_queue); + tbm_surface_queue_destroy(swapchain->tbm_queue); + swapchain->tbm_queue = NULL; + } +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; + tpl_result_t res = TPL_ERROR_NONE; + unsigned int ref; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + + wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER); + + wl_vk_display = (tpl_wl_vk_display_t *) 
surface->display->backend.data; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER); + + swapchain = wl_vk_surface->swapchain; + if (!swapchain) { + TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.", + wl_vk_surface); + return TPL_ERROR_INVALID_OPERATION; + } + + if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) { + TPL_INFO("[DESTROY_SWAPCHAIN]", + "wl_vk_surface(%p) swapchain(%p) still valid.", + wl_vk_surface, swapchain); + return TPL_ERROR_NONE; + } + + TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]", + "wl_vk_surface(%p) swapchain(%p)", + wl_vk_surface, wl_vk_surface->swapchain); + + if (swapchain->swapchain_buffers) { + for (int i = 0; i < swapchain->properties.buffer_count; i++) { + if (swapchain->swapchain_buffers[i]) { + TPL_DEBUG("Stop tracking tbm_surface(%p)", + swapchain->swapchain_buffers[i]); + tbm_surface_internal_unref(swapchain->swapchain_buffers[i]); + swapchain->swapchain_buffers[i] = NULL; + } + } + + free(swapchain->swapchain_buffers); + swapchain->swapchain_buffers = NULL; + } + + _tpl_wl_vk_surface_buffer_clear(wl_vk_surface); + + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + tpl_gsource_send_message(wl_vk_surface->surf_source, 4); + tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + + _print_buffer_lists(wl_vk_surface); + + free(swapchain); + wl_vk_surface->swapchain = NULL; + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, + tbm_surface_h **buffers, + int *buffer_count) +{ TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); TPL_ASSERT(surface->display); - TPL_ASSERT(surface->display->native_handle); - TPL_ASSERT(tbm_surface); + TPL_ASSERT(surface->display->backend.data); tpl_wl_vk_surface_t *wl_vk_surface = - (tpl_wl_vk_surface_t *) surface->backend.data; - tbm_surface_queue_error_e tsq_err; + (tpl_wl_vk_surface_t *)surface->backend.data; + tpl_wl_vk_display_t 
*wl_vk_display = + (tpl_wl_vk_display_t *)surface->display->backend.data; + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tpl_result_t ret = TPL_ERROR_NONE; + int i; - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", - tbm_surface); - return TPL_ERROR_INVALID_PARAMETER; + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER); + + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + + if (!buffers) { + *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + return TPL_ERROR_NONE; + } + + swapchain->swapchain_buffers = (tbm_surface_h *)calloc( + *buffer_count, + sizeof(tbm_surface_h)); + if (!swapchain->swapchain_buffers) { + TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)", + *buffer_count); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + return TPL_ERROR_OUT_OF_MEMORY; + } + + ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client, + swapchain->tbm_queue, + swapchain->swapchain_buffers, + buffer_count); + if (!ret) { + TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)", + wl_vk_display->wl_tbm_client, swapchain->tbm_queue); + free(swapchain->swapchain_buffers); + swapchain->swapchain_buffers = NULL; + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + return TPL_ERROR_INVALID_OPERATION; + } + + for (i = 0; i < *buffer_count; i++) { + if (swapchain->swapchain_buffers[i]) { + TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)", + i, swapchain->swapchain_buffers[i], + _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i])); + tbm_surface_internal_ref(swapchain->swapchain_buffers[i]); + } } + *buffers = swapchain->swapchain_buffers; + + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); + + return TPL_ERROR_NONE; +} + +static tpl_result_t 
+__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, + tbm_fd sync_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wl_vk_surface_t *wl_vk_surface = + (tpl_wl_vk_surface_t *) surface->backend.data; + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + int bo_name = -1; + + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface), + TPL_ERROR_INVALID_PARAMETER); + + bo_name = _get_tbm_surface_bo_name(tbm_surface); + /* If there are received region information, * save it to buf_info in tbm_surface user_data using below API. */ if (num_rects && rects) { @@ -1442,265 +1879,6 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, return tbm_surface; } -static tpl_result_t -__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, - tbm_surface_h **buffers, - int *buffer_count) -{ - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tpl_wl_vk_display_t *wl_vk_display = NULL; - int i; - tpl_result_t ret = TPL_ERROR_NONE; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->display->backend.data); - TPL_ASSERT(buffers); - TPL_ASSERT(buffer_count); - - wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - - if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { - ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface, - NULL, buffer_count); - if (ret != TPL_ERROR_NONE) { - TPL_ERR("Failed to get buffer_count. 
twe_surface(%p)", - wl_vk_surface->twe_surface); - twe_display_unlock(wl_vk_display->twe_display); - return ret; - } - - wl_vk_surface->swapchain_buffers = (tbm_surface_h *)calloc( - *buffer_count, - sizeof(tbm_surface_h)); - if (!wl_vk_surface->swapchain_buffers) { - TPL_ERR("Failed to allocate memory for buffers."); - twe_display_unlock(wl_vk_display->twe_display); - return TPL_ERROR_OUT_OF_MEMORY; - } - - ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface, - wl_vk_surface->swapchain_buffers, - buffer_count); - if (ret != TPL_ERROR_NONE) { - TPL_ERR("Failed to get swapchain_buffers. wl_vk_surface(%p) twe_surface(%p)", - wl_vk_surface, wl_vk_surface->twe_surface); - free(wl_vk_surface->swapchain_buffers); - wl_vk_surface->swapchain_buffers = NULL; - twe_display_unlock(wl_vk_display->twe_display); - return ret; - } - - for (i = 0; i < *buffer_count; i++) { - if (wl_vk_surface->swapchain_buffers[i]) { - TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)", - i, wl_vk_surface->swapchain_buffers[i], - tbm_bo_export(tbm_surface_internal_get_bo( - wl_vk_surface->swapchain_buffers[i], 0))); - tbm_surface_internal_ref(wl_vk_surface->swapchain_buffers[i]); - } - } - - *buffers = wl_vk_surface->swapchain_buffers; - - twe_display_unlock(wl_vk_display->twe_display); - } - - return TPL_ERROR_NONE; -} - -static void -__cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue, - void *data) -{ - tpl_surface_t *surface = NULL; - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tpl_bool_t is_activated = TPL_FALSE; - - surface = (tpl_surface_t *)data; - TPL_CHECK_ON_NULL_RETURN(surface); - - wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); - - /* When queue_reset_callback is called, if is_activated is different from - * its previous state change the reset flag to TPL_TRUE to get a new buffer - * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. 
*/ - is_activated = twe_surface_check_activated(wl_vk_surface->twe_surface); - - if (wl_vk_surface->is_activated != is_activated) { - if (is_activated) { - TPL_LOG_T("WL_VK", - "[ACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)", - wl_vk_surface, surface_queue); - } else { - TPL_LOG_T("WL_VK", - "[DEACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)", - wl_vk_surface, surface_queue); - } - wl_vk_surface->is_activated = is_activated; - } - - wl_vk_surface->reset = TPL_TRUE; - - if (surface->reset_cb) - surface->reset_cb(surface->reset_data); -} - -static tpl_result_t -__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, - tbm_format format, int width, - int height, int buffer_count, int present_mode) -{ - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tpl_wl_vk_display_t *wl_vk_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(surface->display); - - wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - TPL_ASSERT(wl_vk_surface); - - wl_vk_display = (tpl_wl_vk_display_t *) - surface->display->backend.data; - TPL_ASSERT(wl_vk_display); - - if (wl_vk_surface->tbm_queue) { - int old_width = tbm_surface_queue_get_width(wl_vk_surface->tbm_queue); - int old_height = tbm_surface_queue_get_height(wl_vk_surface->tbm_queue); - - if (old_width != width || old_height != height) { - tbm_surface_queue_reset(wl_vk_surface->tbm_queue, - width, height, format); - TPL_LOG_T("WL_VK", - "[RESIZE] wl_vk_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)", - wl_vk_surface, wl_vk_surface->tbm_queue, - old_width, old_height, width, height); - } - - if (wl_vk_surface->swapchain_buffers) { - int i; - for (i = 0; i < wl_vk_surface->buffer_count; i++) { - if (wl_vk_surface->swapchain_buffers[i]) { - TPL_DEBUG("unref tbm_surface(%p)", wl_vk_surface->swapchain_buffers[i]); - tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]); - wl_vk_surface->swapchain_buffers[i] = NULL; - } - } - - 
free(wl_vk_surface->swapchain_buffers); - wl_vk_surface->swapchain_buffers = NULL; - } - - wl_vk_surface->buffer_count = - tbm_surface_queue_get_size(wl_vk_surface->tbm_queue); - wl_vk_surface->reset = TPL_FALSE; - - __tpl_util_atomic_inc(&wl_vk_surface->swapchain_reference); - - TPL_LOG_T("WL_VK", "[REUSE] wl_vk_surface(%p) tbm_queue(%p) size(%d)", - wl_vk_surface, wl_vk_surface->tbm_queue, - wl_vk_surface->buffer_count); - return TPL_ERROR_NONE; - } - - res = twe_surface_create_swapchain(wl_vk_surface->twe_surface, - width, height, format, - buffer_count, present_mode); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to create swapchain. twe_surface(%p)", - wl_vk_surface->twe_surface); - return res; - } - - wl_vk_surface->tbm_queue = twe_surface_get_tbm_queue( - wl_vk_surface->twe_surface); - - /* Set reset_callback to tbm_queue */ - if (tbm_surface_queue_add_reset_cb(wl_vk_surface->tbm_queue, - __cb_tbm_queue_reset_callback, - (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("TBM surface queue add reset cb failed!"); - twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); - wl_vk_surface->tbm_queue = NULL; - return TPL_ERROR_INVALID_OPERATION; - } - - wl_vk_surface->buffer_count = buffer_count; - wl_vk_surface->reset = TPL_FALSE; - - __tpl_util_atomic_set(&wl_vk_surface->swapchain_reference, 1); - - return TPL_ERROR_NONE; -} - -static tpl_result_t -__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) -{ - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tpl_wl_vk_display_t *wl_vk_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; - unsigned int ref; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->display->backend.data); - - wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; - wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; - - if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { - ref = 
__tpl_util_atomic_dec(&wl_vk_surface->swapchain_reference); - if (ref > 0) { - TPL_LOG_T("WL_VK", - "This swapchain is still valid. | twe_surface(%p)", - wl_vk_surface->twe_surface); - twe_display_unlock(wl_vk_display->twe_display); - return TPL_ERROR_NONE; - } - - - if (wl_vk_surface->reset) { - TPL_LOG_T("WL_VK", - "Since reset is in the TRUE state, it will not be destroyed."); - twe_display_unlock(wl_vk_display->twe_display); - return TPL_ERROR_NONE; - } - - if (wl_vk_surface->swapchain_buffers) { - int i; - for (i = 0; i < wl_vk_surface->buffer_count; i++) { - TPL_DEBUG("Stop tracking tbm_surface(%p)", - wl_vk_surface->swapchain_buffers[i]); - tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]); - wl_vk_surface->swapchain_buffers[i] = NULL; - } - - free(wl_vk_surface->swapchain_buffers); - wl_vk_surface->swapchain_buffers = NULL; - } - - res = twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); - if (res != TPL_ERROR_NONE) { - TPL_ERR("Failed to destroy swapchain. 
twe_surface(%p)", - wl_vk_surface->twe_surface); - twe_display_unlock(wl_vk_display->twe_display); - return res; - } - - wl_vk_surface->tbm_queue = NULL; - - twe_display_unlock(wl_vk_display->twe_display); - } - - return TPL_ERROR_NONE; -} - tpl_bool_t __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) { @@ -1752,74 +1930,74 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) } static void -__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) +__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer) { - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; - TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", - wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); + TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface); - tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) { - wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL; - wl_egl_surface->buffer_cnt--; + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) { + wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL; + wl_vk_surface->buffer_cnt--; - wl_egl_buffer->idx = -1; + wl_vk_buffer->idx = -1; } - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); - wl_display_flush(wl_egl_display->wl_display); + wl_display_flush(wl_vk_display->wl_display); - if (wl_egl_buffer->wl_buffer) { - wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, - (void *)wl_egl_buffer->wl_buffer); - wl_egl_buffer->wl_buffer = 
NULL; + if (wl_vk_buffer->wl_buffer) { + wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client, + (void *)wl_vk_buffer->wl_buffer); + wl_vk_buffer->wl_buffer = NULL; } - if (wl_egl_buffer->buffer_release) { - zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); - wl_egl_buffer->buffer_release = NULL; + if (wl_vk_buffer->buffer_release) { + zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); + wl_vk_buffer->buffer_release = NULL; } - if (wl_egl_buffer->release_fence_fd != -1) { - close(wl_egl_buffer->release_fence_fd); - wl_egl_buffer->release_fence_fd = -1; + if (wl_vk_buffer->release_fence_fd != -1) { + close(wl_vk_buffer->release_fence_fd); + wl_vk_buffer->release_fence_fd = -1; } - if (wl_egl_buffer->waiting_source) { - tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); - wl_egl_buffer->waiting_source = NULL; + if (wl_vk_buffer->waiting_source) { + tpl_gsource_destroy(wl_vk_buffer->waiting_source, TPL_FALSE); + wl_vk_buffer->waiting_source = NULL; } - if (wl_egl_buffer->commit_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (wl_vk_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_vk_buffer->commit_sync_fd); if (ret == -1) TPL_ERR("Failed to send commit_sync signal to fd(%d)", - wl_egl_buffer->commit_sync_fd); - close(wl_egl_buffer->commit_sync_fd); - wl_egl_buffer->commit_sync_fd = -1; + wl_vk_buffer->commit_sync_fd); + close(wl_vk_buffer->commit_sync_fd); + wl_vk_buffer->commit_sync_fd = -1; } - if (wl_egl_buffer->presentation_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (wl_vk_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_vk_buffer->presentation_sync_fd); if (ret == -1) TPL_ERR("Failed to send presentation_sync signal to fd(%d)", - wl_egl_buffer->presentation_sync_fd); - close(wl_egl_buffer->presentation_sync_fd); - wl_egl_buffer->presentation_sync_fd = -1; + 
wl_vk_buffer->presentation_sync_fd); + close(wl_vk_buffer->presentation_sync_fd); + wl_vk_buffer->presentation_sync_fd = -1; } - if (wl_egl_buffer->rects) { - free(wl_egl_buffer->rects); - wl_egl_buffer->rects = NULL; - wl_egl_buffer->num_rects = 0; + if (wl_vk_buffer->rects) { + free(wl_vk_buffer->rects); + wl_vk_buffer->rects = NULL; + wl_vk_buffer->num_rects = 0; } - wl_egl_buffer->tbm_surface = NULL; - wl_egl_buffer->bo_name = -1; + wl_vk_buffer->tbm_surface = NULL; + wl_vk_buffer->bo_name = -1; - free(wl_egl_buffer); + free(wl_vk_buffer); } static int @@ -1829,22 +2007,22 @@ _get_tbm_surface_bo_name(tbm_surface_h tbm_surface) } static void -_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) +_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface) { int idx = 0; - tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", - wl_egl_surface, wl_egl_surface->buffer_cnt); + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)", + wl_vk_surface, wl_vk_surface->buffer_cnt); for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { - tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; - if (wl_egl_buffer) { + tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx]; + if (wl_vk_buffer) { TPL_INFO("[INFO]", - "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", - idx, wl_egl_buffer, wl_egl_buffer->tbm_surface, - wl_egl_buffer->bo_name, - status_to_string[wl_egl_buffer->status]); + "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", + idx, wl_vk_buffer, wl_vk_buffer->tbm_surface, + wl_vk_buffer->bo_name, + status_to_string[wl_vk_buffer->status]); } } - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); } -- 2.7.4 From ed179b9b4bff5dfca418f48b678d5cde68684d51 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 26 Apr 2021 19:35:31 +0900 Subject: [PATCH 
06/16] Implement DEQ/CANCEL/ACQ/ENQ buffer. Change-Id: If00f9b4e7c2aedefc9b20b76ac7abb99b6e6202d Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 562 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 376 insertions(+), 186 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index caa2ce2..31dc351 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -27,6 +27,9 @@ #define BUFFER_ARRAY_SIZE 10 #define VK_CLIENT_QUEUE_SIZE 3 +static int wl_vk_buffer_key; +#define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key) + typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t; typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t; typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t; @@ -179,6 +182,8 @@ static tpl_result_t _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); static void _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); +static tpl_result_t +_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface); static tpl_bool_t _check_native_handle_is_wl_display(tpl_handle_t native_dpy) @@ -1111,7 +1116,11 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_vk_surface->surf_mutex); TPL_DEBUG("wl_vk_surface(%p) acquirable message received!", wl_vk_surface); - _thread_surface_queue_acquire(wl_vk_surface); + if (_thread_surface_queue_acquire(wl_vk_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to acquire from tbm_queue. 
wl_vk_surface(%p)", + wl_vk_surface); + } tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); } else if (message == 4) { /* swapchain destroy */ tpl_gmutex_lock(&wl_vk_surface->surf_mutex); @@ -1271,6 +1280,18 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) surface->backend.data = NULL; } +static tpl_bool_t +__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wl_vk_surface_t *wl_vk_surface = + (tpl_wl_vk_surface_t *)surface->backend.data; + + return !(wl_vk_surface->reset); +} + static void __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, void *data) @@ -1696,189 +1717,429 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, return TPL_ERROR_NONE; } -static tpl_result_t -__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface, - int num_rects, const int *rects, - tbm_fd sync_fence) +static void +__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer) { - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; - tpl_wl_vk_surface_t *wl_vk_surface = - (tpl_wl_vk_surface_t *) surface->backend.data; - tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - int bo_name = -1; + TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface); - TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); - TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER); - TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface), - TPL_ERROR_INVALID_PARAMETER); + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) { + 
wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL; + wl_vk_surface->buffer_cnt--; - bo_name = _get_tbm_surface_bo_name(tbm_surface); + wl_vk_buffer->idx = -1; + } + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); - /* If there are received region information, - * save it to buf_info in tbm_surface user_data using below API. */ - if (num_rects && rects) { - tpl_result_t ret = TPL_ERROR_NONE; - ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects); - if (ret != TPL_ERROR_NONE) { - TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)", - num_rects, rects); - } + wl_display_flush(wl_vk_display->wl_display); + + if (wl_vk_buffer->wl_buffer) { + wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client, + (void *)wl_vk_buffer->wl_buffer); + wl_vk_buffer->wl_buffer = NULL; } - tsq_err = tbm_surface_queue_enqueue(wl_vk_surface->tbm_queue, - tbm_surface); - if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) { - tbm_surface_internal_unref(tbm_surface); - } else { - TPL_ERR("Failed to enqeueue tbm_surface. | tsq_err = %d", tsq_err); - return TPL_ERROR_INVALID_OPERATION; + + if (wl_vk_buffer->buffer_release) { + zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); + wl_vk_buffer->buffer_release = NULL; } - if (sync_fence != -1) { - tpl_result_t res = TPL_ERROR_NONE; - res = twe_surface_set_sync_fd(wl_vk_surface->twe_surface, - tbm_surface, sync_fence); - if (res != TPL_ERROR_NONE) { - TPL_WARN("Failed to set sync_fd(%d). 
Fallback to async mode.", - sync_fence); - } + if (wl_vk_buffer->release_fence_fd != -1) { + close(wl_vk_buffer->release_fence_fd); + wl_vk_buffer->release_fence_fd = -1; } - TPL_LOG_T("WL_VK", "[ENQ] tbm_surface(%p) bo(%d) sync_fence(%d)", - tbm_surface, - tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)), - sync_fence); + if (wl_vk_buffer->rects) { + free(wl_vk_buffer->rects); + wl_vk_buffer->rects = NULL; + wl_vk_buffer->num_rects = 0; + } - return TPL_ERROR_NONE; + wl_vk_buffer->tbm_surface = NULL; + wl_vk_buffer->bo_name = -1; + + free(wl_vk_buffer); } -static tpl_bool_t -__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) +static tpl_wl_vk_buffer_t * +_get_wl_vk_buffer(tbm_surface_h tbm_surface) { - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - - tpl_wl_vk_surface_t *wl_vk_surface = - (tpl_wl_vk_surface_t *)surface->backend.data; - - return !(wl_vk_surface->reset); + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER, + (void **)&wl_vk_buffer); + return wl_vk_buffer; } -static tpl_result_t -__tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface) +static tpl_wl_vk_buffer_t * +_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface, + tbm_surface_h tbm_surface) { - tpl_wl_vk_surface_t *wl_vk_surface = NULL; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; - wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; - if (!wl_vk_surface) { - TPL_ERR("Invalid backend surface. surface(%p) wl_vk_surface(%p)", - surface, wl_vk_surface); - return TPL_ERROR_INVALID_PARAMETER; - } + wl_vk_buffer = _get_wl_vk_buffer(tbm_surface); - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Invalid buffer. 
tbm_surface(%p)", tbm_surface); - return TPL_ERROR_INVALID_PARAMETER; - } + if (!wl_vk_buffer) { + wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t)); + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL); - tbm_surface_internal_unref(tbm_surface); + tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER, + (tbm_data_free)__cb_wl_vk_buffer_free); + tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER, + wl_vk_buffer); - tsq_err = tbm_surface_queue_cancel_dequeue(wl_vk_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface); - return TPL_ERROR_INVALID_OPERATION; + wl_vk_buffer->wl_buffer = NULL; + wl_vk_buffer->tbm_surface = tbm_surface; + wl_vk_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface); + wl_vk_buffer->wl_vk_surface = wl_vk_surface; + + wl_vk_buffer->status = RELEASED; + + wl_vk_buffer->acquire_fence_fd = -1; + wl_vk_buffer->release_fence_fd = -1; + + wl_vk_buffer->dx = 0; + wl_vk_buffer->dy = 0; + wl_vk_buffer->width = tbm_surface_get_width(tbm_surface); + wl_vk_buffer->height = tbm_surface_get_height(tbm_surface); + + wl_vk_buffer->rects = NULL; + wl_vk_buffer->num_rects = 0; + + tpl_gmutex_init(&wl_vk_buffer->mutex); + tpl_gcond_init(&wl_vk_buffer->cond); + + tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); + { + int i; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) + if (wl_vk_surface->buffers[i] == NULL) break; + + /* If this exception is reached, + * it may be a critical memory leak problem. */ + if (i == BUFFER_ARRAY_SIZE) { + tpl_wl_vk_buffer_t *evicted_buffer = NULL; + int evicted_idx = 0; /* evict the frontmost buffer */ + + evicted_buffer = wl_vk_surface->buffers[evicted_idx]; + + TPL_WARN("wl_vk_surface(%p) buffers array is full. 
evict one.", + wl_vk_surface); + TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)", + evicted_buffer, evicted_buffer->tbm_surface, + status_to_string[evicted_buffer->status]); + + /* [TODO] need to think about whether there will be + * better modifications */ + wl_vk_surface->buffer_cnt--; + wl_vk_surface->buffers[evicted_idx] = NULL; + + i = evicted_idx; + } + + wl_vk_surface->buffer_cnt++; + wl_vk_surface->buffers[i] = wl_vk_buffer; + wl_vk_buffer->idx = i; + } + tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); + + TPL_INFO("[WL_VK_BUFFER_CREATE]", + "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_surface, wl_vk_buffer, tbm_surface, + wl_vk_buffer->bo_name); } - TPL_LOG_T("WL_VK", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)", - surface, tbm_surface); + wl_vk_buffer->need_to_commit = TPL_FALSE; + wl_vk_buffer->buffer_release = NULL; - return TPL_ERROR_NONE; + return wl_vk_buffer; } static tbm_surface_h __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, - uint64_t timeout_ns, - tbm_fd *sync_fence) + uint64_t timeout_ns, + int32_t *release_fence) { TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->backend.data); + TPL_OBJECT_CHECK_RETURN(surface, NULL); - tbm_surface_h tbm_surface = NULL; tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data; - tbm_surface_queue_error_e tsq_err = 0; - tpl_result_t lock_res = TPL_ERROR_NONE; - tpl_result_t res = TPL_ERROR_NONE; + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + + tpl_result_t res = TPL_ERROR_NONE; - if (sync_fence) - *sync_fence = -1; + tbm_surface_h tbm_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL); + 
TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL); TPL_OBJECT_UNLOCK(surface); TRACE_BEGIN("WAIT_DEQUEUEABLE"); - lock_res = twe_display_lock(wl_vk_display->twe_display); - res = twe_surface_wait_dequeueable(wl_vk_surface->twe_surface, - timeout_ns); + if (timeout_ns != UINT64_MAX) { + tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( + swapchain->tbm_queue, timeout_ns/1000); + } else { + tsq_err = tbm_surface_queue_can_dequeue( + swapchain->tbm_queue, 1); + } TRACE_END(); TPL_OBJECT_LOCK(surface); if (res == TPL_ERROR_TIME_OUT) { TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")", timeout_ns); - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); return NULL; } else if (res != TPL_ERROR_NONE) { - TPL_ERR("Invalid operation. twe_surface(%p) timeout_ns(%" PRIu64 ")", - wl_vk_surface->twe_surface, timeout_ns); - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); + TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p)", + wl_vk_surface, swapchain->tbm_queue); return NULL; } + tpl_gmutex_lock(&wl_vk_display->wl_event_mutex); + if (wl_vk_surface->reset) { - TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. 
Do not process dequeue.", - wl_vk_surface->tbm_queue); - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); + TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.", + swapchain, swapchain->tbm_queue); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); return NULL; } - - tsq_err = tbm_surface_queue_dequeue(wl_vk_surface->tbm_queue, + tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue, &tbm_surface); if (!tbm_surface) { - TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d", - wl_vk_surface->tbm_queue, tsq_err); - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); + TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d", + swapchain->tbm_queue, wl_vk_surface, tsq_err); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); return NULL; } tbm_surface_internal_ref(tbm_surface); - if (sync_fence) { - *sync_fence = twe_surface_create_sync_fd(tbm_surface); + wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer"); + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + wl_vk_buffer->status = DEQUEUED; + + if (release_fence) { + if (wl_vk_surface->surface_sync) { + *release_fence = wl_vk_buffer->release_fence_fd; + TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)", + wl_vk_surface, wl_vk_buffer, *release_fence); + wl_vk_buffer->release_fence_fd = -1; + } else { + *release_fence = -1; + } } - TPL_LOG_T("WL_VK", "[DEQ] tbm_queue(%p) tbm_surface(%p) bo(%d)", - wl_vk_surface->tbm_queue, tbm_surface, - tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + wl_vk_surface->reset = TPL_FALSE; - if (lock_res == TPL_ERROR_NONE) - twe_display_unlock(wl_vk_display->twe_display); + TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name, + release_fence ? 
*release_fence : -1); + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex); return tbm_surface; } +static tpl_result_t +__tpl_wl_vk_wsi_surface_cancel_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wl_vk_surface_t *wl_vk_surface = + (tpl_wl_vk_surface_t *)surface->backend.data; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface), + TPL_ERROR_INVALID_PARAMETER); + + swapchain = wl_vk_surface->swapchain; + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, + TPL_ERROR_INVALID_PARAMETER); + + wl_vk_buffer = _get_wl_vk_buffer(tbm_surface); + if (wl_vk_buffer) { + tpl_gmutex_lock(&wl_vk_buffer->mutex); + wl_vk_buffer->status = RELEASED; + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + } + + tbm_surface_internal_unref(tbm_surface); + + TPL_INFO("[CANCEL BUFFER]", + "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)", + wl_vk_surface, swapchain, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_OPERATION; + } + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, + int32_t acquire_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->backend.data); + TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); + + tpl_wl_vk_surface_t *wl_vk_surface = + (tpl_wl_vk_surface_t *) surface->backend.data; + 
tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + int bo_name = -1; + + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER); + TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface), + TPL_ERROR_INVALID_PARAMETER); + + wl_vk_buffer = _get_wl_vk_buffer(tbm_surface); + bo_name = wl_vk_buffer->bo_name; + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + /* If there are received region information, save it to wl_vk_buffer */ + if (num_rects && rects) { + if (wl_vk_buffer->rects != NULL) { + free(wl_vk_buffer->rects); + wl_vk_buffer->rects = NULL; + wl_vk_buffer->num_rects = 0; + } + + wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects)); + wl_vk_buffer->num_rects = num_rects; + + if (wl_vk_buffer->rects) { + memcpy((char *)wl_vk_buffer->rects, (char *)rects, + sizeof(int) * 4 * num_rects); + } else { + TPL_ERR("Failed to allocate memory for rects info."); + } + } + + if (wl_vk_buffer->acquire_fence_fd != -1) + close(wl_vk_buffer->acquire_fence_fd); + + wl_vk_buffer->acquire_fence_fd = acquire_fence; + + wl_vk_buffer->status = ENQUEUED; + TPL_LOG_T("WL_VK", + "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)", + wl_vk_buffer, tbm_surface, bo_name, acquire_fence); + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + tbm_surface_internal_unref(tbm_surface); + TPL_ERR("Failed to enqueue tbm_surface(%p). 
wl_vk_surface(%p) tsq_err=%d", + tbm_surface, wl_vk_surface, tsq_err); + return TPL_ERROR_INVALID_OPERATION; + } + + tbm_surface_internal_unref(tbm_surface); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tbm_surface_h tbm_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + tpl_bool_t ready_to_commit = TPL_TRUE; + + TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER); + + while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) { + tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue, + &tbm_surface); + if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to acquire from tbm_queue(%p)", + swapchain->tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + tbm_surface_internal_ref(tbm_surface); + + wl_vk_buffer = _get_wl_vk_buffer(tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL, + "wl_vk_buffer sould be not NULL"); + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + wl_vk_buffer->status = ACQUIRED; + + TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + if (wl_vk_buffer->wl_buffer == NULL) { + wl_vk_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + wl_vk_display->wl_tbm_client, tbm_surface); + + if (!wl_vk_buffer->wl_buffer) { + TPL_WARN("Failed to create wl_buffer. 
wl_tbm_client(%p) tbm_surface(%p)", + wl_vk_display->wl_tbm_client, tbm_surface); + } else { + TPL_LOG_T("WL_EGL", + "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface); + } + } + + if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done) + ready_to_commit = TPL_TRUE; + else { + wl_vk_buffer->status = WAITING_VBLANK; + __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer); + ready_to_commit = TPL_FALSE; + } + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + if (ready_to_commit) + _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer); + } + + return TPL_ERROR_NONE; +} + tpl_bool_t __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) { @@ -1920,7 +2181,7 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) backend->fini = __tpl_wl_vk_wsi_surface_fini; backend->validate = __tpl_wl_vk_wsi_surface_validate; backend->cancel_dequeued_buffer = - __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer; + __tpl_wl_vk_wsi_surface_cancel_buffer; backend->dequeue_buffer = __tpl_wl_vk_wsi_surface_dequeue_buffer; backend->enqueue_buffer = __tpl_wl_vk_wsi_surface_enqueue_buffer; backend->get_swapchain_buffers = @@ -1929,77 +2190,6 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain; } -static void -__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer) -{ - tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; - tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; - - TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", - wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface); - - tpl_gmutex_lock(&wl_vk_surface->buffers_mutex); - if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) { - wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL; - wl_vk_surface->buffer_cnt--; - 
- wl_vk_buffer->idx = -1; - } - tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex); - - wl_display_flush(wl_vk_display->wl_display); - - if (wl_vk_buffer->wl_buffer) { - wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client, - (void *)wl_vk_buffer->wl_buffer); - wl_vk_buffer->wl_buffer = NULL; - } - - if (wl_vk_buffer->buffer_release) { - zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); - wl_vk_buffer->buffer_release = NULL; - } - - if (wl_vk_buffer->release_fence_fd != -1) { - close(wl_vk_buffer->release_fence_fd); - wl_vk_buffer->release_fence_fd = -1; - } - - if (wl_vk_buffer->waiting_source) { - tpl_gsource_destroy(wl_vk_buffer->waiting_source, TPL_FALSE); - wl_vk_buffer->waiting_source = NULL; - } - - if (wl_vk_buffer->commit_sync_fd != -1) { - int ret = _write_to_eventfd(wl_vk_buffer->commit_sync_fd); - if (ret == -1) - TPL_ERR("Failed to send commit_sync signal to fd(%d)", - wl_vk_buffer->commit_sync_fd); - close(wl_vk_buffer->commit_sync_fd); - wl_vk_buffer->commit_sync_fd = -1; - } - - if (wl_vk_buffer->presentation_sync_fd != -1) { - int ret = _write_to_eventfd(wl_vk_buffer->presentation_sync_fd); - if (ret == -1) - TPL_ERR("Failed to send presentation_sync signal to fd(%d)", - wl_vk_buffer->presentation_sync_fd); - close(wl_vk_buffer->presentation_sync_fd); - wl_vk_buffer->presentation_sync_fd = -1; - } - - if (wl_vk_buffer->rects) { - free(wl_vk_buffer->rects); - wl_vk_buffer->rects = NULL; - wl_vk_buffer->num_rects = 0; - } - - wl_vk_buffer->tbm_surface = NULL; - wl_vk_buffer->bo_name = -1; - - free(wl_vk_buffer); -} - static int _get_tbm_surface_bo_name(tbm_surface_h tbm_surface) { -- 2.7.4 From 63f9a32027d784ca55abeb9c30e67acae980d9ee Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 27 Apr 2021 15:22:56 +0900 Subject: [PATCH 07/16] Implement buffer commit/release Change-Id: I7f6685b20da489603e9661333420a09980f93182 Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 352 ++++++++++++++++++++++++++++++++++++++++++++++++- 
1 file changed, 348 insertions(+), 4 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 31dc351..9bfb4ed 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -184,6 +184,9 @@ static void _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); static tpl_result_t _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface); +static void +_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface, + tpl_wl_vk_buffer_t *wl_vk_buffer); static tpl_bool_t _check_native_handle_is_wl_display(tpl_handle_t native_dpy) @@ -1520,7 +1523,6 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_wl_vk_swapchain_t *swapchain = NULL; - tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(surface); TPL_ASSERT(surface->display); @@ -1590,8 +1592,6 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) tpl_wl_vk_swapchain_t *swapchain = NULL; tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; - tpl_result_t res = TPL_ERROR_NONE; - unsigned int ref; TPL_ASSERT(surface); TPL_ASSERT(surface->display); @@ -2117,7 +2117,7 @@ _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface) TPL_WARN("Failed to create wl_buffer. 
wl_tbm_client(%p) tbm_surface(%p)", wl_vk_display->wl_tbm_client, tbm_surface); } else { - TPL_LOG_T("WL_EGL", + TPL_LOG_T("WL_VK", "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface); } @@ -2140,6 +2140,350 @@ _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface) return TPL_ERROR_NONE; } +static void +__cb_buffer_fenced_release(void *data, + struct zwp_linux_buffer_release_v1 *release, + int32_t fence) +{ + tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer); + + tbm_surface = wl_vk_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_swapchain_t *swapchain = NULL; + + if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) { + TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface); + tbm_surface_internal_unref(tbm_surface); + return; + } + + swapchain = wl_vk_surface->swapchain; + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + if (wl_vk_buffer->status == COMMITTED) { + tbm_surface_queue_error_e tsq_err; + + zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); + wl_vk_buffer->buffer_release = NULL; + + wl_vk_buffer->release_fence_fd = fence; + wl_vk_buffer->status = RELEASED; + + TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", + wl_vk_buffer->bo_name, + fence); + TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)", + wl_vk_buffer->bo_name); + + TPL_LOG_T("WL_VK", + "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_vk_buffer, tbm_surface, + wl_vk_buffer->bo_name, + fence); + + tsq_err = tbm_surface_queue_release(swapchain->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + tbm_surface_internal_unref(tbm_surface); + } + + 
tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } +} + +static void +__cb_buffer_immediate_release(void *data, + struct zwp_linux_buffer_release_v1 *release) +{ + tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer); + + tbm_surface = wl_vk_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_swapchain_t *swapchain = NULL; + + if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) { + TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface); + tbm_surface_internal_unref(tbm_surface); + return; + } + + swapchain = wl_vk_surface->swapchain; + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + if (wl_vk_buffer->status == COMMITTED) { + tbm_surface_queue_error_e tsq_err; + + zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release); + wl_vk_buffer->buffer_release = NULL; + + wl_vk_buffer->release_fence_fd = -1; + wl_vk_buffer->status = RELEASED; + + TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + TPL_LOG_T("WL_VK", + "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + tsq_err = tbm_surface_queue_release(swapchain->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + tbm_surface_internal_unref(tbm_surface); + } + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } +} + +static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = { + __cb_buffer_fenced_release, + __cb_buffer_immediate_release, +}; + 
+static void +__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) +{ + tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer) + + tbm_surface = wl_vk_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface; + tpl_wl_vk_swapchain_t *swapchain = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; + + if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) { + TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface); + tbm_surface_internal_unref(tbm_surface); + return; + } + + swapchain = wl_vk_surface->swapchain; + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + if (wl_vk_buffer->status == COMMITTED) { + + tsq_err = tbm_surface_queue_release(swapchain->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + wl_vk_buffer->status = RELEASED; + + TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name); + TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)", + wl_vk_buffer->bo_name); + + TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_buffer->wl_buffer, tbm_surface, + wl_vk_buffer->bo_name); + + tbm_surface_internal_unref(tbm_surface); + } + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } +} + +static const struct wl_buffer_listener wl_buffer_release_listener = { + (void *)__cb_wl_buffer_release, +}; + +static void +__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, + unsigned int sequence, unsigned int tv_sec, + unsigned int tv_usec, void *user_data) +{ + tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data; + tpl_wl_vk_buffer_t *wl_vk_buffer = NULL; + + TRACE_ASYNC_END((int)wl_vk_surface, "WAIT_VBLANK"); + TPL_DEBUG("[VBLANK] 
wl_vk_surface(%p)", wl_vk_surface); + + if (error == TDM_ERROR_TIMEOUT) + TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)", + wl_vk_surface); + + wl_vk_surface->vblank_done = TPL_TRUE; + + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front( + wl_vk_surface->vblank_waiting_buffers, + NULL); + if (wl_vk_buffer) + _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); +} + +static tpl_result_t +_thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tdm_error tdm_err = TDM_ERROR_NONE; + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + + if (wl_vk_surface->vblank == NULL) { + wl_vk_surface->vblank = + _thread_create_tdm_client_vblank(wl_vk_display->tdm_client); + if (!wl_vk_surface->vblank) { + TPL_WARN("Failed to create vblank. wl_vk_surface(%p)", + wl_vk_surface); + return TPL_ERROR_OUT_OF_MEMORY; + } + } + + tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank, + wl_vk_surface->post_interval, + __cb_tdm_client_vblank, + (void *)wl_vk_surface); + + if (tdm_err == TDM_ERROR_NONE) { + wl_vk_surface->vblank_done = TPL_FALSE; + TRACE_ASYNC_BEGIN((int)wl_vk_surface, "WAIT_VBLANK"); + } else { + TPL_ERR("Failed to tdm_client_vblank_wait. 
tdm_err(%d)", tdm_err); + return TPL_ERROR_INVALID_OPERATION; + } + + return TPL_ERROR_NONE; +} + +static void +_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface, + tpl_wl_vk_buffer_t *wl_vk_buffer) +{ + tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display; + struct wl_surface *wl_surface = wl_vk_surface->wl_surface; + uint32_t version; + + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL, + "wl_vk_buffer sould be not NULL"); + + if (wl_vk_buffer->wl_buffer == NULL) { + wl_vk_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + wl_vk_display->wl_tbm_client, + wl_vk_buffer->tbm_surface); + } + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL, + "[FATAL] Failed to create wl_buffer"); + + version = wl_proxy_get_version((struct wl_proxy *)wl_surface); + + wl_surface_attach(wl_surface, (void *)wl_vk_buffer->wl_buffer, + wl_vk_buffer->dx, wl_vk_buffer->dy); + + if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) { + if (version < 4) { + wl_surface_damage(wl_surface, + wl_vk_buffer->dx, wl_vk_buffer->dy, + wl_vk_buffer->width, wl_vk_buffer->height); + } else { + wl_surface_damage_buffer(wl_surface, + 0, 0, + wl_vk_buffer->width, wl_vk_buffer->height); + } + } else { + int i; + for (i = 0; i < wl_vk_buffer->num_rects; i++) { + int inverted_y = + wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] + + wl_vk_buffer->rects[i * 4 + 3]); + if (version < 4) { + wl_surface_damage(wl_surface, + wl_vk_buffer->rects[i * 4 + 0], + inverted_y, + wl_vk_buffer->rects[i * 4 + 2], + wl_vk_buffer->rects[i * 4 + 3]); + } else { + wl_surface_damage_buffer(wl_surface, + wl_vk_buffer->rects[i * 4 + 0], + inverted_y, + wl_vk_buffer->rects[i * 4 + 2], + wl_vk_buffer->rects[i * 4 + 3]); + } + } + } + + if (wl_vk_display->use_explicit_sync && + wl_vk_surface->surface_sync) { + + zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync, + wl_vk_buffer->acquire_fence_fd); + 
TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)", + wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd); + close(wl_vk_buffer->acquire_fence_fd); + wl_vk_buffer->acquire_fence_fd = -1; + + wl_vk_buffer->buffer_release = + zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync); + if (!wl_vk_buffer->buffer_release) { + TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface); + } else { + zwp_linux_buffer_release_v1_add_listener( + wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer); + TPL_DEBUG("add explicit_sync_release_listener."); + } + } else { + wl_buffer_add_listener((void *)wl_vk_buffer->wl_buffer, + &wl_buffer_release_listener, wl_vk_buffer); + } + + wl_surface_commit(wl_surface); + + wl_display_flush(wl_vk_display->wl_display); + + TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)", + wl_vk_buffer->bo_name); + + tpl_gmutex_lock(&wl_vk_buffer->mutex); + + wl_vk_buffer->need_to_commit = TPL_FALSE; + wl_vk_buffer->status = COMMITTED; + + tpl_gcond_signal(&wl_vk_buffer->cond); + + tpl_gmutex_unlock(&wl_vk_buffer->mutex); + + TPL_LOG_T("WL_VK", + "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface, + wl_vk_buffer->bo_name); + + if (wl_vk_display->use_wait_vblank && + _thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE) + TPL_ERR("Failed to set wait vblank."); +} + tpl_bool_t __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) { -- 2.7.4 From 482599e1ada68f6fe87c13a2acb7d5fd0ea78386 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 27 Apr 2021 19:41:10 +0900 Subject: [PATCH 08/16] Add set_post_interval to set commit interval Change-Id: I1f531f4240662ccfe5afef643052e99a830ae4d6 Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git 
a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 9bfb4ed..abd00cf 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -112,6 +112,8 @@ struct _tpl_wl_vk_surface { tpl_bool_t is_activated; tpl_bool_t reset; /* TRUE if queue reseted by external */ tpl_bool_t vblank_done; + + int post_interval; }; typedef enum buffer_status { @@ -1210,6 +1212,8 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) wl_vk_surface->vblank = NULL; wl_vk_surface->surface_sync = NULL; + wl_vk_surface->post_interval = surface->post_interval; + { int i = 0; for (i = 0; i < BUFFER_ARRAY_SIZE; i++) @@ -1283,6 +1287,27 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) surface->backend.data = NULL; } +static tpl_result_t +__tpl_wl_vk_wsi_surface_set_post_interval(tpl_surface_t *surface, + int post_interval) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); + + wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + + TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER); + + TPL_INFO("[SET_POST_INTERVAL]", + "wl_vk_surface(%p) post_interval(%d -> %d)", + wl_vk_surface, wl_vk_surface->post_interval, post_interval); + + wl_vk_surface->post_interval = post_interval; + + return TPL_ERROR_NONE; +} + static tpl_bool_t __tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) { @@ -2532,6 +2557,8 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) __tpl_wl_vk_wsi_surface_get_swapchain_buffers; backend->create_swapchain = __tpl_wl_vk_wsi_surface_create_swapchain; backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain; + backend->set_post_interval = + __tpl_wl_vk_wsi_surface_set_post_interval; } static int -- 2.7.4 From 208b40bf77e003d8dc411189e1114bc98c35f2fb Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 27 Apr 2021 19:46:09 +0900 Subject: [PATCH 09/16] Change some internal functions to static Change-Id: 
I11f1394a62ed6b1a2f19a01364c0aa9a6aded16f Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index abd00cf..185eee7 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -277,7 +277,7 @@ static tpl_gsource_functions tdm_funcs = { .finalize = __thread_func_tdm_finalize, }; -tpl_result_t +static tpl_result_t _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display) { tdm_client *tdm_client = NULL; @@ -371,7 +371,7 @@ _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display, wl_vk_display->last_error = errno; } -tpl_result_t +static tpl_result_t _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display) { struct wl_registry *registry = NULL; @@ -477,7 +477,7 @@ fini: return result; } -void +static void _thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display) { /* If wl_vk_display is in prepared state, cancel it */ -- 2.7.4 From f9f7aefc520f8f79b7cd909cd5a5adb5704b6b0f Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 27 Apr 2021 19:51:22 +0900 Subject: [PATCH 10/16] Make the prefix of backend APIs shorten Change-Id: I8ccfcd5402ad9071854f7ea69695a553fe9e213c Signed-off-by: Joonbum Ko --- src/tpl.c | 4 +-- src/tpl_internal.h | 4 +-- src/tpl_wl_vk_thread.c | 68 +++++++++++++++++++++++++------------------------- 3 files changed, 38 insertions(+), 38 deletions(-) diff --git a/src/tpl.c b/src/tpl.c index 50e19af..c239b4b 100644 --- a/src/tpl.c +++ b/src/tpl.c @@ -312,7 +312,7 @@ __tpl_display_init_backend(tpl_display_t *display, tpl_backend_type_t type) __tpl_display_init_backend_wayland_vk_wsi(&display->backend); break; case TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD: - __tpl_display_init_backend_wl_vk_wsi_thread(&display->backend); + __tpl_display_init_backend_wl_vk_thread(&display->backend); break; case TPL_BACKEND_TBM: __tpl_display_init_backend_tbm(&display->backend, type); @@ -451,7 +451,7 @@ __tpl_surface_init_backend(tpl_surface_t 
*surface, tpl_backend_type_t type) __tpl_surface_init_backend_wayland_vk_wsi(&surface->backend); break; case TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD: - __tpl_surface_init_backend_wl_vk_wsi_thread(&surface->backend); + __tpl_surface_init_backend_wl_vk_thread(&surface->backend); break; case TPL_BACKEND_TBM: __tpl_surface_init_backend_tbm(&surface->backend, type); diff --git a/src/tpl_internal.h b/src/tpl_internal.h index 2b663ed..f978f4f 100755 --- a/src/tpl_internal.h +++ b/src/tpl_internal.h @@ -231,7 +231,7 @@ void __tpl_display_init_backend_tbm(tpl_display_backend_t *backend, void __tpl_display_init_backend_wayland_egl(tpl_display_backend_t *backend); void __tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend); void __tpl_display_init_backend_wayland_vk_wsi(tpl_display_backend_t *backend); -void __tpl_display_init_backend_wl_vk_wsi_thread(tpl_display_backend_t *backend); +void __tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend); void __tpl_display_init_backend_x11_dri2(tpl_display_backend_t *backend); void __tpl_display_init_backend_x11_dri3(tpl_display_backend_t *backend); void __tpl_surface_init_backend_tbm(tpl_surface_backend_t *backend, @@ -239,7 +239,7 @@ void __tpl_surface_init_backend_tbm(tpl_surface_backend_t *backend, void __tpl_surface_init_backend_wayland_egl(tpl_surface_backend_t *backend); void __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend); void __tpl_surface_init_backend_wayland_vk_wsi(tpl_surface_backend_t *backend); -void __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend); +void __tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend); void __tpl_surface_init_backend_x11_dri2(tpl_surface_backend_t *backend); void __tpl_surface_init_backend_x11_dri3(tpl_surface_backend_t *backend); diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 185eee7..643f496 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -659,7 +659,7 @@ 
static tpl_gsource_functions disp_funcs = { }; static tpl_result_t -__tpl_wl_vk_wsi_display_init(tpl_display_t *display) +__tpl_wl_vk_display_init(tpl_display_t *display) { TPL_ASSERT(display); @@ -774,7 +774,7 @@ free_display: } static void -__tpl_wl_vk_wsi_display_fini(tpl_display_t *display) +__tpl_wl_vk_display_fini(tpl_display_t *display) { tpl_wl_vk_display_t *wl_vk_display; @@ -812,7 +812,7 @@ __tpl_wl_vk_wsi_display_fini(tpl_display_t *display) } static tpl_result_t -__tpl_wl_vk_wsi_display_query_config(tpl_display_t *display, +__tpl_wl_vk_display_query_config(tpl_display_t *display, tpl_surface_type_t surface_type, int red_size, int green_size, int blue_size, int alpha_size, @@ -841,7 +841,7 @@ __tpl_wl_vk_wsi_display_query_config(tpl_display_t *display, } static tpl_result_t -__tpl_wl_vk_wsi_display_filter_config(tpl_display_t *display, +__tpl_wl_vk_display_filter_config(tpl_display_t *display, int *visual_id, int alpha_size) { @@ -852,7 +852,7 @@ __tpl_wl_vk_wsi_display_filter_config(tpl_display_t *display, } static tpl_result_t -__tpl_wl_vk_wsi_display_query_window_supported_buffer_count( +__tpl_wl_vk_display_query_window_supported_buffer_count( tpl_display_t *display, tpl_handle_t window, int *min, int *max) { @@ -871,7 +871,7 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( } static tpl_result_t -__tpl_wl_vk_wsi_display_query_window_supported_present_modes( +__tpl_wl_vk_display_query_window_supported_present_modes( tpl_display_t *display, tpl_handle_t window, int *present_modes) { @@ -1162,7 +1162,7 @@ static tpl_gsource_functions surf_funcs = { static tpl_result_t -__tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) +__tpl_wl_vk_surface_init(tpl_surface_t *surface) { tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; @@ -1240,7 +1240,7 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) } static void -__tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) +__tpl_wl_vk_surface_fini(tpl_surface_t 
*surface) { tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; @@ -1288,7 +1288,7 @@ __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) } static tpl_result_t -__tpl_wl_vk_wsi_surface_set_post_interval(tpl_surface_t *surface, +__tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface, int post_interval) { tpl_wl_vk_surface_t *wl_vk_surface = NULL; @@ -1309,7 +1309,7 @@ __tpl_wl_vk_wsi_surface_set_post_interval(tpl_surface_t *surface, } static tpl_bool_t -__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) +__tpl_wl_vk_surface_validate(tpl_surface_t *surface) { TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); @@ -1541,7 +1541,7 @@ _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) } static tpl_result_t -__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, +__tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface, tbm_format format, int width, int height, int buffer_count, int present_mode) { @@ -1612,7 +1612,7 @@ _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface) } static tpl_result_t -__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) +__tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface) { tpl_wl_vk_swapchain_t *swapchain = NULL; tpl_wl_vk_surface_t *wl_vk_surface = NULL; @@ -1675,7 +1675,7 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) } static tpl_result_t -__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, +__tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface, tbm_surface_h **buffers, int *buffer_count) { @@ -1884,7 +1884,7 @@ _wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface, } static tbm_surface_h -__tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, +__tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, int32_t *release_fence) { @@ -1981,7 +1981,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, } static tpl_result_t 
-__tpl_wl_vk_wsi_surface_cancel_buffer(tpl_surface_t *surface, +__tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface) { TPL_ASSERT(surface); @@ -2026,7 +2026,7 @@ __tpl_wl_vk_wsi_surface_cancel_buffer(tpl_surface_t *surface, } static tpl_result_t -__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, +__tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface, int num_rects, const int *rects, int32_t acquire_fence) @@ -2521,44 +2521,44 @@ __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) } void -__tpl_display_init_backend_wl_vk_wsi_thread(tpl_display_backend_t *backend) +__tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend) { TPL_ASSERT(backend); backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD; backend->data = NULL; - backend->init = __tpl_wl_vk_wsi_display_init; - backend->fini = __tpl_wl_vk_wsi_display_fini; - backend->query_config = __tpl_wl_vk_wsi_display_query_config; - backend->filter_config = __tpl_wl_vk_wsi_display_filter_config; + backend->init = __tpl_wl_vk_display_init; + backend->fini = __tpl_wl_vk_display_fini; + backend->query_config = __tpl_wl_vk_display_query_config; + backend->filter_config = __tpl_wl_vk_display_filter_config; backend->query_window_supported_buffer_count = - __tpl_wl_vk_wsi_display_query_window_supported_buffer_count; + __tpl_wl_vk_display_query_window_supported_buffer_count; backend->query_window_supported_present_modes = - __tpl_wl_vk_wsi_display_query_window_supported_present_modes; + __tpl_wl_vk_display_query_window_supported_present_modes; } void -__tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) +__tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend) { TPL_ASSERT(backend); backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD; backend->data = NULL; - backend->init = __tpl_wl_vk_wsi_surface_init; - backend->fini = __tpl_wl_vk_wsi_surface_fini; - backend->validate = 
__tpl_wl_vk_wsi_surface_validate; + backend->init = __tpl_wl_vk_surface_init; + backend->fini = __tpl_wl_vk_surface_fini; + backend->validate = __tpl_wl_vk_surface_validate; backend->cancel_dequeued_buffer = - __tpl_wl_vk_wsi_surface_cancel_buffer; - backend->dequeue_buffer = __tpl_wl_vk_wsi_surface_dequeue_buffer; - backend->enqueue_buffer = __tpl_wl_vk_wsi_surface_enqueue_buffer; + __tpl_wl_vk_surface_cancel_buffer; + backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer; + backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer; backend->get_swapchain_buffers = - __tpl_wl_vk_wsi_surface_get_swapchain_buffers; - backend->create_swapchain = __tpl_wl_vk_wsi_surface_create_swapchain; - backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain; + __tpl_wl_vk_surface_get_swapchain_buffers; + backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain; + backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain; backend->set_post_interval = - __tpl_wl_vk_wsi_surface_set_post_interval; + __tpl_wl_vk_surface_set_post_interval; } static int -- 2.7.4 From 9414d6ef78ddac774296377a6e5821498c896afe Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 29 Apr 2021 10:44:19 +0900 Subject: [PATCH 11/16] Make vulkan only uses the thread backend. 
Change-Id: Ia6e693f04811edd4759ddd6fa322327227e6aa2e Signed-off-by: Joonbum Ko --- src/tpl.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/tpl.c b/src/tpl.c index c239b4b..f40889d 100644 --- a/src/tpl.c +++ b/src/tpl.c @@ -308,10 +308,8 @@ __tpl_display_init_backend(tpl_display_t *display, tpl_backend_type_t type) case TPL_BACKEND_WAYLAND_THREAD: __tpl_display_init_backend_wl_egl_thread(&display->backend); break; - case TPL_BACKEND_WAYLAND_VULKAN_WSI: - __tpl_display_init_backend_wayland_vk_wsi(&display->backend); - break; case TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD: + case TPL_BACKEND_WAYLAND_VULKAN_WSI: __tpl_display_init_backend_wl_vk_thread(&display->backend); break; case TPL_BACKEND_TBM: @@ -448,8 +446,6 @@ __tpl_surface_init_backend(tpl_surface_t *surface, tpl_backend_type_t type) __tpl_surface_init_backend_wl_egl_thread(&surface->backend); break; case TPL_BACKEND_WAYLAND_VULKAN_WSI: - __tpl_surface_init_backend_wayland_vk_wsi(&surface->backend); - break; case TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD: __tpl_surface_init_backend_wl_vk_thread(&surface->backend); break; -- 2.7.4 From c770bf03a919f9b52fd0da0208dc6ee017b95aee Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 29 Apr 2021 10:45:53 +0900 Subject: [PATCH 12/16] Disabled the codes that is not being used now. 
Change-Id: I48424781d5f3c5edafa205da7963401b60daecef Signed-off-by: Joonbum Ko --- src/tpl_internal.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/tpl_internal.h b/src/tpl_internal.h index f978f4f..97a07ef 100755 --- a/src/tpl_internal.h +++ b/src/tpl_internal.h @@ -220,8 +220,8 @@ tpl_bool_t __tpl_display_choose_backend_tbm(tpl_handle_t native_dpy); tpl_bool_t __tpl_display_choose_backend_wayland_egl(tpl_handle_t native_dpy); tpl_bool_t __tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy); tpl_bool_t __tpl_display_choose_backend_wayland_vk_wsi(tpl_handle_t native_dpy); -tpl_bool_t __tpl_display_choose_backend_x11_dri2(tpl_handle_t native_dpy); -tpl_bool_t __tpl_display_choose_backend_x11_dri3(tpl_handle_t native_dpy); +//tpl_bool_t __tpl_display_choose_backend_x11_dri2(tpl_handle_t native_dpy); //unused +//tpl_bool_t __tpl_display_choose_backend_x11_dri3(tpl_handle_t native_dpy); //unused void __tpl_display_init_backend(tpl_display_t *display, tpl_backend_type_t type); void __tpl_surface_init_backend(tpl_surface_t *surface, @@ -230,18 +230,18 @@ void __tpl_display_init_backend_tbm(tpl_display_backend_t *backend, tpl_backend_type_t type); void __tpl_display_init_backend_wayland_egl(tpl_display_backend_t *backend); void __tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend); -void __tpl_display_init_backend_wayland_vk_wsi(tpl_display_backend_t *backend); +//void __tpl_display_init_backend_wayland_vk_wsi(tpl_display_backend_t *backend); //unused void __tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend); -void __tpl_display_init_backend_x11_dri2(tpl_display_backend_t *backend); -void __tpl_display_init_backend_x11_dri3(tpl_display_backend_t *backend); +//void __tpl_display_init_backend_x11_dri2(tpl_display_backend_t *backend); //unused +//void __tpl_display_init_backend_x11_dri3(tpl_display_backend_t *backend); //unused void 
__tpl_surface_init_backend_tbm(tpl_surface_backend_t *backend, tpl_backend_type_t type); void __tpl_surface_init_backend_wayland_egl(tpl_surface_backend_t *backend); void __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend); -void __tpl_surface_init_backend_wayland_vk_wsi(tpl_surface_backend_t *backend); +//void __tpl_surface_init_backend_wayland_vk_wsi(tpl_surface_backend_t *backend); //unused void __tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend); -void __tpl_surface_init_backend_x11_dri2(tpl_surface_backend_t *backend); -void __tpl_surface_init_backend_x11_dri3(tpl_surface_backend_t *backend); +//void __tpl_surface_init_backend_x11_dri2(tpl_surface_backend_t *backend); //unused +//void __tpl_surface_init_backend_x11_dri3(tpl_surface_backend_t *backend); //unused /* OS related functions */ void __tpl_util_sys_yield(void); -- 2.7.4 From f33951ba9ecd00b5cbcb81252df77e8b5b4d3719 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 29 Apr 2021 10:48:11 +0900 Subject: [PATCH 13/16] Fix so that error log is not printed when tpl_display is reused. 
Change-Id: Iab3be64d490dd0e8592dc57d413877e37e107fa1 Signed-off-by: Joonbum Ko --- src/tpl_display.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/tpl_display.c b/src/tpl_display.c index a125a3f..fe7dbaf 100644 --- a/src/tpl_display.c +++ b/src/tpl_display.c @@ -31,7 +31,10 @@ tpl_display_create(tpl_backend_type_t type, tpl_handle_t native_dpy) display = __tpl_runtime_find_display(type, native_dpy); /* If tpl_display already exists, then return it */ - TPL_CHECK_ON_TRUE_RETURN_VAL(display, display); + if (display) { + TPL_LOG_F("already initialized display(%p)", display); + return display; + } /* if backend is unknown, try to find the best match from the list of supported types */ if (TPL_BACKEND_UNKNOWN == type) -- 2.7.4 From 9fff67364f948809741a869732ece2147d33a0ff Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 30 Apr 2021 15:20:04 +0900 Subject: [PATCH 14/16] Fix a problem when explicit_fence was not used. - Wayland error logs are printed due to attempt to add the buffer release listener in duplicate. 
Change-Id: I1c5653ea3802a5a901cbf7fa07df73f0a822afaf Signed-off-by: Joonbum Ko --- src/tpl_wl_vk_thread.c | 46 ++++++++++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 643f496..67d2256 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -140,7 +140,7 @@ struct _tpl_wl_vk_buffer { tbm_surface_h tbm_surface; int bo_name; - struct wl_proxy *wl_buffer; + struct wl_buffer *wl_buffer; int dx, dy; /* position to attach to wl_surface */ int width, height; /* size to attach to wl_surface */ @@ -180,6 +180,8 @@ static int _get_tbm_surface_bo_name(tbm_surface_h tbm_surface); static void __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer); +static void +__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer); static tpl_result_t _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface); static void @@ -1205,7 +1207,7 @@ __tpl_wl_vk_surface_init(tpl_surface_t *surface) wl_vk_surface->reset = TPL_FALSE; wl_vk_surface->is_activated = TPL_FALSE; - wl_vk_surface->vblank_done = TPL_FALSE; + wl_vk_surface->vblank_done = TPL_TRUE; wl_vk_surface->render_done_cnt = 0; @@ -1764,7 +1766,7 @@ __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer) if (wl_vk_buffer->wl_buffer) { wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client, - (void *)wl_vk_buffer->wl_buffer); + wl_vk_buffer->wl_buffer); wl_vk_buffer->wl_buffer = NULL; } @@ -2098,6 +2100,10 @@ __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface, return TPL_ERROR_NONE; } +static const struct wl_buffer_listener wl_buffer_release_listener = { + (void *)__cb_wl_buffer_release, +}; + static tpl_result_t _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface) { @@ -2134,8 +2140,7 @@ _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface) _get_tbm_surface_bo_name(tbm_surface)); if (wl_vk_buffer->wl_buffer == NULL) { - wl_vk_buffer->wl_buffer = - (struct 
wl_proxy *)wayland_tbm_client_create_buffer( + wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer( wl_vk_display->wl_tbm_client, tbm_surface); if (!wl_vk_buffer->wl_buffer) { @@ -2146,6 +2151,12 @@ _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface) "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)", wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface); } + + if (wl_vk_buffer->acquire_fence_fd == -1 || + wl_vk_display->use_explicit_sync == TPL_FALSE) { + wl_buffer_add_listener(wl_vk_buffer->wl_buffer, + &wl_buffer_release_listener, wl_vk_buffer); + } } if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done) @@ -2340,10 +2351,6 @@ __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) } } -static const struct wl_buffer_listener wl_buffer_release_listener = { - (void *)__cb_wl_buffer_release, -}; - static void __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, unsigned int sequence, unsigned int tv_sec, @@ -2414,17 +2421,22 @@ _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface, "wl_vk_buffer sould be not NULL"); if (wl_vk_buffer->wl_buffer == NULL) { - wl_vk_buffer->wl_buffer = - (struct wl_proxy *)wayland_tbm_client_create_buffer( - wl_vk_display->wl_tbm_client, - wl_vk_buffer->tbm_surface); + wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer( + wl_vk_display->wl_tbm_client, + wl_vk_buffer->tbm_surface); + if (wl_vk_buffer->wl_buffer && + (wl_vk_buffer->acquire_fence_fd == -1 || + wl_vk_display->use_explicit_sync == TPL_FALSE)) { + wl_buffer_add_listener(wl_vk_buffer->wl_buffer, + &wl_buffer_release_listener, wl_vk_buffer); + } } TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL, "[FATAL] Failed to create wl_buffer"); version = wl_proxy_get_version((struct wl_proxy *)wl_surface); - wl_surface_attach(wl_surface, (void *)wl_vk_buffer->wl_buffer, + wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer, wl_vk_buffer->dx, wl_vk_buffer->dy); if (wl_vk_buffer->num_rects 
< 1 || wl_vk_buffer->rects == NULL) { @@ -2460,7 +2472,8 @@ _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface, } if (wl_vk_display->use_explicit_sync && - wl_vk_surface->surface_sync) { + wl_vk_surface->surface_sync && + wl_vk_buffer->acquire_fence_fd != -1) { zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync, wl_vk_buffer->acquire_fence_fd); @@ -2478,9 +2491,6 @@ _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface, wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer); TPL_DEBUG("add explicit_sync_release_listener."); } - } else { - wl_buffer_add_listener((void *)wl_vk_buffer->wl_buffer, - &wl_buffer_release_listener, wl_vk_buffer); } wl_surface_commit(wl_surface); -- 2.7.4 From 9630493a85d584705785cf71505fffb9bcd02938 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 30 Apr 2021 15:27:54 +0900 Subject: [PATCH 15/16] Back up the legacy codes of wl_vk_thread to unused. Change-Id: Ie9276cd997b89be2f558306daaee3e3eb3738be9 Signed-off-by: Joonbum Ko --- src/unused/tpl_wl_vk_thread_legacy.c | 832 +++++++++++++++++++++++++++++++++++ 1 file changed, 832 insertions(+) create mode 100644 src/unused/tpl_wl_vk_thread_legacy.c diff --git a/src/unused/tpl_wl_vk_thread_legacy.c b/src/unused/tpl_wl_vk_thread_legacy.c new file mode 100644 index 0000000..cb4f549 --- /dev/null +++ b/src/unused/tpl_wl_vk_thread_legacy.c @@ -0,0 +1,832 @@ +#define inline __inline__ +#undef inline + +#include "tpl_internal.h" + +#include +#include +#include + +#include + +#include "tpl_wayland_egl_thread.h" + +typedef struct _tpl_wayland_vk_wsi_display tpl_wayland_vk_wsi_display_t; +typedef struct _tpl_wayland_vk_wsi_surface tpl_wayland_vk_wsi_surface_t; +typedef struct _tpl_wayland_vk_wsi_buffer tpl_wayland_vk_wsi_buffer_t; + +struct _tpl_wayland_vk_wsi_display { + twe_thread *wl_thread; + twe_display_h twe_display; +}; + +struct _tpl_wayland_vk_wsi_surface { + twe_surface_h twe_surface; + tbm_surface_queue_h tbm_queue; + 
tbm_surface_h *swapchain_buffers; + int buffer_count; + tpl_bool_t is_activated; + tpl_bool_t reset; + tpl_util_atomic_uint swapchain_reference; +}; + +static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( + tpl_surface_t *surface); + +static TPL_INLINE tpl_bool_t +__tpl_wl_vk_wsi_display_is_wl_display(tpl_handle_t native_dpy) +{ + if (!native_dpy) return TPL_FALSE; + + if (twe_check_native_handle_is_wl_display(native_dpy)) + return TPL_TRUE; + + return TPL_FALSE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_init(tpl_display_t *display) +{ + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + + TPL_ASSERT(display); + + /* Do not allow default display in wayland */ + if (!display->native_handle) { + TPL_ERR("Invalid native handle for display."); + return TPL_ERROR_INVALID_PARAMETER; + } + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) calloc(1, + sizeof(tpl_wayland_vk_wsi_display_t)); + if (!wayland_vk_wsi_display) { + TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_display_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + display->backend.data = wayland_vk_wsi_display; + + if (twe_check_native_handle_is_wl_display(display->native_handle)) { + wayland_vk_wsi_display->wl_thread = twe_thread_create(); + if (!wayland_vk_wsi_display->wl_thread) { + TPL_ERR("Failed to create twe_thread."); + goto free_display; + } + + wayland_vk_wsi_display->twe_display = + twe_display_add(wayland_vk_wsi_display->wl_thread, + display->native_handle, + display->backend.type); + if (!wayland_vk_wsi_display->twe_display) { + TPL_ERR("Failed to add native_display(%p) to thread(%p)", + display->native_handle, + wayland_vk_wsi_display->wl_thread); + goto free_display; + } + + } else { + TPL_ERR("Invalid native handle for display."); + goto free_display; + } + + TPL_LOG_T("WL_VK", + "[INIT DISPLAY] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", + wayland_vk_wsi_display, + wayland_vk_wsi_display->wl_thread, + 
wayland_vk_wsi_display->twe_display); + + return TPL_ERROR_NONE; + +free_display: + if (wayland_vk_wsi_display) { + if (wayland_vk_wsi_display->twe_display) + twe_display_del(wayland_vk_wsi_display->twe_display); + if (wayland_vk_wsi_display->wl_thread) + twe_thread_destroy(wayland_vk_wsi_display->wl_thread); + + wayland_vk_wsi_display->wl_thread = NULL; + wayland_vk_wsi_display->twe_display = NULL; + + free(wayland_vk_wsi_display); + display->backend.data = NULL; + } + + return TPL_ERROR_INVALID_OPERATION; +} + +static void +__tpl_wl_vk_wsi_display_fini(tpl_display_t *display) +{ + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display; + + TPL_ASSERT(display); + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + if (wayland_vk_wsi_display) { + + TPL_LOG_T("WL_VK", + "[FINI] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", + wayland_vk_wsi_display, + wayland_vk_wsi_display->wl_thread, + wayland_vk_wsi_display->twe_display); + + if (wayland_vk_wsi_display->twe_display) { + tpl_result_t ret = TPL_ERROR_NONE; + ret = twe_display_del(wayland_vk_wsi_display->twe_display); + if (ret != TPL_ERROR_NONE) + TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)", + wayland_vk_wsi_display->twe_display, + wayland_vk_wsi_display->wl_thread); + wayland_vk_wsi_display->twe_display = NULL; + } + + if (wayland_vk_wsi_display->wl_thread) { + twe_thread_destroy(wayland_vk_wsi_display->wl_thread); + wayland_vk_wsi_display->wl_thread = NULL; + } + + free(wayland_vk_wsi_display); + } + display->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_query_config(tpl_display_t *display, + tpl_surface_type_t surface_type, + int red_size, int green_size, + int blue_size, int alpha_size, + int color_depth, int *native_visual_id, + tpl_bool_t *is_slow) +{ + TPL_ASSERT(display); + + if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 && + green_size == 8 && blue_size == 8 && + (color_depth == 32 || color_depth == 24)) 
{ + + if (alpha_size == 8) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + if (alpha_size == 0) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + } + + return TPL_ERROR_INVALID_PARAMETER; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_filter_config(tpl_display_t *display, + int *visual_id, + int alpha_size) +{ + TPL_IGNORE(display); + TPL_IGNORE(visual_id); + TPL_IGNORE(alpha_size); + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_query_window_supported_buffer_count( + tpl_display_t *display, + tpl_handle_t window, int *min, int *max) +{ + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_result_t res = TPL_ERROR_NONE; + + TPL_ASSERT(display); + TPL_ASSERT(window); + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + + if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; + + res = twe_display_get_buffer_count(wayland_vk_wsi_display->twe_display, + min, max); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to query buffer count. twe_display(%p)", + wayland_vk_wsi_display->twe_display); + return res; + } + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_query_window_supported_present_modes( + tpl_display_t *display, + tpl_handle_t window, int *modes) +{ + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_result_t res = TPL_ERROR_NONE; + + TPL_ASSERT(display); + TPL_ASSERT(window); + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + + if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; + + if (modes) { + res = twe_display_get_present_mode(wayland_vk_wsi_display->twe_display, + modes); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to query present modes. 
twe_display(%p)", + wayland_vk_wsi_display->twe_display); + return res; + } + } + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) +{ + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + twe_surface_h twe_surface = NULL; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); + TPL_ASSERT(surface->native_handle); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) calloc(1, + sizeof(tpl_wayland_vk_wsi_surface_t)); + if (!wayland_vk_wsi_surface) { + TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_surface_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + wayland_vk_wsi_display = + (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + if (!wayland_vk_wsi_display) { + TPL_ERR("Invalid parameter. wayland_vk_wsi_display(%p)", + wayland_vk_wsi_display); + free(wayland_vk_wsi_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + surface->backend.data = (void *)wayland_vk_wsi_surface; + wayland_vk_wsi_surface->tbm_queue = NULL; + + twe_surface = twe_surface_add(wayland_vk_wsi_display->wl_thread, + wayland_vk_wsi_display->twe_display, + surface->native_handle, + surface->format, surface->num_buffers); + if (!twe_surface) { + TPL_ERR("Failed to add native_surface(%p) to thread(%p)", + surface->native_handle, wayland_vk_wsi_display->wl_thread); + free(wayland_vk_wsi_surface); + surface->backend.data = NULL; + return TPL_ERROR_OUT_OF_MEMORY; + } + + wayland_vk_wsi_surface->twe_surface = twe_surface; + wayland_vk_wsi_surface->is_activated = TPL_FALSE; + wayland_vk_wsi_surface->swapchain_buffers = NULL; + + TPL_LOG_T("WL_VK", + "[INIT]tpl_surface(%p) tpl_wayland_vk_wsi_surface(%p) twe_surface(%p)", + surface, wayland_vk_wsi_surface, twe_surface); + + return TPL_ERROR_NONE; +} + +static void +__tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) +{ + tpl_wayland_vk_wsi_surface_t 
*wayland_vk_wsi_surface = NULL; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + if (wayland_vk_wsi_surface == NULL) return; + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + surface->display->backend.data; + if (wayland_vk_wsi_display == NULL) return; + + if (wayland_vk_wsi_surface->tbm_queue) + __tpl_wl_vk_wsi_surface_destroy_swapchain(surface); + + if (wayland_vk_wsi_surface->swapchain_buffers) { + free(wayland_vk_wsi_surface->swapchain_buffers); + wayland_vk_wsi_surface->swapchain_buffers = NULL; + } + + TPL_LOG_T("WL_VK", + "[FINI] wayland_vk_wsi_surface(%p) native_surface(%p) twe_surface(%p)", + wayland_vk_wsi_surface, surface->native_handle, + wayland_vk_wsi_surface->twe_surface); + + if (twe_surface_del(wayland_vk_wsi_surface->twe_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", + wayland_vk_wsi_surface->twe_surface, + wayland_vk_wsi_display->wl_thread); + } + + wayland_vk_wsi_surface->twe_surface = NULL; + + free(wayland_vk_wsi_surface); + surface->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, + tbm_fd sync_fence) +{ + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->native_handle); + TPL_ASSERT(tbm_surface); + + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = + (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + tbm_surface_queue_error_e tsq_err; + + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", + tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + /* If there are received region information, + * save it to buf_info in tbm_surface user_data using below API. 
*/ + if (num_rects && rects) { + tpl_result_t ret = TPL_ERROR_NONE; + ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects); + if (ret != TPL_ERROR_NONE) { + TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)", + num_rects, rects); + } + } + tsq_err = tbm_surface_queue_enqueue(wayland_vk_wsi_surface->tbm_queue, + tbm_surface); + if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) { + tbm_surface_internal_unref(tbm_surface); + } else { + TPL_ERR("Failed to enqeueue tbm_surface. | tsq_err = %d", tsq_err); + return TPL_ERROR_INVALID_OPERATION; + } + + if (sync_fence != -1) { + tpl_result_t res = TPL_ERROR_NONE; + res = twe_surface_set_sync_fd(wayland_vk_wsi_surface->twe_surface, + tbm_surface, sync_fence); + if (res != TPL_ERROR_NONE) { + TPL_WARN("Failed to set sync_fd(%d). Fallback to async mode.", + sync_fence); + } + } + + TPL_LOG_T("WL_VK", "[ENQ] tbm_surface(%p) bo(%d) sync_fence(%d)", + tbm_surface, + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)), + sync_fence); + + return TPL_ERROR_NONE; +} + +static tpl_bool_t +__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = + (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + + return !(wayland_vk_wsi_surface->reset); +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface) +{ + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + if (!wayland_vk_wsi_surface) { + TPL_ERR("Invalid backend surface. surface(%p) wayland_vk_wsi_surface(%p)", + surface, wayland_vk_wsi_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Invalid buffer. 
tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + tbm_surface_internal_unref(tbm_surface); + + tsq_err = tbm_surface_queue_cancel_dequeue(wayland_vk_wsi_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_OPERATION; + } + + TPL_LOG_T("WL_VK", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)", + surface, tbm_surface); + + return TPL_ERROR_NONE; +} + +static tbm_surface_h +__tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, + uint64_t timeout_ns, + tbm_fd *sync_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + + tbm_surface_h tbm_surface = NULL; + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = + (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = + (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + tbm_surface_queue_error_e tsq_err = 0; + tpl_result_t lock_res = TPL_ERROR_NONE; + tpl_result_t res = TPL_ERROR_NONE; + + if (sync_fence) + *sync_fence = -1; + + TPL_OBJECT_UNLOCK(surface); + TRACE_BEGIN("WAIT_DEQUEUEABLE"); + lock_res = twe_display_lock(wayland_vk_wsi_display->twe_display); + res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface, + timeout_ns); + TRACE_END(); + TPL_OBJECT_LOCK(surface); + + if (res == TPL_ERROR_TIME_OUT) { + TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")", + timeout_ns); + if (lock_res == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return NULL; + } else if (res != TPL_ERROR_NONE) { + TPL_ERR("Invalid operation. 
twe_surface(%p) timeout_ns(%" PRIu64 ")", + wayland_vk_wsi_surface->twe_surface, timeout_ns); + if (lock_res == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return NULL; + } + + if (wayland_vk_wsi_surface->reset) { + TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. Do not process dequeue.", + wayland_vk_wsi_surface->tbm_queue); + if (lock_res == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return NULL; + } + + + tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue, + &tbm_surface); + if (!tbm_surface) { + TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d", + wayland_vk_wsi_surface->tbm_queue, tsq_err); + if (lock_res == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return NULL; + } + + tbm_surface_internal_ref(tbm_surface); + + if (sync_fence) { + *sync_fence = twe_surface_create_sync_fd(tbm_surface); + } + + TPL_LOG_T("WL_VK", "[DEQ] tbm_queue(%p) tbm_surface(%p) bo(%d)", + wayland_vk_wsi_surface->tbm_queue, tbm_surface, + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + + if (lock_res == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); + + return tbm_surface; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, + tbm_surface_h **buffers, + int *buffer_count) +{ + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + int i; + tpl_result_t ret = TPL_ERROR_NONE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->backend.data); + TPL_ASSERT(buffers); + TPL_ASSERT(buffer_count); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + + if 
(twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { + ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, + NULL, buffer_count); + if (ret != TPL_ERROR_NONE) { + TPL_ERR("Failed to get buffer_count. twe_surface(%p)", + wayland_vk_wsi_surface->twe_surface); + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return ret; + } + + wayland_vk_wsi_surface->swapchain_buffers = (tbm_surface_h *)calloc( + *buffer_count, + sizeof(tbm_surface_h)); + if (!wayland_vk_wsi_surface->swapchain_buffers) { + TPL_ERR("Failed to allocate memory for buffers."); + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return TPL_ERROR_OUT_OF_MEMORY; + } + + ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, + wayland_vk_wsi_surface->swapchain_buffers, + buffer_count); + if (ret != TPL_ERROR_NONE) { + TPL_ERR("Failed to get swapchain_buffers. wayland_vk_wsi_surface(%p) twe_surface(%p)", + wayland_vk_wsi_surface, wayland_vk_wsi_surface->twe_surface); + free(wayland_vk_wsi_surface->swapchain_buffers); + wayland_vk_wsi_surface->swapchain_buffers = NULL; + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return ret; + } + + for (i = 0; i < *buffer_count; i++) { + if (wayland_vk_wsi_surface->swapchain_buffers[i]) { + TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)", + i, wayland_vk_wsi_surface->swapchain_buffers[i], + tbm_bo_export(tbm_surface_internal_get_bo( + wayland_vk_wsi_surface->swapchain_buffers[i], 0))); + tbm_surface_internal_ref(wayland_vk_wsi_surface->swapchain_buffers[i]); + } + } + + *buffers = wayland_vk_wsi_surface->swapchain_buffers; + + twe_display_unlock(wayland_vk_wsi_display->twe_display); + } + + return TPL_ERROR_NONE; +} + +static void +__cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue, + void *data) +{ + tpl_surface_t *surface = NULL; + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_bool_t is_activated = TPL_FALSE; + + surface = 
(tpl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(surface); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface); + + /* When queue_reset_callback is called, if is_activated is different from + * its previous state change the reset flag to TPL_TRUE to get a new buffer + * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ + is_activated = twe_surface_check_activated(wayland_vk_wsi_surface->twe_surface); + + if (wayland_vk_wsi_surface->is_activated != is_activated) { + if (is_activated) { + TPL_LOG_T("WL_VK", + "[ACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", + wayland_vk_wsi_surface, surface_queue); + } else { + TPL_LOG_T("WL_VK", + "[DEACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", + wayland_vk_wsi_surface, surface_queue); + } + wayland_vk_wsi_surface->is_activated = is_activated; + } + + wayland_vk_wsi_surface->reset = TPL_TRUE; + + if (surface->reset_cb) + surface->reset_cb(surface->reset_data); +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, + tbm_format format, int width, + int height, int buffer_count, int present_mode) +{ + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_result_t res = TPL_ERROR_NONE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + TPL_ASSERT(wayland_vk_wsi_surface); + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + surface->display->backend.data; + TPL_ASSERT(wayland_vk_wsi_display); + + if (wayland_vk_wsi_surface->tbm_queue) { + int old_width = tbm_surface_queue_get_width(wayland_vk_wsi_surface->tbm_queue); + int old_height = tbm_surface_queue_get_height(wayland_vk_wsi_surface->tbm_queue); + + if (old_width != width || old_height != height) { + 
tbm_surface_queue_reset(wayland_vk_wsi_surface->tbm_queue, + width, height, format); + TPL_LOG_T("WL_VK", + "[RESIZE] wayland_vk_wsi_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)", + wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue, + old_width, old_height, width, height); + } + + if (wayland_vk_wsi_surface->swapchain_buffers) { + int i; + for (i = 0; i < wayland_vk_wsi_surface->buffer_count; i++) { + if (wayland_vk_wsi_surface->swapchain_buffers[i]) { + TPL_DEBUG("unref tbm_surface(%p)", wayland_vk_wsi_surface->swapchain_buffers[i]); + tbm_surface_internal_unref(wayland_vk_wsi_surface->swapchain_buffers[i]); + wayland_vk_wsi_surface->swapchain_buffers[i] = NULL; + } + } + + free(wayland_vk_wsi_surface->swapchain_buffers); + wayland_vk_wsi_surface->swapchain_buffers = NULL; + } + + wayland_vk_wsi_surface->buffer_count = + tbm_surface_queue_get_size(wayland_vk_wsi_surface->tbm_queue); + wayland_vk_wsi_surface->reset = TPL_FALSE; + + __tpl_util_atomic_inc(&wayland_vk_wsi_surface->swapchain_reference); + + TPL_LOG_T("WL_VK", "[REUSE] wayland_vk_wsi_surface(%p) tbm_queue(%p) size(%d)", + wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue, + wayland_vk_wsi_surface->buffer_count); + return TPL_ERROR_NONE; + } + + res = twe_surface_create_swapchain(wayland_vk_wsi_surface->twe_surface, + width, height, format, + buffer_count, present_mode); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to create swapchain. 
twe_surface(%p)", + wayland_vk_wsi_surface->twe_surface); + return res; + } + + wayland_vk_wsi_surface->tbm_queue = twe_surface_get_tbm_queue( + wayland_vk_wsi_surface->twe_surface); + + /* Set reset_callback to tbm_queue */ + if (tbm_surface_queue_add_reset_cb(wayland_vk_wsi_surface->tbm_queue, + __cb_tbm_queue_reset_callback, + (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("TBM surface queue add reset cb failed!"); + twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); + wayland_vk_wsi_surface->tbm_queue = NULL; + return TPL_ERROR_INVALID_OPERATION; + } + + wayland_vk_wsi_surface->buffer_count = buffer_count; + wayland_vk_wsi_surface->reset = TPL_FALSE; + + __tpl_util_atomic_set(&wayland_vk_wsi_surface->swapchain_reference, 1); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) +{ + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_result_t res = TPL_ERROR_NONE; + unsigned int ref; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->backend.data); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) surface->display->backend.data; + + if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { + ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference); + if (ref > 0) { + TPL_LOG_T("WL_VK", + "This swapchain is still valid. 
| twe_surface(%p)", + wayland_vk_wsi_surface->twe_surface); + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return TPL_ERROR_NONE; + } + + + if (wayland_vk_wsi_surface->reset) { + TPL_LOG_T("WL_VK", + "Since reset is in the TRUE state, it will not be destroyed."); + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return TPL_ERROR_NONE; + } + + if (wayland_vk_wsi_surface->swapchain_buffers) { + int i; + for (i = 0; i < wayland_vk_wsi_surface->buffer_count; i++) { + TPL_DEBUG("Stop tracking tbm_surface(%p)", + wayland_vk_wsi_surface->swapchain_buffers[i]); + tbm_surface_internal_unref(wayland_vk_wsi_surface->swapchain_buffers[i]); + wayland_vk_wsi_surface->swapchain_buffers[i] = NULL; + } + + free(wayland_vk_wsi_surface->swapchain_buffers); + wayland_vk_wsi_surface->swapchain_buffers = NULL; + } + + res = twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to destroy swapchain. twe_surface(%p)", + wayland_vk_wsi_surface->twe_surface); + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return res; + } + + wayland_vk_wsi_surface->tbm_queue = NULL; + + twe_display_unlock(wayland_vk_wsi_display->twe_display); + } + + return TPL_ERROR_NONE; +} + +tpl_bool_t +__tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) +{ + if (!native_dpy) return TPL_FALSE; + + if (twe_check_native_handle_is_wl_display(native_dpy)) + return TPL_TRUE; + + return TPL_FALSE; +} + +void +__tpl_display_init_backend_wl_vk_wsi_thread(tpl_display_backend_t *backend) +{ + TPL_ASSERT(backend); + + backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_vk_wsi_display_init; + backend->fini = __tpl_wl_vk_wsi_display_fini; + backend->query_config = __tpl_wl_vk_wsi_display_query_config; + backend->filter_config = __tpl_wl_vk_wsi_display_filter_config; + backend->query_window_supported_buffer_count = + 
__tpl_wl_vk_wsi_display_query_window_supported_buffer_count; + backend->query_window_supported_present_modes = + __tpl_wl_vk_wsi_display_query_window_supported_present_modes; +} + +void +__tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) +{ + TPL_ASSERT(backend); + + backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_vk_wsi_surface_init; + backend->fini = __tpl_wl_vk_wsi_surface_fini; + backend->validate = __tpl_wl_vk_wsi_surface_validate; + backend->cancel_dequeued_buffer = + __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer; + backend->dequeue_buffer = __tpl_wl_vk_wsi_surface_dequeue_buffer; + backend->enqueue_buffer = __tpl_wl_vk_wsi_surface_enqueue_buffer; + backend->get_swapchain_buffers = + __tpl_wl_vk_wsi_surface_get_swapchain_buffers; + backend->create_swapchain = __tpl_wl_vk_wsi_surface_create_swapchain; + backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain; +} -- 2.7.4 From 0066d54f58bb8c77493108d0f02ce5c77d40ebc8 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 30 Apr 2021 15:36:33 +0900 Subject: [PATCH 16/16] Package version up to 1.8.5 Change-Id: Ie10549ac47f7c15bc87ac8a80b3bd07d217c3a04 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index 8a81e7f..9649fad 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 8 -%define TPL_VERSION_PATCH 4 +%define TPL_VERSION_PATCH 5 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4