From f05d29898d4ef28372a701b7f464c1d6e6c70c45 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Mon, 11 Nov 2019 13:32:48 +0900
Subject: [PATCH 01/16] wayland-egl-tizen: Added new API to create render sync
 fd.

New API: int wl_egl_window_tizen_create_render_sync_fd(struct wl_egl_window *)

/**
 * Create a sync fence fd that can tell when rendering is done.
 *
 * If eglSwapBuffers works asynchronously, it returns an fd which tells
 * when the render job is finished.
 * This fd can be waited on asynchronously via poll or select.
 *
 * Important:
 * This requires the following premise:
 * - The interval after the ddk calls libtpl-egl's tpl_surface_dequeue_buffer
 *   to get the buffer and until it calls tpl_surface_enqueue_buffer
 *   is regarded as the gpu rendering job.
 * - Therefore, when using the dma_buf implicit fence,
 *   there is no guarantee that the rendering job is finished
 *   with the fence obtained through this API.
 *
 * The fence_fd obtained through this function is one-time use and
 * cannot be reused, so the caller must close it when finished using it.
 *
 * @param egl_window handle to wl_egl_window.
 * @return sync fd on success, -1 on failure.
 */

Change-Id: Ia19f88108cfe8d0e5e6477acbd83a2df173f5507
Signed-off-by: Joonbum Ko
---
 src/wayland-egl-tizen/wayland-egl-tizen-priv.h |  2 ++
 src/wayland-egl-tizen/wayland-egl-tizen.c      | 24 +++++++++++++++++++++++-
 src/wayland-egl-tizen/wayland-egl-tizen.h      | 26 ++++++++++++++++++++++++++
 3 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/src/wayland-egl-tizen/wayland-egl-tizen-priv.h b/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
index 5b65a70..69df3bc 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
+++ b/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
@@ -22,6 +22,7 @@ struct tizen_private {
 	int (*get_rotation_capability)(struct wl_egl_window *, void *);
 	void (*set_frontbuffer_callback)(struct wl_egl_window *, void *, int);
 	void (*set_window_serial_callback)(struct wl_egl_window *, void *, unsigned int);
+	int (*create_render_sync_fd)(struct wl_egl_window *, void *);
 };
 
 static struct tizen_private* tizen_private_create()
@@ -40,6 +41,7 @@ static struct tizen_private* tizen_private_create()
 		private->get_rotation_capability = NULL;
 		private->set_window_serial_callback = NULL;
 		private->set_frontbuffer_callback = NULL;
+		private->create_render_sync_fd = NULL;
 	}
 
 	return private;
diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.c b/src/wayland-egl-tizen/wayland-egl-tizen.c
index 4c127bc..888f8af 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen.c
+++ b/src/wayland-egl-tizen/wayland-egl-tizen.c
@@ -250,4 +250,26 @@ wl_egl_window_tizen_set_window_serial(struct wl_egl_window *egl_window,
 	if (private->set_window_serial_callback)
 		private->set_window_serial_callback(egl_window,
 						    egl_window->driver_private, serial);
-}
\ No newline at end of file
+}
+
+int
+wl_egl_window_tizen_create_render_sync_fd(struct wl_egl_window *egl_window)
+{
+	struct tizen_private *private = NULL;
+
+	if (egl_window == NULL) {
+		WL_EGL_ERR("egl_window is NULL");
+		return -1;
+	}
+
+	private = egl_window->driver_private;
+	if (private == NULL) {
+		WL_EGL_ERR("wl_egl_window(%p) driver_private is NULL", egl_window);
+		return -1;
+	}
+
+	if (private->create_render_sync_fd)
+		return private->create_render_sync_fd(egl_window, egl_window->driver_private);
+
+	return -1;
+}
diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.h b/src/wayland-egl-tizen/wayland-egl-tizen.h
index b305e27..96127a0 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen.h
+++ b/src/wayland-egl-tizen/wayland-egl-tizen.h
@@ -73,6 +73,32 @@ void
 wl_egl_window_tizen_set_window_serial(struct wl_egl_window *egl_window,
 				      unsigned int serial);
 
+/* temporary APIs for testing sync feature */
+/**
+ * Create a sync fence fd that can tell when rendering is done.
+ *
+ * If eglSwapBuffers works asynchronously, it returns an fd which tells
+ * when the render job is finished.
+ * This fd can be waited on asynchronously via poll or select.
+ *
+ * Important:
+ * This requires the following premise:
+ * - The interval after the ddk calls libtpl-egl's tpl_surface_dequeue_buffer
+ *   to get the buffer and until it calls tpl_surface_enqueue_buffer
+ *   is regarded as the gpu rendering job.
+ * - Therefore, when using the dma_buf implicit fence,
+ *   there is no guarantee that the rendering job is finished
+ *   with the fence obtained through this API.
+ *
+ * The fence_fd obtained through this function is one-time use and
+ * cannot be reused, so the caller must close it when finished using it.
+ *
+ * @param egl_window handle to wl_egl_window.
+ * @return sync fd on success, -1 on failure.
+ */
+int
+wl_egl_window_tizen_create_render_sync_fd(struct wl_egl_window *egl_window);
+
 #ifdef __cplusplus
 }
 #endif
--
2.7.4

From 94f58f690824087365bf7835f7d5d4859e772063 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Tue, 12 Nov 2019 11:27:07 +0900
Subject: [PATCH 02/16] tpl_wayland_egl_thread: Implemented the render sync
 feature.

- Create a timeline per surface and create a sync fence to pass to the user.
- The sync fence the user gets is signaled at the time of tpl surface
  enqueue.
- This feature can help the user synchronize by telling when the gpu render
  is done, but this feature alone does not guarantee synchronization.
  Therefore, users should handle this sync_fence properly using poll or
  select.

Change-Id: If1c8a894fc8e103141b88d4479274f926753e52f
Signed-off-by: Joonbum Ko
---
 src/tpl_wayland_egl_thread.c | 57 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c
index dca6597..101cf8f 100755
--- a/src/tpl_wayland_egl_thread.c
+++ b/src/tpl_wayland_egl_thread.c
@@ -78,6 +78,7 @@ struct _twe_wl_disp_source {
 	twe_del_source *disp_del_source;
 	twe_thread *thread;
 	GMutex wl_event_mutex;
+
 	/* TODO : surface list */
 };
 
@@ -120,6 +121,10 @@ struct _twe_wl_surf_source {
 	twe_wl_disp_source *disp_source;
 	twe_del_source *surf_del_source;
 
+	tbm_fd render_sync_timeline;
+	int render_sync_timestamp;
+	unsigned int render_sync_fence_number;
+
 	GMutex surf_mutex;
 
 	GMutex free_queue_mutex;
@@ -1221,6 +1226,41 @@ __cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
 	}
 }
 
+static int
+__cb_create_render_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
+{
+	TPL_ASSERT(private);
+	TPL_ASSERT(wl_egl_window);
+
+	struct tizen_private *tizen_private = (struct tizen_private *)private;
+	twe_wl_surf_source *surf_source = NULL;
+
+	tbm_fd render_sync_fd = -1;
+
+	surf_source = (twe_wl_surf_source *)tizen_private->data;
+	if (!surf_source) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
+		return -1;
+	}
+
+	if (surf_source->render_sync_timeline != -1) {
+		char name[32];
+		snprintf(name, 32, "%u", surf_source->render_sync_fence_number++);
+		render_sync_fd = tbm_sync_fence_create(surf_source->render_sync_timeline,
+						       name,
+						       surf_source->render_sync_timestamp + 1);
+		TPL_DEBUG("[RENDER_SYNC] surf_source(%p) timeline(%d) timestamp(%d) name(%s) sync_fence(%d)",
+			  surf_source, surf_source->render_sync_timeline, surf_source->render_sync_timestamp,
+			  name, render_sync_fd);
+
+		TRACE_ASYNC_BEGIN(surf_source->render_sync_timestamp + 1, "[SYNC_FENCE]");
+
+		return render_sync_fd;
+	}
+
+	return -1;
+}
+
 static void
 __cb_tss_flusher_flush_callback(void *data,
 				struct tizen_surface_shm_flusher *tss_flusher)
@@ -1554,6 +1594,18 @@ _twe_surface_trace_enqueue_buffer(twe_wl_surf_source *surf_source,
 		return;
 	}
 
+	if (surf_source->render_sync_timeline != -1) {
+
+		surf_source->render_sync_timestamp++;
+		TRACE_ASYNC_END(surf_source->render_sync_timestamp, "[SYNC_FENCE]");
+
+		TPL_DEBUG("[RENDER_SYNC][INC] surf_source(%p) timeline(%d) timestamp(%d)",
+			  surf_source, surf_source->render_sync_timeline, surf_source->render_sync_timestamp);
+		if (!tbm_sync_timeline_inc(surf_source->render_sync_timeline, 1)) {
+			TPL_ERR("Failed to increase timeline(%d)", surf_source->render_sync_timeline);
+		}
+	}
+
 	if (surf_source->in_use_buffers) {
 		g_mutex_lock(&surf_source->surf_mutex);
 		/* Stop tracking of this canceled tbm_surface */
@@ -2502,6 +2554,10 @@ twe_surface_add(twe_thread* thread,
 
 	source->post_interval = 1;
 
+	source->render_sync_timeline = tbm_sync_timeline_create();
+	source->render_sync_timestamp = 0;
+	source->render_sync_fence_number = 0;
+
 	if (!disp_source->is_vulkan_dpy) {
 		struct wl_egl_window *wl_egl_window =
 			(struct wl_egl_window *)native_handle;
@@ -2521,6 +2577,7 @@ twe_surface_add(twe_thread* thread,
 			__cb_get_rotation_capability;
 		private->set_window_serial_callback = (void *)
 			__cb_set_window_serial_callback;
+		private->create_render_sync_fd = (void *)__cb_create_render_sync_fd;
 
 		source->latest_transform = private->transform;
--
2.7.4

From 76696ef8252c097185b333aa0472fa3e89f72f5e Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Wed, 13 Nov 2019 11:40:23 +0900
Subject: [PATCH 03/16] wayland-egl-tizen: Added new API to get presentation
 sync fd.

/**
 * Create a sync fence fd that can tell when presentation is done.
 *
 * It returns an fd which tells when the presentation is finished.
 * This fd can be waited on asynchronously via poll or select.
 *
 * Important:
 * This fence lets the caller know when the wl_surface received a PRESENT
 * or DISCARD event from the server.
 * When DISCARD is received, the buffer is not actually shown on the
 * display, but the fence is released just as for PRESENT.
 * In most uncomplicated cases, the attached buffer will not be discarded.
 *
 * @param egl_window handle to wl_egl_window.
 * @return sync fd on success, -1 on failure.
 */
int
wl_egl_window_tizen_create_presentation_sync_fd(struct wl_egl_window *egl_window)

Change-Id: I9c69dea4b78aba34db284b25dd47dd0544672bb4
Signed-off-by: Joonbum Ko
---
 src/wayland-egl-tizen/wayland-egl-tizen-priv.h |  2 ++
 src/wayland-egl-tizen/wayland-egl-tizen.c      | 22 ++++++++++++++++++++++
 src/wayland-egl-tizen/wayland-egl-tizen.h      | 19 +++++++++++++++++++
 3 files changed, 43 insertions(+)

diff --git a/src/wayland-egl-tizen/wayland-egl-tizen-priv.h b/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
index 69df3bc..6fd31ea 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
+++ b/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
@@ -23,6 +23,7 @@ struct tizen_private {
 	void (*set_frontbuffer_callback)(struct wl_egl_window *, void *, int);
 	void (*set_window_serial_callback)(struct wl_egl_window *, void *, unsigned int);
 	int (*create_render_sync_fd)(struct wl_egl_window *, void *);
+	int (*create_presentation_sync_fd)(struct wl_egl_window *, void *);
 };
 
 static struct tizen_private* tizen_private_create()
@@ -42,6 +43,7 @@ static struct tizen_private* tizen_private_create()
 		private->set_window_serial_callback = NULL;
 		private->set_frontbuffer_callback = NULL;
 		private->create_render_sync_fd = NULL;
+		private->create_presentation_sync_fd = NULL;
 	}
 
 	return private;
diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.c b/src/wayland-egl-tizen/wayland-egl-tizen.c
index 888f8af..831951c 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen.c
+++ b/src/wayland-egl-tizen/wayland-egl-tizen.c
@@ -273,3 +273,25 @@ wl_egl_window_tizen_create_render_sync_fd(struct wl_egl_window *egl_window)
 
 	return -1;
 }
+
+int
+wl_egl_window_tizen_create_presentation_sync_fd(struct wl_egl_window *egl_window)
+{
+	struct tizen_private *private = NULL;
+
+	if (egl_window == NULL) {
+		WL_EGL_ERR("egl_window is NULL");
+		return -1;
+	}
+
+	private = egl_window->driver_private;
+	if (private == NULL) {
+		WL_EGL_ERR("wl_egl_window(%p) driver_private is NULL", egl_window);
+		return -1;
+	}
+
+	if (private->create_presentation_sync_fd)
+		return private->create_presentation_sync_fd(egl_window, egl_window->driver_private);
+
+	return -1;
+}
diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.h b/src/wayland-egl-tizen/wayland-egl-tizen.h
index 96127a0..00b8b75 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen.h
+++ b/src/wayland-egl-tizen/wayland-egl-tizen.h
@@ -99,6 +99,25 @@ wl_egl_window_tizen_set_window_serial(struct wl_egl_window *egl_window,
 int
 wl_egl_window_tizen_create_render_sync_fd(struct wl_egl_window *egl_window);
 
+/**
+ * Create a sync fence fd that can tell when presentation is done.
+ *
+ * It returns an fd which tells when the presentation is finished.
+ * This fd can be waited on asynchronously via poll or select.
+ *
+ * Important:
+ * This fence lets the caller know when the wl_surface received a PRESENT
+ * or DISCARD event from the server.
+ * When DISCARD is received, the buffer is not actually shown on the
+ * display, but the fence is released just as for PRESENT.
+ * In most uncomplicated cases, the attached buffer will not be discarded.
+ *
+ * @param egl_window handle to wl_egl_window.
+ * @return sync fd on success, -1 on failure.
+ */
+int
+wl_egl_window_tizen_create_presentation_sync_fd(struct wl_egl_window *egl_window);
+
 #ifdef __cplusplus
 }
 #endif
--
2.7.4

From e32a763836f8e61b9c3a00e62a975193e761fae6 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 14 Nov 2019 19:31:42 +0900
Subject: [PATCH 04/16] tpl_wayland_egl_thread: Implemented the presentation
 sync feature.

- Create a timeline per surface and create a sync fence to pass to the user.
- The sync fence the user gets is signaled at the time of presentation done.
- This feature can help the user synchronize by telling when presentation is
  done. Users should handle this sync_fence properly using poll or select.
- This patch depends on wayland-extension and enlightenment.

Change-Id: I180ac6b23cc89ed3f7f0ebfc934893efa873dd6a
Signed-off-by: Joonbum Ko
---
 configure.ac                 |   2 +-
 packaging/libtpl-egl.spec    |   1 +
 src/tpl_wayland_egl_thread.c | 175 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 177 insertions(+), 1 deletion(-)

diff --git a/configure.ac b/configure.ac
index 077366e..7a761bb 100644
--- a/configure.ac
+++ b/configure.ac
@@ -60,7 +60,7 @@ AC_ARG_WITH([wayland],
 	    [with_wayland=yes])
 
 AS_IF([test "${with_wayland}" = "yes" || test "${with_wayland}" = "1"],
-      [PKG_CHECK_MODULES([TPL_WL], [libtdm-client wayland-tbm-client wayland-tbm-server tizen-surface-client glib-2.0 wayland-egl wayland-egl-backend])
+      [PKG_CHECK_MODULES([TPL_WL], [libtdm-client wayland-tbm-client wayland-tbm-server tizen-surface-client glib-2.0 wayland-egl presentation-time-client wayland-egl-backend])
       TPL_CFLAGS+="$TPL_WL_CFLAGS"
       TPL_CFLAGS+=" -DTPL_WINSYS_WL=1 "
       TPL_LIBS+="$TPL_WL_LIBS"],
diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec
index 7b4b3eb..b49148a 100644
--- a/packaging/libtpl-egl.spec
+++ b/packaging/libtpl-egl.spec
@@ -79,6 +79,7 @@ BuildRequires: pkgconfig(wayland-egl-backend)
 BuildRequires: pkgconfig(wayland-tbm-client)
 BuildRequires: pkgconfig(wayland-tbm-server)
 BuildRequires: pkgconfig(tizen-surface-client)
+BuildRequires: pkgconfig(presentation-time-client)
 BuildRequires: pkgconfig(glib-2.0)
 %endif
 
diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c
index 101cf8f..f7fb447 100755
--- a/src/tpl_wayland_egl_thread.c
+++ b/src/tpl_wayland_egl_thread.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 
 #include "tpl_utils.h"
 #include "tpl_internal.h"
@@ -67,6 +68,7 @@ struct _twe_wl_disp_source {
 	struct wl_event_queue *ev_queue;
 	struct wayland_tbm_client *wl_tbm_client;
 	struct tizen_surface_shm *tss; /* used for surface buffer_flush */
+	struct wp_presentation *presentation;
 	struct {
 		int min_buffer;
 		int max_buffer;
@@ -125,6 +127,12 @@ struct _twe_wl_surf_source {
 	int render_sync_timestamp;
 	unsigned int render_sync_fence_number;
 
+	tbm_fd presentation_sync_timeline;
+	int presentation_sync_timestamp;
+	int presentation_sync_ts_backup;
+	int presentation_sync_req_cnt;
+	GMutex pst_mutex;
+
 	GMutex surf_mutex;
 
 	GMutex free_queue_mutex;
@@ -788,6 +796,13 @@ __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
 					 &wayland_vulkan_interface,
 					 version);
 	}
+
+	if (!strcmp(interface, wp_presentation_interface.name)) {
+		disp_source->presentation =
+			wl_registry_bind(wl_registry,
+					 name, &wp_presentation_interface, 1);
+		TPL_DEBUG("bind wp_presentation_interface");
+	}
 }
 
 void
@@ -867,6 +882,12 @@ _twe_display_wayland_init(twe_wl_disp_source *disp_source)
 		TPL_LOG_T(BACKEND, "wl_vk_client(%p) init.", disp_source->wl_vk_client);
 	}
 
+	if (disp_source->presentation) {
+		wl_proxy_set_queue((struct wl_proxy *)disp_source->presentation,
+				   disp_source->ev_queue);
+		TPL_LOG_T(BACKEND, "wp_presentation(%p) init.", disp_source->presentation);
+	}
+
 fini:
 	if (display_wrapper)
 		wl_proxy_wrapper_destroy(display_wrapper);
@@ -1261,6 +1282,44 @@ __cb_create_render_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
 	return -1;
 }
 
+static int
+__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
+{
+	TPL_ASSERT(private);
+	TPL_ASSERT(wl_egl_window);
+
+	struct tizen_private *tizen_private = (struct tizen_private *)private;
+	twe_wl_surf_source *surf_source = NULL;
+
+	tbm_fd presentation_sync_fd = -1;
+
+	surf_source = (twe_wl_surf_source *)tizen_private->data;
+	if (!surf_source) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
+		return -1;
+	}
+
+	if (surf_source->presentation_sync_timeline != -1) {
+		g_mutex_lock(&surf_source->pst_mutex);
+
+		presentation_sync_fd = tbm_sync_fence_create(surf_source->presentation_sync_timeline,
+							     NULL,
+							     surf_source->presentation_sync_timestamp++);
+		TPL_DEBUG("[PRESENTATION_SYNC] surf_source(%p) timeline(%d) timestamp(%d) sync_fence(%d)",
+			  surf_source, surf_source->presentation_sync_timeline, surf_source->presentation_sync_timestamp,
+			  presentation_sync_fd);
+
+		TRACE_ASYNC_BEGIN(surf_source->presentation_sync_timestamp, "[PRESENTATION]");
+
+		surf_source->presentation_sync_req_cnt++;
+
+		g_mutex_unlock(&surf_source->pst_mutex);
+		return presentation_sync_fd;
+	}
+
+	return -1;
+}
+
 static void
 __cb_tss_flusher_flush_callback(void *data,
 				struct tizen_surface_shm_flusher *tss_flusher)
@@ -1901,6 +1960,105 @@ _twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source,
 }
 
 static void
+__cb_presentation_feedback_sync_output(void *data,
+				       struct wp_presentation_feedback *presentation_feedback,
+				       struct wl_output *output)
+{
+	TPL_IGNORE(data);
+	TPL_IGNORE(presentation_feedback);
+	TPL_IGNORE(output);
+}
+
+static void
+__cb_presentation_feedback_presented(void *data,
+				     struct wp_presentation_feedback *presentation_feedback,
+				     uint32_t tv_sec_hi,
+				     uint32_t tv_sec_lo,
+				     uint32_t tv_nsec,
+				     uint32_t refresh_nsec,
+				     uint32_t seq_hi,
+				     uint32_t seq_lo,
+				     uint32_t flags)
+{
+	TPL_IGNORE(tv_sec_hi);
+	TPL_IGNORE(tv_sec_lo);
+	TPL_IGNORE(tv_nsec);
+	TPL_IGNORE(refresh_nsec);
+	TPL_IGNORE(seq_hi);
+	TPL_IGNORE(seq_lo);
+	TPL_IGNORE(flags);
+
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
+
+	g_mutex_lock(&surf_source->pst_mutex);
+
+	TPL_DEBUG("[FEEDBACK][PRESENTED] surf_source(%p) wl_surface(%p)",
+		  surf_source, surf_source->surf);
+
+	if (surf_source->presentation_sync_timeline != -1 &&
+	    surf_source->presentation_sync_req_cnt > 0) {
+
+		surf_source->presentation_sync_ts_backup++;
+		surf_source->presentation_sync_req_cnt--;
+
+		TRACE_ASYNC_END(surf_source->presentation_sync_ts_backup, "[PRESENTATION]");
+
+		TPL_DEBUG("[PRESENTATION][INC] surf_source(%p) timeline(%d) timestamp(%d)",
+			  surf_source, surf_source->presentation_sync_timeline,
+			  surf_source->presentation_sync_ts_backup);
+		if (!tbm_sync_timeline_inc(surf_source->presentation_sync_timeline, 1)) {
+			TPL_ERR("Failed to increase timeline(%d)",
+				surf_source->presentation_sync_timeline);
+		}
+	}
+
+	if (presentation_feedback)
+		wp_presentation_feedback_destroy(presentation_feedback);
+
+	g_mutex_unlock(&surf_source->pst_mutex);
+}
+
+static void
+__cb_presentation_feedback_discarded(void *data,
+				     struct wp_presentation_feedback *presentation_feedback)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
+
+	g_mutex_lock(&surf_source->pst_mutex);
+
+	TPL_DEBUG("[FEEDBACK][DISCARDED] surf_source(%p) wl_surface(%p)",
+		  surf_source, surf_source->surf);
+
+	if (surf_source->presentation_sync_timeline != -1 &&
+	    surf_source->presentation_sync_req_cnt > 0) {
+
+		surf_source->presentation_sync_ts_backup++;
+		surf_source->presentation_sync_req_cnt--;
+
+		TRACE_ASYNC_END(surf_source->presentation_sync_ts_backup, "[PRESENTATION]");
+
+		TPL_DEBUG("[PRESENTATION][INC] surf_source(%p) timeline(%d) timestamp(%d)",
+			  surf_source, surf_source->presentation_sync_timeline,
+			  surf_source->presentation_sync_ts_backup);
+		if (!tbm_sync_timeline_inc(surf_source->presentation_sync_timeline, 1)) {
+			TPL_ERR("Failed to increase timeline(%d)",
+				surf_source->presentation_sync_timeline);
+		}
+	}
+
+	if (presentation_feedback)
+		wp_presentation_feedback_destroy(presentation_feedback);
+
+	g_mutex_unlock(&surf_source->pst_mutex);
+}
+
+static const struct wp_presentation_feedback_listener feedback_listener = {
+	__cb_presentation_feedback_sync_output, /* sync_output feedback */
+	__cb_presentation_feedback_presented,
+	__cb_presentation_feedback_discarded
+};
+
+static void
 _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source,
 			      tbm_surface_h tbm_surface)
 {
@@ -1909,6 +2067,7 @@ _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source,
 	struct wl_surface *wl_surface = surf_source->surf;
 	struct wl_egl_window *wl_egl_window = surf_source->wl_egl_window;
 	uint32_t version;
+	struct wp_presentation_feedback *p_feedback;
 
 	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
 					   (void **)&buf_info);
@@ -1920,6 +2079,15 @@ _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source,
 
 	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
 
+	g_mutex_lock(&surf_source->pst_mutex);
+	if (disp_source->presentation &&
+	    surf_source->presentation_sync_req_cnt > 0) {
+		p_feedback = wp_presentation_feedback(disp_source->presentation,
+						      wl_surface);
+		wp_presentation_feedback_add_listener(p_feedback, &feedback_listener, surf_source);
+	}
+	g_mutex_unlock(&surf_source->pst_mutex);
+
 	if (buf_info->w_rotated == TPL_TRUE) {
 		wayland_tbm_client_set_buffer_transform(
 			disp_source->wl_tbm_client,
@@ -2558,6 +2726,11 @@ twe_surface_add(twe_thread* thread,
 	source->render_sync_timestamp = 0;
 	source->render_sync_fence_number = 0;
 
+	source->presentation_sync_timeline = tbm_sync_timeline_create();
+	source->presentation_sync_timestamp = 0;
+	source->presentation_sync_ts_backup = 0;
+	source->presentation_sync_req_cnt = 0;
+
 	if (!disp_source->is_vulkan_dpy) {
 		struct wl_egl_window *wl_egl_window =
 			(struct wl_egl_window *)native_handle;
@@ -2578,6 +2751,7 @@ twe_surface_add(twe_thread* thread,
 		private->set_window_serial_callback = (void *)
 			__cb_set_window_serial_callback;
 		private->create_render_sync_fd = (void *)__cb_create_render_sync_fd;
+		private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
 
 		source->latest_transform = private->transform;
 
@@ -2607,6 +2781,7 @@ twe_surface_add(twe_thread* thread,
 	g_source_attach(&source->gsource, g_main_loop_get_context(ctx->twe_loop));
 
 	g_mutex_init(&source->surf_mutex);
+	g_mutex_init(&source->pst_mutex);
 
 	g_mutex_init(&source->free_queue_mutex);
 	g_cond_init(&source->free_queue_cond);
--
2.7.4

From d6375a343b0cc5bf7945aa6bdcc456b4aec2bbae Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 14 Nov 2019 20:33:32 +0900
Subject: [PATCH 05/16] wayland-egl-tizen: Added new API to merge sync fds.

New API

/**
 * Get a new fence fd with fence1 and fence2 merged
 *
 * It returns a new fence fd that waits for both fences to be signaled.
 * If the user succeeds in obtaining a new merged fence using this API,
 * the two fence fds that were passed in must be closed by the user.
 *
 * Multiple calls to this API allow multiple fences to be merged.
 *
 * The two fence fds the caller wants to merge should be closed
 * if the caller is not going to use them after
 * the new merged fd is created.
 *
 * @param egl_window handle to wl_egl_window
 * @param sync_fd1 first fd to merge with the second fd
 * @param sync_fd2 second fd to merge with the first fd
 * @return merged fd on success, -1 on failure.
 */
int
wl_egl_window_tizen_merge_sync_fds(struct wl_egl_window *egl_window,
				   int sync_fd1, int sync_fd2);

Change-Id: I29ac248c836392b9e6acb141a30bb70dc5e9731f
Signed-off-by: Joonbum Ko
---
 src/wayland-egl-tizen/wayland-egl-tizen-priv.h |  2 ++
 src/wayland-egl-tizen/wayland-egl-tizen.c      | 23 +++++++++++++++++++++++
 src/wayland-egl-tizen/wayland-egl-tizen.h      | 22 ++++++++++++++++++++++
 3 files changed, 47 insertions(+)

diff --git a/src/wayland-egl-tizen/wayland-egl-tizen-priv.h b/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
index 6fd31ea..9a6b812 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
+++ b/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
@@ -24,6 +24,7 @@ struct tizen_private {
 	void (*set_window_serial_callback)(struct wl_egl_window *, void *, unsigned int);
 	int (*create_render_sync_fd)(struct wl_egl_window *, void *);
 	int (*create_presentation_sync_fd)(struct wl_egl_window *, void *);
+	int (*merge_sync_fds)(void *, int, int);
 };
 
 static struct tizen_private* tizen_private_create()
@@ -44,6 +45,7 @@ static struct tizen_private* tizen_private_create()
 		private->create_render_sync_fd = NULL;
 		private->create_presentation_sync_fd = NULL;
+		private->merge_sync_fds = NULL;
 	}
 
 	return private;
diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.c b/src/wayland-egl-tizen/wayland-egl-tizen.c
index 831951c..70e66a3 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen.c
+++ b/src/wayland-egl-tizen/wayland-egl-tizen.c
@@ -295,3 +295,26 @@ wl_egl_window_tizen_create_presentation_sync_fd(struct wl_egl_window *egl_window
 
 	return -1;
 }
+
+int
+wl_egl_window_tizen_merge_sync_fds(struct wl_egl_window *egl_window,
+				   int sync_fd1, int sync_fd2)
+{
+	struct tizen_private *private = NULL;
+
+	if (egl_window == NULL) {
+		WL_EGL_ERR("egl_window is NULL");
+		return -1;
+	}
+
+	private = egl_window->driver_private;
+	if (private == NULL) {
+		WL_EGL_ERR("wl_egl_window(%p) driver_private is NULL", egl_window);
+		return -1;
+	}
+
+	if (private->merge_sync_fds)
+		return private->merge_sync_fds(egl_window->driver_private, sync_fd1, sync_fd2);
+
+	return -1;
+}
\ No newline at end of file
diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.h b/src/wayland-egl-tizen/wayland-egl-tizen.h
index 00b8b75..041488c 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen.h
+++ b/src/wayland-egl-tizen/wayland-egl-tizen.h
@@ -118,6 +118,28 @@ wl_egl_window_tizen_create_render_sync_fd(struct wl_egl_window *egl_window);
 int
 wl_egl_window_tizen_create_presentation_sync_fd(struct wl_egl_window *egl_window);
 
+/**
+ * Get a new fence fd with fence1 and fence2 merged
+ *
+ * It returns a new fence fd that waits for both fences to be signaled.
+ * If the user succeeds in obtaining a new merged fence using this API,
+ * the two fence fds that were passed in must be closed by the user.
+ *
+ * Multiple calls to this API allow multiple fences to be merged.
+ *
+ * The two fence fds the caller wants to merge should be closed
+ * if the caller is not going to use them after
+ * the new merged fd is created.
+ *
+ * @param egl_window handle to wl_egl_window
+ * @param sync_fd1 first fd to merge with the second fd
+ * @param sync_fd2 second fd to merge with the first fd
+ * @return merged fd on success, -1 on failure.
+ */
+int
+wl_egl_window_tizen_merge_sync_fds(struct wl_egl_window *egl_window,
+				   int sync_fd1, int sync_fd2);
+
 #ifdef __cplusplus
 }
 #endif
--
2.7.4

From 8645365b9a9d54f1e2f9fbf7fde5e69e5ad9ae49 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Fri, 15 Nov 2019 15:14:47 +0900
Subject: [PATCH 06/16] tpl_wayland_egl_thread: Implemented merge sync fence.

Change-Id: Ic5697fd0dbade852d9340b2c490d6247a12f46f1
Signed-off-by: Joonbum Ko
---
 src/tpl_wayland_egl_thread.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c
index f7fb447..6368d91 100755
--- a/src/tpl_wayland_egl_thread.c
+++ b/src/tpl_wayland_egl_thread.c
@@ -1320,6 +1320,35 @@ __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *priv
 	return -1;
 }
 
+static int
+__cb_merge_sync_fds(void *private, int sync_fd1, int sync_fd2)
+{
+	TPL_ASSERT(private);
+
+	struct tizen_private *tizen_private = (struct tizen_private *)private;
+	twe_wl_surf_source *surf_source = NULL;
+	tbm_fd merged_fd;
+
+	surf_source = (twe_wl_surf_source *)tizen_private->data;
+	if (!surf_source) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
+		return -1;
+	}
+
+	if (surf_source->render_sync_timeline == -1 &&
+	    surf_source->presentation_sync_timeline == -1) {
+		TPL_ERR("There is no timeline for any sync fd in surf_source(%p)", surf_source);
+		return -1;
+	}
+
+	merged_fd = tbm_sync_fence_merge(NULL, sync_fd1, sync_fd2);
+
+	TPL_DEBUG("[FENCE_MERGE] surf_source(%p) fence1(%d) + fence2(%d) = merged(%d)",
+		  surf_source, sync_fd1, sync_fd2, merged_fd);
+
+	return merged_fd;
+}
+
 static void
 __cb_tss_flusher_flush_callback(void *data,
 				struct tizen_surface_shm_flusher *tss_flusher)
@@ -2752,6 +2781,7 @@ twe_surface_add(twe_thread* thread,
 			__cb_set_window_serial_callback;
 		private->create_render_sync_fd = (void *)__cb_create_render_sync_fd;
 		private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
+		private->merge_sync_fds = (void *)__cb_merge_sync_fds;
 
 		source->latest_transform = private->transform;
--
2.7.4

From 9f7b27bb3c91b6a0c02dcc8ca1310780bcc5e9c2 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 12 Mar 2020 14:04:16 +0900
Subject: [PATCH 07/16] wayland-egl-tizen: Changed API name render_sync_fd to
 commit_sync_fd.

- If the user wants to know when rendering is done using EGLSyncKHR,
  tpl_surface_dequeue_buffer, tpl_surface_enqueue_buffer and
  wl_surface_commit occur at similar moments.
  Therefore, the fence only tells when wl_surface_commit is done,
  but does not guarantee that rendering is done.
- If neither EGLSyncKHR nor the dma_buf implicit fence is used,
  render completion can be guaranteed.
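(Editor's illustration, not part of the patch: a minimal client-side sketch
of how these temporary sync APIs fit together, based only on the API
comments in this series. The EGL handles, the installed header name
<wayland-egl-tizen.h>, and the use of poll() are assumptions.)

    #include <poll.h>
    #include <unistd.h>
    #include <EGL/egl.h>
    #include <wayland-egl-tizen.h>

    static void swap_and_wait(EGLDisplay dpy, EGLSurface sfc,
                              struct wl_egl_window *egl_window)
    {
        /* Request both fences before the eglSwapBuffers that signals them.
         * Error paths are elided for brevity. */
        int commit_fd = wl_egl_window_tizen_create_commit_sync_fd(egl_window);
        int pst_fd = wl_egl_window_tizen_create_presentation_sync_fd(egl_window);

        eglSwapBuffers(dpy, sfc);

        if (commit_fd != -1 && pst_fd != -1) {
            /* One fd that signals only when both fences have signaled.
             * The two input fds must be closed once the merge succeeds. */
            int merged_fd = wl_egl_window_tizen_merge_sync_fds(egl_window,
                                                               commit_fd, pst_fd);
            if (merged_fd != -1) {
                close(commit_fd);
                close(pst_fd);

                struct pollfd pfd = { .fd = merged_fd, .events = POLLIN };
                poll(&pfd, 1, -1);   /* wait for commit + presentation done */
                close(merged_fd);    /* fences are one-time use */
            }
        }
    }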
Change-Id: I43d7bdd10a33d0b559ecd1d219261bbb5415b574
Signed-off-by: Joonbum Ko
---
 src/tpl_wayland_egl_thread.c                   | 61 +++++++++++++-------------
 src/wayland-egl-tizen/wayland-egl-tizen-priv.h |  4 +-
 src/wayland-egl-tizen/wayland-egl-tizen.c      |  6 +--
 src/wayland-egl-tizen/wayland-egl-tizen.h      | 19 ++++----
 4 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c
index 6368d91..0176383 100755
--- a/src/tpl_wayland_egl_thread.c
+++ b/src/tpl_wayland_egl_thread.c
@@ -123,9 +123,9 @@ struct _twe_wl_surf_source {
 	twe_wl_disp_source *disp_source;
 	twe_del_source *surf_del_source;
 
-	tbm_fd render_sync_timeline;
-	int render_sync_timestamp;
-	unsigned int render_sync_fence_number;
+	tbm_fd commit_sync_timeline;
+	int commit_sync_timestamp;
+	unsigned int commit_sync_fence_number;
 
 	tbm_fd presentation_sync_timeline;
 	int presentation_sync_timestamp;
@@ -1248,7 +1248,7 @@ __cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
 }
 
 static int
-__cb_create_render_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
+__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
 {
 	TPL_ASSERT(private);
 	TPL_ASSERT(wl_egl_window);
@@ -1256,7 +1256,7 @@ __cb_create_render_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
 	struct tizen_private *tizen_private = (struct tizen_private *)private;
 	twe_wl_surf_source *surf_source = NULL;
 
-	tbm_fd render_sync_fd = -1;
+	tbm_fd commit_sync_fd = -1;
 
 	surf_source = (twe_wl_surf_source *)tizen_private->data;
 	if (!surf_source) {
@@ -1264,19 +1264,19 @@ __cb_create_render_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
 		return -1;
 	}
 
-	if (surf_source->render_sync_timeline != -1) {
+	if (surf_source->commit_sync_timeline != -1) {
 		char name[32];
-		snprintf(name, 32, "%u", surf_source->render_sync_fence_number++);
-		render_sync_fd = tbm_sync_fence_create(surf_source->render_sync_timeline,
+		snprintf(name, 32, "%u", surf_source->commit_sync_fence_number++);
+		commit_sync_fd = tbm_sync_fence_create(surf_source->commit_sync_timeline,
 						       name,
-						       surf_source->render_sync_timestamp + 1);
-		TPL_DEBUG("[RENDER_SYNC] surf_source(%p) timeline(%d) timestamp(%d) name(%s) sync_fence(%d)",
-			  surf_source, surf_source->render_sync_timeline, surf_source->render_sync_timestamp,
-			  name, render_sync_fd);
+						       surf_source->commit_sync_timestamp + 1);
+		TPL_DEBUG("[COMMIT_SYNC] surf_source(%p) timeline(%d) timestamp(%d) name(%s) sync_fence(%d)",
+			  surf_source, surf_source->commit_sync_timeline, surf_source->commit_sync_timestamp,
+			  name, commit_sync_fd);
 
-		TRACE_ASYNC_BEGIN(surf_source->render_sync_timestamp + 1, "[SYNC_FENCE]");
+		TRACE_ASYNC_BEGIN(surf_source->commit_sync_timestamp + 1, "[SYNC_FENCE]");
 
-		return render_sync_fd;
+		return commit_sync_fd;
 	}
 
 	return -1;
@@ -1335,7 +1335,7 @@ __cb_merge_sync_fds(void *private, int sync_fd1, int sync_fd2)
 		return -1;
 	}
 
-	if (surf_source->render_sync_timeline == -1 &&
+	if (surf_source->commit_sync_timeline == -1 &&
 	    surf_source->presentation_sync_timeline == -1) {
 		TPL_ERR("There is no timeline for any sync fd in surf_source(%p)", surf_source);
 		return -1;
@@ -1682,18 +1682,6 @@ _twe_surface_trace_enqueue_buffer(twe_wl_surf_source *surf_source,
 		return;
 	}
 
-	if (surf_source->render_sync_timeline != -1) {
-
-		surf_source->render_sync_timestamp++;
-		TRACE_ASYNC_END(surf_source->render_sync_timestamp, "[SYNC_FENCE]");
-
-		TPL_DEBUG("[RENDER_SYNC][INC] surf_source(%p) timeline(%d) timestamp(%d)",
-			  surf_source, surf_source->render_sync_timeline, surf_source->render_sync_timestamp);
-		if (!tbm_sync_timeline_inc(surf_source->render_sync_timeline, 1)) {
-			TPL_ERR("Failed to increase timeline(%d)", surf_source->render_sync_timeline);
-		}
-	}
-
 	if (surf_source->in_use_buffers) {
 		g_mutex_lock(&surf_source->surf_mutex);
 		/* Stop tracking of this canceled tbm_surface */
@@ -2193,6 +2181,17 @@ _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source,
 	if (surf_source->committed_buffers) {
 		__tpl_list_push_back(surf_source->committed_buffers, tbm_surface);
 	}
+
+	if (surf_source->commit_sync_timeline != -1) {
+		surf_source->commit_sync_timestamp++;
+		TRACE_ASYNC_END(surf_source->commit_sync_timestamp, "[SYNC_FENCE]");
+
+		TPL_DEBUG("[COMMIT_SYNC][INC] surf_source(%p) timeline(%d) timestamp(%d)",
+			  surf_source, surf_source->commit_sync_timeline, surf_source->commit_sync_timestamp);
+		if (!tbm_sync_timeline_inc(surf_source->commit_sync_timeline, 1)) {
+			TPL_ERR("Failed to increase timeline(%d)", surf_source->commit_sync_timeline);
+		}
+	}
 }
 
 /* The following function _twe_thread_wl_surface_acquire_and_commit can be
@@ -2751,9 +2750,9 @@ twe_surface_add(twe_thread* thread,
 
 	source->post_interval = 1;
 
-	source->render_sync_timeline = tbm_sync_timeline_create();
-	source->render_sync_timestamp = 0;
-	source->render_sync_fence_number = 0;
+	source->commit_sync_timeline = tbm_sync_timeline_create();
+	source->commit_sync_timestamp = 0;
+	source->commit_sync_fence_number = 0;
 
 	source->presentation_sync_timeline = tbm_sync_timeline_create();
 	source->presentation_sync_timestamp = 0;
@@ -2779,7 +2778,7 @@ twe_surface_add(twe_thread* thread,
 			__cb_get_rotation_capability;
 		private->set_window_serial_callback = (void *)
 			__cb_set_window_serial_callback;
-		private->create_render_sync_fd = (void *)__cb_create_render_sync_fd;
+		private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
 		private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
 		private->merge_sync_fds = (void *)__cb_merge_sync_fds;
 
diff --git a/src/wayland-egl-tizen/wayland-egl-tizen-priv.h b/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
index 9a6b812..caeb5b0 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
+++ b/src/wayland-egl-tizen/wayland-egl-tizen-priv.h
@@ -22,7 +22,7 @@ struct tizen_private {
 	int (*get_rotation_capability)(struct wl_egl_window *, void *);
 	void (*set_frontbuffer_callback)(struct wl_egl_window *, void *, int);
 	void (*set_window_serial_callback)(struct wl_egl_window *, void *, unsigned int);
-	int (*create_render_sync_fd)(struct wl_egl_window *, void *);
+	int (*create_commit_sync_fd)(struct wl_egl_window *, void *);
 	int (*create_presentation_sync_fd)(struct wl_egl_window *, void *);
 	int (*merge_sync_fds)(void *, int, int);
 };
@@ -43,7 +43,7 @@ static struct tizen_private* tizen_private_create()
 		private->get_rotation_capability = NULL;
 		private->set_window_serial_callback = NULL;
 		private->set_frontbuffer_callback = NULL;
-		private->create_render_sync_fd = NULL;
+		private->create_commit_sync_fd = NULL;
 		private->create_presentation_sync_fd = NULL;
 		private->merge_sync_fds = NULL;
 	}
diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.c b/src/wayland-egl-tizen/wayland-egl-tizen.c
index 70e66a3..b4e679c 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen.c
+++ b/src/wayland-egl-tizen/wayland-egl-tizen.c
@@ -253,7 +253,7 @@ wl_egl_window_tizen_set_window_serial(struct wl_egl_window *egl_window,
 }
 
 int
-wl_egl_window_tizen_create_render_sync_fd(struct wl_egl_window *egl_window)
+wl_egl_window_tizen_create_commit_sync_fd(struct wl_egl_window *egl_window)
 {
 	struct tizen_private *private = NULL;
 
@@ -268,8 +268,8 @@ wl_egl_window_tizen_create_render_sync_fd(struct wl_egl_window *egl_window)
 		return -1;
 	}
 
-	if (private->create_render_sync_fd)
-		return private->create_render_sync_fd(egl_window, egl_window->driver_private);
+	if (private->create_commit_sync_fd)
+		return private->create_commit_sync_fd(egl_window, egl_window->driver_private);
 
 	return -1;
 }
diff --git a/src/wayland-egl-tizen/wayland-egl-tizen.h b/src/wayland-egl-tizen/wayland-egl-tizen.h
index 041488c..921b193 100644
--- a/src/wayland-egl-tizen/wayland-egl-tizen.h
+++ b/src/wayland-egl-tizen/wayland-egl-tizen.h
@@ -75,20 +75,21 @@ wl_egl_window_tizen_set_window_serial(struct wl_egl_window *egl_window,
 
 /* temporary APIs for testing sync feature */
 /**
- * Create a sync fence fd that can tell when rendering is done.
+ * Create a sync fence fd that can tell when wl_surface_commit is done.
  *
  * If eglSwapBuffers works asynchronously, it returns an fd which tells
- * when the render job is finished.
+ * when wl_surface_commit was called.
  * This fd can be waited on asynchronously via poll or select.
  *
  * Important:
  * This requires the following premise:
- * - The interval after the ddk calls libtpl-egl's tpl_surface_dequeue_buffer
- *   to get the buffer and until it calls tpl_surface_enqueue_buffer
- *   is regarded as the gpu rendering job.
- * - Therefore, when using the dma_buf implicit fence,
- *   there is no guarantee that the rendering job is finished
- *   with the fence obtained through this API.
+ * - If the user wants to know when rendering is done using EGLSyncKHR,
+ *   tpl_surface_dequeue_buffer, tpl_surface_enqueue_buffer and
+ *   wl_surface_commit occur at similar moments.
+ *   Therefore, this only tells when wl_surface_commit is done,
+ *   but does not guarantee that rendering is done.
+ * - If neither EGLSyncKHR nor the dma_buf implicit fence is used,
+ *   render completion can be guaranteed.
  *
  * The fence_fd obtained through this function is one-time use and
  * cannot be reused, so the caller must close it when finished using it.
@@ -97,7 +98,7 @@ wl_egl_window_tizen_set_window_serial(struct wl_egl_window *egl_window,
  * @return sync fd on success, -1 on failure.
  */
 int
-wl_egl_window_tizen_create_render_sync_fd(struct wl_egl_window *egl_window);
+wl_egl_window_tizen_create_commit_sync_fd(struct wl_egl_window *egl_window);
 
 /**
  * Create a sync fence fd that can tell when presentation is done.
--
2.7.4

From 350c6164114b59b35c4d95c24188f922b963d8e5 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Tue, 17 Mar 2020 17:00:21 +0900
Subject: [PATCH 08/16] Package version up to 1.7.2

Change-Id: Ic9008c2a0752c62bd0c9a3c50033fb16971e0c20
Signed-off-by: Joonbum Ko
---
 packaging/libtpl-egl.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec
index b49148a..d4eccf4 100644
--- a/packaging/libtpl-egl.spec
+++ b/packaging/libtpl-egl.spec
@@ -4,7 +4,7 @@
 #TPL VERSION MACROS
 %define TPL_VERSION_MAJOR 1
 %define TPL_VERSION_MINOR 7
-%define TPL_VERSION_PATCH 1
+%define TPL_VERSION_PATCH 2
 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH}
 
 #TPL WINDOW SYSTEM DEFINITION
--
2.7.4

From 93c6381fdaa12494c3f51e689f9f6a03e1bf6ee4 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Fri, 20 Mar 2020 10:45:38 +0900
Subject: [PATCH 09/16] Deleted the unused code related to worker_thread.

- The worker_thread that was created for vulkan has been replaced with
  twe_thread, so it is obsolete and has been deprecated for a long time.
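(Editorial note: with the worker thread removed, the only remaining wait
path is the inline one in __tpl_wayland_vk_wsi_surface_enqueue_buffer,
visible as context in the hunks below. A condensed sketch of that pattern;
the infinite timeout value and the error handling here are assumptions, and
the tbm_sync_fence_wait() return convention of 1 = signaled, -1 = timeout,
0 = error is taken from the deleted worker-thread code:)

    if (sync_fence != -1) {
        /* Block in place until the client's rendering fence signals,
         * instead of handing the fd to the removed worker thread. */
        if (tbm_sync_fence_wait(sync_fence, -1) != 1)
            TPL_ERR("Failed to wait sync fence(%d)", sync_fence);
        close(sync_fence);
    }
    __tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface);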
Change-Id: I0d1774b772c1c9b7820806fd5029952848dc2eb0
Signed-off-by: Joonbum Ko
---
 src/Makefile.am          |   1 -
 src/tpl_wayland_vk_wsi.c | 252 +----------------------
 src/tpl_worker_thread.c  | 508 -----------------------------------------
 src/tpl_worker_thread.h  |  30 ---
 4 files changed, 1 insertion(+), 790 deletions(-)
 delete mode 100644 src/tpl_worker_thread.c
 delete mode 100644 src/tpl_worker_thread.h

diff --git a/src/Makefile.am b/src/Makefile.am
index d885262..d7823b7 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -29,7 +29,6 @@ libtpl_egl_la_SOURCES += tpl_wayland_egl.c \
 			 tpl_wayland_egl_thread.c \
 			 tpl_wayland_vk_wsi.c \
 			 tpl_wl_vk_thread.c \
-			 tpl_worker_thread.c \
 			 wayland-vulkan/wayland-vulkan-protocol.c
 endif

diff --git a/src/tpl_wayland_vk_wsi.c b/src/tpl_wayland_vk_wsi.c
index f2e2adc..3f91540 100644
--- a/src/tpl_wayland_vk_wsi.c
+++ b/src/tpl_wayland_vk_wsi.c
@@ -15,17 +15,6 @@
 
 #define CLIENT_QUEUE_SIZE 3
 
-#define USE_WORKER_THREAD
-#ifndef USE_WORKER_THREAD
-#define USE_WORKER_THREAD 0
-#else
-#include "tpl_worker_thread.h"
-#include
-#include
-#undef USE_WORKER_THREAD
-#define USE_WORKER_THREAD 1
-#endif
-
 typedef struct _tpl_wayland_vk_wsi_display tpl_wayland_vk_wsi_display_t;
 typedef struct _tpl_wayland_vk_wsi_surface tpl_wayland_vk_wsi_surface_t;
 typedef struct _tpl_wayland_vk_wsi_buffer tpl_wayland_vk_wsi_buffer_t;
@@ -43,23 +32,6 @@ struct _tpl_wayland_vk_wsi_display {
 struct _tpl_wayland_vk_wsi_surface {
 	tbm_surface_queue_h tbm_queue;
 	int buffer_count;
-
-#if USE_WORKER_THREAD == 1
-	/*
-	 * TODO: it can move to libtbm
-	 * libtbm already has free queue's pthread_cond and pthread_mutex
-	 */
-	pthread_mutex_t free_queue_mutex;
-	pthread_cond_t free_queue_cond;
-
-	/* tbm_surface list */
-	tpl_list_t vblank_list;
-	pthread_mutex_t vblank_list_mutex;
-
-	tpl_bool_t vblank_done;
-
-	tpl_worker_surface_t worker_surface;
-#endif
 	int present_mode;
 };
 
 struct _tpl_wayland_vk_wsi_buffer {
 	struct wl_proxy *wl_proxy;
 	tbm_fd sync_timeline;
 	unsigned int sync_timestamp;
-
-#if USE_WORKER_THREAD == 1
-	tbm_fd wait_sync;
-#endif
 };
 
 static const struct wl_registry_listener registry_listener;
@@ -366,10 +334,6 @@ __tpl_wayland_vk_wsi_display_query_window_supported_present_modes(
 	if (modes) {
 		*modes = TPL_DISPLAY_PRESENT_MODE_MAILBOX | TPL_DISPLAY_PRESENT_MODE_IMMEDIATE |
 			 wayland_vk_wsi_display->surface_capabilities.present_modes;
-#if USE_WORKER_THREAD == 1
-		if (__tpl_worker_support_vblank() == TPL_TRUE)
-			*modes |= TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED;
-#endif
 	}
 
 	return TPL_ERROR_NONE;
@@ -455,14 +419,7 @@ __tpl_wayland_vk_wsi_surface_commit_buffer(tpl_surface_t *surface,
 	wl_display_flush(surface->display->native_handle);
 	wayland_vk_wsi_buffer->sync_timestamp++;
 
-#if USE_WORKER_THREAD == 1
-	pthread_mutex_lock(&wayland_vk_wsi_surface->free_queue_mutex);
-#endif
 	tbm_surface_queue_release(wayland_vk_wsi_surface->tbm_queue, tbm_surface);
-#if USE_WORKER_THREAD == 1
-	pthread_mutex_unlock(&wayland_vk_wsi_surface->free_queue_mutex);
-	pthread_cond_signal(&wayland_vk_wsi_surface->free_queue_cond);
-#endif
 }
 
 static tpl_result_t
@@ -495,9 +452,6 @@ __tpl_wayland_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
 
 	tbm_surface_internal_unref(tbm_surface);
 
-#if USE_WORKER_THREAD == 1
-	wayland_vk_wsi_buffer->wait_sync = sync_fence;
-#endif
 	tsq_err = tbm_surface_queue_enqueue(wayland_vk_wsi_surface->tbm_queue,
 					    tbm_surface);
 	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
@@ -505,7 +459,6 @@ __tpl_wayland_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
 		return TPL_ERROR_INVALID_OPERATION;
 	}
 
-#if USE_WORKER_THREAD == 0
 	if (sync_fence != -1) {
 		/* non worker thread mode */
 		/* TODO: set max wait time */
@@ -526,10 +479,6 @@ __tpl_wayland_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
 
 	__tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface);
 
-#else
-	__tpl_worker_new_buffer_notify(&wayland_vk_wsi_surface->worker_surface);
-
-#endif
 	/*
 	 * tbm_surface insert to free queue.
@@ -574,7 +523,6 @@ __tpl_wayland_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
 	if (sync_fence)
 		*sync_fence = -1;
 
-#if USE_WORKER_THREAD == 0
 	TPL_OBJECT_UNLOCK(surface);
 	while (tbm_surface_queue_can_dequeue(
 			wayland_vk_wsi_surface->tbm_queue, 0) == 0) {
@@ -586,46 +534,10 @@ __tpl_wayland_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
 		}
 	}
 	TPL_OBJECT_LOCK(surface);
-#else
-	/*
-	 * TODO: it can move to libtbm
-	 * libtbm already has free queue's pthread_cond and pthread_mutex
-	 */
-	struct timespec abs_time;
-	if (timeout_ns != UINT64_MAX) {
-		clock_gettime(CLOCK_REALTIME, &abs_time);
-		abs_time.tv_sec += (timeout_ns / 1000000000L);
-		abs_time.tv_nsec += (timeout_ns % 1000000000L);
-		if (abs_time.tv_nsec >= 1000000000L) {
-			abs_time.tv_sec += (abs_time.tv_nsec / 1000000000L);
-			abs_time.tv_nsec = (abs_time.tv_nsec % 1000000000L);
-		}
-	}
-	pthread_mutex_lock(&wayland_vk_wsi_surface->free_queue_mutex);
-	while (tbm_surface_queue_can_dequeue(wayland_vk_wsi_surface->tbm_queue,
-					     0) == 0) {
-		if (timeout_ns != UINT64_MAX) {
-			int ret;
-			ret = pthread_cond_timedwait(&wayland_vk_wsi_surface->free_queue_cond,
-						     &wayland_vk_wsi_surface->free_queue_mutex,
-						     &abs_time);
-			if (ret == ETIMEDOUT) {
-				/* timeout */
-				pthread_mutex_unlock(&wayland_vk_wsi_surface->free_queue_mutex);
-				return NULL;
-			}
-		} else {
-			pthread_cond_wait(&wayland_vk_wsi_surface->free_queue_cond,
-					  &wayland_vk_wsi_surface->free_queue_mutex);
-		}
-	}
-#endif
 
 	tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue,
 					    &tbm_surface);
-#if USE_WORKER_THREAD == 1
-	pthread_mutex_unlock(&wayland_vk_wsi_surface->free_queue_mutex);
-#endif
+
 	if (!tbm_surface) {
 		TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d",
 			tsq_err);
@@ -774,120 +686,6 @@ release_buffer_fail:
 	return ret;
 }
 
-#if USE_WORKER_THREAD == 1
-static void
-__tpl_wayland_vk_wsi_process_draw_done(tpl_surface_t *surface,
-				       tbm_surface_h tbm_surface,
-				       tpl_result_t result)
-{
-	tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
-	tpl_wayland_vk_wsi_buffer_t *wayland_vk_wsi_buffer = NULL;
-	tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
-
-	TPL_ASSERT(surface);
-	TPL_ASSERT(tbm_surface);
-	TPL_ASSERT(tbm_surface_internal_is_valid(tbm_surface));
-
-	wayland_vk_wsi_surface =
-		(tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
-	wayland_vk_wsi_buffer =
-		__tpl_wayland_vk_wsi_get_wayland_buffer_from_tbm_surface(tbm_surface);
-	wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)
-				 surface->display->backend.data;
-
-	TPL_ASSERT(wayland_vk_wsi_surface);
-	TPL_ASSERT(wayland_vk_wsi_buffer);
-	TPL_ASSERT(wayland_vk_wsi_display);
-
-	close(wayland_vk_wsi_buffer->wait_sync);
-	wayland_vk_wsi_buffer->wait_sync = -1;
-
-	/* if server supported current supported mode then just send */
-
-	if (wayland_vk_wsi_surface->present_mode &
-	    wayland_vk_wsi_display->surface_capabilities.present_modes) {
-		__tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface);
-		return;
-	}
-
-	if (wayland_vk_wsi_surface->present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO) {
-		pthread_mutex_lock(&wayland_vk_wsi_surface->vblank_list_mutex);
-		/* unref in tpl list remove callback
-		   (__tpl_wayland_vk_wsi_buffer_remove_from_vblank_list) */
-		tbm_surface_internal_ref(tbm_surface);
-		__tpl_list_push_back(&wayland_vk_wsi_surface->vblank_list, tbm_surface);
-		pthread_mutex_unlock(&wayland_vk_wsi_surface->vblank_list_mutex);
-	} else if (wayland_vk_wsi_surface->present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED &&
-		   wayland_vk_wsi_surface->vblank_done == TPL_FALSE) {
-		/* if can't process previous vblank event, send buffer immediately */
-		pthread_mutex_lock(&wayland_vk_wsi_surface->vblank_list_mutex);
-		/* unref in tpl list remove callback
-		   (__tpl_wayland_vk_wsi_buffer_remove_from_vblank_list) */
-		tbm_surface_internal_ref(tbm_surface);
-		__tpl_list_push_back(&wayland_vk_wsi_surface->vblank_list, tbm_surface);
-		wayland_vk_wsi_surface->vblank_done = TPL_TRUE;
-		pthread_mutex_unlock(&wayland_vk_wsi_surface->vblank_list_mutex);
-	} else {
-		__tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface);
-	}
-}
-
-static int
-__tpl_wayland_vk_wsi_draw_wait_fd_get(tpl_surface_t *surface,
-				      tbm_surface_h tbm_surface)
-{
-	tpl_wayland_vk_wsi_buffer_t *wayland_vk_wsi_buffer = NULL;
-
-	/*TPL_ASSERT(surface);*/
-	TPL_ASSERT(tbm_surface);
-
-	wayland_vk_wsi_buffer =
-		__tpl_wayland_vk_wsi_get_wayland_buffer_from_tbm_surface(tbm_surface);
-
-	TPL_ASSERT(wayland_vk_wsi_buffer);
-
-	return wayland_vk_wsi_buffer->wait_sync;
-}
-
-static void
-__tpl_wayland_vk_wsi_buffer_remove_from_vblank_list(void *data)
-{
-	tbm_surface_h tbm_surface = data;
-	tbm_surface_internal_unref(tbm_surface);
-}
-
-static void
-__tpl_wayland_vk_wsi_vblank(tpl_surface_t *surface, unsigned int sequence,
-			    unsigned int tv_sec, unsigned int tv_usec)
-{
-	tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface;
-	tbm_surface_h tbm_surface;
-
-	TPL_ASSERT(surface);
-
-	wayland_vk_wsi_surface =
-		(tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
-
-	TPL_ASSERT(wayland_vk_wsi_surface);
-
-	if ((wayland_vk_wsi_surface->present_mode &
-	     (TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED)) == 0)
-		return;
-
-	pthread_mutex_lock(&wayland_vk_wsi_surface->vblank_list_mutex);
-	tbm_surface = __tpl_list_pop_front(&wayland_vk_wsi_surface->vblank_list,
-					   __tpl_wayland_vk_wsi_buffer_remove_from_vblank_list);
-	pthread_mutex_unlock(&wayland_vk_wsi_surface->vblank_list_mutex);
-
-	if (tbm_surface_internal_is_valid(tbm_surface)) {
-		__tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface);
-		wayland_vk_wsi_surface->vblank_done = TPL_TRUE;
-	} else {
-		wayland_vk_wsi_surface->vblank_done = TPL_FALSE;
-	}
-}
-#endif
-
 static tpl_result_t
 __tpl_wayland_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
 					      tbm_format format, int width,
@@ -919,16 +717,6 @@ __tpl_wayland_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
 	if ((present_mode &
 	     wayland_vk_wsi_display->surface_capabilities.present_modes) == 0) {
 		/* server not supported current mode check client mode */
 		switch (present_mode) {
-#if USE_WORKER_THREAD == 1
-		case TPL_DISPLAY_PRESENT_MODE_FIFO:
-		case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED:
-			if (__tpl_worker_support_vblank() == TPL_FALSE) {
-				TPL_ERR("Unsupported present mode: %d, worker not support vblank",
-					present_mode);
-				return TPL_ERROR_INVALID_PARAMETER;
-			}
-			break;
-#endif
 		case TPL_DISPLAY_PRESENT_MODE_MAILBOX:
 		case TPL_DISPLAY_PRESENT_MODE_IMMEDIATE:
 			break;
@@ -956,28 +744,6 @@ __tpl_wayland_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
 	surface->width = width;
 	surface->height = height;
 
-#if USE_WORKER_THREAD == 1
-	pthread_mutex_init(&wayland_vk_wsi_surface->free_queue_mutex, NULL);
-	pthread_cond_init(&wayland_vk_wsi_surface->free_queue_cond, NULL);
-
-	wayland_vk_wsi_surface->worker_surface.surface = surface;
-	wayland_vk_wsi_surface->worker_surface.tbm_queue =
-		wayland_vk_wsi_surface->tbm_queue;
-
-	wayland_vk_wsi_surface->worker_surface.draw_done =
-		__tpl_wayland_vk_wsi_process_draw_done;
-	wayland_vk_wsi_surface->worker_surface.draw_wait_fd_get =
-		__tpl_wayland_vk_wsi_draw_wait_fd_get;
-	if ((wayland_vk_wsi_surface->present_mode &
-	     (TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED))) {
-		wayland_vk_wsi_surface->worker_surface.vblank =
-			__tpl_wayland_vk_wsi_vblank;
-		pthread_mutex_init(&wayland_vk_wsi_surface->vblank_list_mutex, NULL);
-		__tpl_list_init(&wayland_vk_wsi_surface->vblank_list);
-	}
-
-	__tpl_worker_surface_list_insert(&wayland_vk_wsi_surface->worker_surface);
-#endif
 	return TPL_ERROR_NONE;
 }
 
@@ -993,9 +759,6 @@ __tpl_wayland_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface)
 	wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
 	TPL_ASSERT(wayland_vk_wsi_surface);
 
-#if USE_WORKER_THREAD == 1
-	__tpl_worker_surface_list_remove(&wayland_vk_wsi_surface->worker_surface);
-#endif
 
 	if (surface->type == TPL_SURFACE_TYPE_WINDOW) {
 		wl_display_flush(surface->display->native_handle);
@@ -1005,19 +768,6 @@ __tpl_wayland_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface)
 		wayland_vk_wsi_surface->tbm_queue = NULL;
 	}
 
-#if USE_WORKER_THREAD == 1
-	if ((wayland_vk_wsi_surface->present_mode &
-	     (TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED))) {
-		pthread_mutex_lock(&wayland_vk_wsi_surface->vblank_list_mutex);
-		__tpl_list_fini(&wayland_vk_wsi_surface->vblank_list,
-				__tpl_wayland_vk_wsi_buffer_remove_from_vblank_list);
-		pthread_mutex_unlock(&wayland_vk_wsi_surface->vblank_list_mutex);
-		pthread_mutex_destroy(&wayland_vk_wsi_surface->vblank_list_mutex);
-	}
-
-	pthread_cond_destroy(&wayland_vk_wsi_surface->free_queue_cond);
-	pthread_mutex_destroy(&wayland_vk_wsi_surface->free_queue_mutex);
-#endif
 	return TPL_ERROR_NONE;
 }
 
diff --git a/src/tpl_worker_thread.c b/src/tpl_worker_thread.c
deleted file mode 100644
index feca15c..0000000
--- a/src/tpl_worker_thread.c
+++ /dev/null
@@ -1,508 +0,0 @@
-#include "tpl_worker_thread.h"
-#include "tpl_internal.h"
-
-#include
-/*#define __USE_GNU*/
-#include
-#include
-#include
-#include
-
-#define TPL_ERR_ERRNO(f, x...) \
-	do { int err = errno; char buf[256] = {0,}; \
-		strerror_r(err, buf, 255); \
-		TPL_ERR(f " | error: %d(%s)", ##x, err, buf); \
-	} while (0);
-
-#define TPL_WARN_ERRNO(f, x...) \
-	do { int err = errno; char buf[256] = {0,}; \
-		strerror_r(err, buf, 255); \
-		TPL_WARN(f " | error: %d(%s)", ##x, err, buf); \
-	} while (0);
-
-static struct {
-	int running;
-	int epoll_fd;
-	int event_fd;
-
-	pthread_t worker_id;
-	tpl_list_t surface_list;
-	pthread_mutex_t surface_mutex;
-	tpl_bool_t support_vblank;
-} tpl_worker_thread;
-
-tpl_bool_t
-__tpl_worker_support_vblank()
-{
-	return tpl_worker_thread.support_vblank;
-}
-
-void
-__tpl_worker_surface_list_insert(tpl_worker_surface_t *surface)
-{
-	TPL_ASSERT(surface->surface);
-
-	if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) {
-		TPL_ERR_ERRNO("surface list mutex lock failed");
-		return;
-	}
-
-	surface->draw_wait_buffer = NULL;
-	__tpl_list_push_back(&tpl_worker_thread.surface_list, surface);
-
-	pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
-}
-
-void
-__tpl_worker_surface_list_remove(tpl_worker_surface_t *surface)
-{
-	if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) {
-		TPL_ERR_ERRNO("surface list mutex lock failed");
-		return;
-	}
-
-	__tpl_list_remove_data(&tpl_worker_thread.surface_list, surface,
-			       TPL_FIRST, NULL);
-
-	pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
-}
-
-static void
-__tpl_worker_event_send()
-{
-	int len;
-	uint64_t dummy_event = 1;
-
-	if (tpl_worker_thread.event_fd == -1) {
-		TPL_ERR("worker thread not working");
-		return;
-	}
-
-	len = write(tpl_worker_thread.event_fd,
-		    &dummy_event,
-		    sizeof(dummy_event));
-	if (len < 0)
-		TPL_WARN_ERRNO("event fd(%d) write failed.",
-			       tpl_worker_thread.event_fd);
-}
-
-static void
-__tpl_worker_prepare_draw_wait_buffer(int epoll_fd,
-				      tpl_worker_surface_t *surface)
-{
-	if (surface->draw_wait_buffer)
-		return;
-
-	if (surface->draw_wait_buffer_get) {
-		int wait_fd = -1;
-		tbm_surface_h tbm_surface;
-
-		while ((tbm_surface = surface->draw_wait_buffer_get(surface->surface)) != NULL) {
-			if (surface->draw_wait_fd_get)
-				wait_fd = surface->draw_wait_fd_get(surface->surface, tbm_surface);
-
-			if (wait_fd != -1) {
-				struct epoll_event wait_fence_event;
-				int epoll_err;
-
-				wait_fence_event.events = EPOLLIN;
-				wait_fence_event.data.ptr = surface;
-				epoll_err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD,
-						      wait_fd,
-						      &wait_fence_event);
-				if (epoll_err == 0) {
-					surface->draw_wait_buffer = tbm_surface;
-					return;
-				}
-			} /* else can't(or not need) wait fence in poll */
-
-			if (surface->draw_done)
-				surface->draw_done(surface->surface, tbm_surface,
-						   TPL_ERROR_INVALID_OPERATION);
-		}
-		return;
-	}
-
-	while (tbm_surface_queue_can_acquire(surface->tbm_queue, 0)) {
-		tbm_surface_h tbm_surface = NULL;
-		tbm_surface_queue_error_e tsq_err;
-		int wait_fd = -1;
-
-		tsq_err = tbm_surface_queue_acquire(surface->tbm_queue, &tbm_surface);
-		if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE || tbm_surface == NULL) {
-			TPL_ERR("Failed to acquire tbm_surface. | tsq_err = %d", tsq_err);
-			return;
-		}
-
-		if (surface->draw_wait_fd_get)
-			wait_fd = surface->draw_wait_fd_get(surface->surface, tbm_surface);
-
-		if (wait_fd != -1) {
-			struct epoll_event wait_fence_event;
-			int epoll_err;
-
-			wait_fence_event.events = EPOLLIN;
-			wait_fence_event.data.ptr = surface;
-			epoll_err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD,
-					      wait_fd,
-					      &wait_fence_event);
-			if (epoll_err == 0) {
-				surface->draw_wait_buffer = tbm_surface;
-				return;
-			}
-		} /* else can't(or not need) wait fence in poll */
-
-		if (surface->draw_done)
-			surface->draw_done(surface->surface, tbm_surface,
-					   TPL_ERROR_INVALID_OPERATION);
-	}
-}
-
-void
-__tpl_worker_new_buffer_notify(tpl_worker_surface_t *surface)
-{
-	TPL_ASSERT(surface->surface);
-
-	__tpl_worker_event_send();
-}
-
-static tpl_bool_t
-__tpl_worker_regist_vblank_handler(tdm_client_vblank *tdm_vblank);
-
-static void
-__tpl_worker_cb_vblank(tdm_client_vblank *tdm_vblank, tdm_error error,
-		       unsigned int sequence, unsigned int tv_sec,
-		       unsigned int tv_usec, void *user_data)
-{
-	tpl_list_node_t *trail;
-
-	if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) {
-		TPL_ERR_ERRNO("surface list mutex lock failed");
-		return;
-	}
-
-	for (trail = __tpl_list_get_front_node(&tpl_worker_thread.surface_list);
-	     trail != NULL;
-	     trail = __tpl_list_node_next(trail)) {
-		tpl_worker_surface_t *surface;
-
-		surface = __tpl_list_node_get_data(trail);
-		if (surface->vblank)
-			surface->vblank(surface->surface, sequence, tv_sec, tv_usec);
-	}
-	pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
-
-	__tpl_worker_regist_vblank_handler(tdm_vblank);
-}
-
-static tpl_bool_t
-__tpl_worker_regist_vblank_handler(tdm_client_vblank *tdm_vblank)
-{
-	tdm_error tdm_err;
-
-	tdm_err = tdm_client_vblank_wait(tdm_vblank,
-					 1, /* interval */
-					 __tpl_worker_cb_vblank, /* handler */
-					 NULL);
-	if (tdm_err != TDM_ERROR_NONE) {
-		TPL_ERR("Failed to tdm_client_wait_vblank. error:%d", tdm_err);
-		return TPL_FALSE;
-	}
-	return TPL_TRUE;
-}
-
-static int
-__tpl_worker_prepare_event_fd(int epoll_fd)
-{
-	int event_fd;
-	struct epoll_event event;
-	event.events = EPOLLIN;
-	event.data.ptr = &tpl_worker_thread;
-
-	event_fd = eventfd(0, EFD_CLOEXEC);
-	if (event_fd == -1) {
-		TPL_ERR_ERRNO("eventfd() failed");
-		return -1;
-	}
-
-	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &event) != 0) {
-		TPL_ERR_ERRNO("eventfd epoll ctl epoll_fd: %d, event_fd: %d.",
-			      epoll_fd, tpl_worker_thread.event_fd);
-		close(event_fd);
-		return -1;
-	}
-	return event_fd;
-}
-
-/* FIXME: Temporarily added 'unused' attribute to suppress warning. */
-/* Remove this attribute when you use this function. */
-static tpl_bool_t __attribute__((unused))
-__tpl_worker_prepare_vblank(int epoll_fd, tdm_client **ret_client, tdm_client_vblank **ret_vblank)
-{
-	tdm_error tdm_err;
-	tdm_client *tdm_client = NULL;
-	tdm_client_output *tdm_output = NULL;
-	tdm_client_vblank *tdm_vblank = NULL;
-	int tdm_fd, ret;
-	struct epoll_event event;
-
-	TPL_ASSERT(ret_client);
-	TPL_ASSERT(ret_vblank);
-
-	tdm_client = tdm_client_create(&tdm_err);
-	if (!tdm_client) {
-		TPL_ERR("tdm_client_create failed | tdm_err: %d\n", tdm_err);
-		goto error_cleanup;
-	}
-
-	tdm_err = tdm_client_get_fd(tdm_client, &tdm_fd);
-	if (tdm_err != TDM_ERROR_NONE || tdm_fd < 0) {
-		TPL_ERR("tdm_client_get_fd failed | tdm_err: %d\n", tdm_err);
-		goto error_cleanup;
-	}
-
-	tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
-	if (!tdm_output) {
-		TPL_ERR("Failed to get tdm client output. tdm_err(%d)", tdm_err);
-		goto error_cleanup;
-	}
-
-	tdm_vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
-	if (!tdm_vblank) {
-		TPL_ERR("Failed to create tdm vblank output. tdm_err(%d)", tdm_err);
-		goto error_cleanup;
-	}
-
-	tdm_client_vblank_set_enable_fake(tdm_vblank, 1);
-	tdm_client_vblank_set_sync(tdm_vblank, 0);
-
-	if (__tpl_worker_regist_vblank_handler(tdm_vblank) == TPL_FALSE)
-		goto error_cleanup;
-
-	event.events = EPOLLIN;
-	event.data.ptr = tdm_client;
-	ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, tdm_fd, &event);
-	if (ret != 0) {
-		TPL_ERR_ERRNO("tdm epoll ctl epoll_fd: %d, tdm_fd: %d.",
-			      epoll_fd, tdm_fd);
-		goto error_cleanup;
-	}
-
-	*ret_vblank = tdm_vblank;
-	*ret_client = tdm_client;
-
-	return TPL_TRUE;
-
-error_cleanup:
-	if (tdm_vblank)
-		tdm_client_vblank_destroy(tdm_vblank);
-	if (tdm_client)
-		tdm_client_destroy(tdm_client);
-	return TPL_FALSE;
-}
-
-static void *
-__tpl_worker_thread_loop(void *arg)
-{
-#define EPOLL_MAX_SIZE 100
-	int ret, epoll_fd = epoll_create(EPOLL_MAX_SIZE);
-	struct epoll_event ev_list[EPOLL_MAX_SIZE];
-	tdm_client *tdm_client = NULL;
-	tdm_client_vblank *tdm_vblank = NULL;
-
-	if (epoll_fd == -1) {
-		TPL_ERR_ERRNO("epoll create failed");
-		goto cleanup;
-	}
-
-	/* event fd */
-	tpl_worker_thread.event_fd = __tpl_worker_prepare_event_fd(epoll_fd);
-	if (tpl_worker_thread.event_fd == -1)
-		goto cleanup;
-
-	/* vblank fd */
-	/* FIXME: vblank has performance problem */
-	/*if (__tpl_worker_prepare_vblank(epoll_fd, &tdm_client, &tdm_vblank))
-		tpl_worker_thread.support_vblank = TPL_TRUE;*/
-	tpl_worker_thread.support_vblank = TPL_TRUE;
-
-	while (tpl_worker_thread.running) {
-		int i;
-		tpl_list_node_t *trail;
-
-		/* set buffer's sync fd and vblank list */
-		if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) {
-			TPL_ERR_ERRNO("surface list mutex lock failed");
-			goto cleanup;
-		}
-
-		for (trail = __tpl_list_get_front_node(&tpl_worker_thread.surface_list);
-		     trail != NULL;
-		     trail = __tpl_list_node_next(trail)) {
-			tpl_worker_surface_t *surface = __tpl_list_node_get_data(trail);
-			TPL_ASSERT(surface);
-
-			__tpl_worker_prepare_draw_wait_buffer(epoll_fd, surface);
-		}
-		pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
-
-		/* wait events */
-cont_epoll_wait:
-		ret = epoll_wait(epoll_fd, ev_list, EPOLL_MAX_SIZE, -1);
-		if (ret == -1) {
-			if (errno != EINTR)
-				TPL_ERR_ERRNO("epoll fd: %d.", epoll_fd);
-			goto cont_epoll_wait;
-		}
-
-		for (i = 0; i < ret; i++) {
-			if (ev_list[i].data.ptr == &tpl_worker_thread) {
-				/* thread terminate event */
-				if (ev_list[i].events & EPOLLIN) {
-					int len;
-					uint64_t read_buf;
-
-					len = read(tpl_worker_thread.event_fd,
-						   &read_buf, sizeof(uint64_t));
-					if (len < 0) {
-						TPL_WARN_ERRNO("event fd(%d) read failed.",
-							       tpl_worker_thread.event_fd);
-						continue;
-					} else {
-						continue;
-					}
-				}
-			} else if (ev_list[i].data.ptr == tdm_client) {
-				/* vblank */
-				tdm_error tdm_err = tdm_client_handle_events(tdm_client);
-
-				if (tdm_err != TDM_ERROR_NONE) {
-					TPL_ERR("Failed to tdm_client_handle_events");
-					/**
-					 * TODO: Error handling
-					 *
-					 * Currently, no error handling implemented to keep flow identical
-					 * to ensure no side effect.
-					 */
-				}
-				/* process in __tpl_worker_cb_vblank */
-			} else {
-				/* draw done */
-				tpl_worker_surface_t *surface = ev_list[i].data.ptr;
-
-				if (!(ev_list[i].events & EPOLLIN))
-					continue;
-
-				if (surface->draw_wait_buffer) {
-					int wait_fd;
-
-					wait_fd = surface->draw_wait_fd_get(surface->surface,
-									    surface->draw_wait_buffer);
-					if (wait_fd == -1) {
-						if (surface->draw_done)
-							surface->draw_done(surface->surface, surface->draw_wait_buffer,
-									   TPL_ERROR_INVALID_OPERATION);
-						surface->draw_wait_buffer = NULL;
-					} else {
-						int fence_result;
-
-						switch (fence_result = tbm_sync_fence_wait(wait_fd, 0)) {
-						case 0:
-							TPL_ERR_ERRNO("sync_fence_wait return error.");
-							break;
-						case 1:
-							/* some time recieve event two times */
-							epoll_ctl(epoll_fd, EPOLL_CTL_DEL, wait_fd, NULL);
-							if (surface->draw_done)
-								surface->draw_done(surface->surface,
-										   surface->draw_wait_buffer,
-										   TPL_ERROR_NONE);
-							surface->draw_wait_buffer = NULL;
-							break;
-						case -1:
-							TPL_WARN("sync_fence_wait return timeout.");
-							break;
-						}
-					}
-				} else {
-					TPL_WARN("recieve already signaled event\n");
-				}
-
-				/* prepare next buffer in loop start time */
-			}
-		}
-	}
-
-cleanup:
-	/* thread cleanup */
-	if (tdm_vblank)
-		tdm_client_vblank_destroy(tdm_vblank);
-	if (tdm_client)
-		tdm_client_destroy(tdm_client);
-
-	if (epoll_fd != -1) {
-		close(epoll_fd);
-		epoll_fd = -1;
-	}
-	if (tpl_worker_thread.event_fd != -1) {
-		close(tpl_worker_thread.event_fd);
-		tpl_worker_thread.event_fd = -1;
-	}
-
-	return NULL;
-}
-
-static void __attribute__((constructor))
-__tpl_worker_init(void)
-{
-	/*
-	 * It can be move to display or surface create function
-	 * with pthread_once
-	 */
-	tpl_worker_thread.running = 1;
-	tpl_worker_thread.support_vblank = TPL_FALSE;
-
-	if (pthread_mutex_init(&tpl_worker_thread.surface_mutex, NULL) != 0) {
-		TPL_ERR_ERRNO("surface list mutex init failed");
-		goto error;
-	}
-
-	__tpl_list_init(&tpl_worker_thread.surface_list);
-
-	if (pthread_create(&tpl_worker_thread.worker_id, NULL,
-			   __tpl_worker_thread_loop,
-			   NULL) != 0) {
-		TPL_ERR_ERRNO("worker thread create failed");
-		goto error_thread_create;
-	}
-	/*pthread_setname_np(tpl_worker_thread.worker_id, "tpl_worker_thread");*/
-
-	return;
-
-error_thread_create:
-	pthread_mutex_destroy(&tpl_worker_thread.surface_mutex);
-
-error:
-	tpl_worker_thread.running = 0;
-}
-
-static void __attribute__((destructor))
-__tpl_worker_fini(void)
-{
-	if (tpl_worker_thread.running == 0)
-		return;
-
-	/* deinitailize global object */
-	tpl_worker_thread.running = 0;
-
-	/* maybe EPOLLRDHUP not work with eventfd */
-	/* close(tpl_worker_thread.event_fd); */
-	__tpl_worker_event_send();
-
-	if (__tpl_list_get_count(&tpl_worker_thread.surface_list))
-		TPL_WARN("called destructor, but tpl surface count: %d",
-			 __tpl_list_get_count(&tpl_worker_thread.surface_list));
-
-	pthread_join(tpl_worker_thread.worker_id, NULL);
-	pthread_mutex_destroy(&tpl_worker_thread.surface_mutex);
-}
diff --git a/src/tpl_worker_thread.h b/src/tpl_worker_thread.h
deleted file mode 100644
index f7562a9..0000000
--- a/src/tpl_worker_thread.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef TPL_WORKER_THREAD_H
-#define TPL_WORKER_THREAD_H
-
-#include "tpl.h"
-#include
-#include
-#include
-#include
-
-typedef struct __tpl_worker_surface tpl_worker_surface_t;
-
-struct __tpl_worker_surface {
-	tpl_surface_t *surface;
-	tbm_surface_queue_h tbm_queue;
-
-	void (*draw_done)(tpl_surface_t *surface, tbm_surface_h tbm_surface, tpl_result_t result);
-	int (*draw_wait_fd_get)(tpl_surface_t *surface, tbm_surface_h tbm_surface);
-	void
(*vblank)(tpl_surface_t *surface, unsigned int sequence, unsigned int tv_sec, - unsigned int tv_usec); - tbm_surface_h (*draw_wait_buffer_get)(tpl_surface_t *surface); - - tbm_surface_h draw_wait_buffer; -}; - -tpl_bool_t __tpl_worker_support_vblank(); -void __tpl_worker_surface_list_insert(tpl_worker_surface_t *surface); -void __tpl_worker_surface_list_remove(tpl_worker_surface_t *surface); -void __tpl_worker_new_buffer_notify(tpl_worker_surface_t *surface); - -#endif //TPL_WORKER_THREAD_H -- 2.7.4 From 1d20c9999f37de5e8706d2bc4ffa58e4154706b3 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 20 Mar 2020 12:31:46 +0900 Subject: [PATCH 10/16] tpl_tbm: Deleted unnecessary code related to vulkan/worker_thread - vulkan's tbm backend is not available. - Currently, the tbm backend in libtpl-egl means an offscreen render backend for a wayland-server compositor. - It can be extended later, but the unnecessary code is deleted before that. Change-Id: I69f52f623430ed0bb18da6af5d5424b5712df68b Signed-off-by: Joonbum Ko --- src/tpl_tbm.c | 588 +++------------------------------------------------------- 1 file changed, 23 insertions(+), 565 deletions(-) diff --git a/src/tpl_tbm.c b/src/tpl_tbm.c index 43dee96..b38c7c5 100644 --- a/src/tpl_tbm.c +++ b/src/tpl_tbm.c @@ -10,13 +10,9 @@ #include #include -#include "tpl_worker_thread.h" -#include -#include typedef struct _tpl_tbm_display tpl_tbm_display_t; typedef struct _tpl_tbm_surface tpl_tbm_surface_t; -typedef struct _tpl_tbm_buffer tpl_tbm_buffer_t; struct _tpl_tbm_display { int need_dpy_deinit; @@ -24,70 +20,9 @@ struct _tpl_tbm_display { }; struct _tpl_tbm_surface { - /* tbm_surface list */ - tpl_list_t vblank_list; - pthread_mutex_t vblank_list_mutex; - - tpl_list_t draw_waiting_queue; - pthread_mutex_t draw_waiting_mutex; - - tpl_bool_t vblank_done; - - tpl_worker_surface_t worker_surface; - - tpl_bool_t need_worker_clear; - int present_mode; - - tpl_bool_t need_reset; -}; - -struct _tpl_tbm_buffer { - tbm_fd wait_sync; + tbm_surface_queue_h tbm_queue; }; -static int tpl_tbm_buffer_key; -#define KEY_tpl_tbm_buffer (unsigned long)(&tpl_tbm_buffer_key) - -static void -__tpl_tbm_buffer_free(tpl_tbm_buffer_t *tbm_buffer) -{ - TPL_ASSERT(tbm_buffer); - if (tbm_buffer->wait_sync != -1) - close(tbm_buffer->wait_sync); - free(tbm_buffer); -} - -static void -__tpl_tbm_buffer_remove_from_list(void *data) -{ - tbm_surface_h tbm_surface = data; - tbm_surface_internal_unref(tbm_surface); -} - -static TPL_INLINE tpl_tbm_buffer_t * -__tpl_tbm_get_tbm_buffer_from_tbm_surface(tbm_surface_h surface) -{ - tpl_tbm_buffer_t *buf = NULL; - - if (!tbm_surface_internal_is_valid(surface)) - return NULL; - - tbm_surface_internal_get_user_data(surface, KEY_tpl_tbm_buffer, - (void **)&buf); - return buf; -} - -static TPL_INLINE void -__tpl_tbm_set_tbm_buffer_to_tbm_surface(tbm_surface_h surface, - tpl_tbm_buffer_t *buf) -{ - tbm_surface_internal_add_user_data(surface, - KEY_tpl_tbm_buffer, - (tbm_data_free)__tpl_tbm_buffer_free); - tbm_surface_internal_set_user_data(surface, - KEY_tpl_tbm_buffer, buf); -} - static tpl_result_t __tpl_tbm_display_init(tpl_display_t *display) { @@ -230,223 +165,6 @@ __tpl_tbm_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) return (tbm_surface_h)pixmap; } -static void -__tpl_tbm_surface_queue_notify_cb(tbm_surface_queue_h surface_queue, void *data) -{ - /* Do something */ -} - -static void -__tpl_tbm_draw_done(tpl_surface_t *surface, tbm_surface_h tbm_surface, - tpl_result_t result) -{ - tpl_tbm_surface_t *tpl_tbm_surface = NULL; - tpl_tbm_buffer_t 
*tpl_tbm_buffer = NULL; - tbm_surface_queue_h tbm_queue = NULL; - - TPL_ASSERT(surface); - TPL_ASSERT(tbm_surface); - TPL_ASSERT(tbm_surface_internal_is_valid(tbm_surface)); - - tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data; - tpl_tbm_buffer = __tpl_tbm_get_tbm_buffer_from_tbm_surface(tbm_surface); - tbm_queue = (tbm_surface_queue_h)surface->native_handle; - - TPL_ASSERT(tpl_tbm_surface); - TPL_ASSERT(tpl_tbm_buffer); - TPL_ASSERT(tbm_queue); - - close(tpl_tbm_buffer->wait_sync); - tpl_tbm_buffer->wait_sync = -1; - - /* if server supported current supported mode then just send */ - - if (tpl_tbm_surface->present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO) { - pthread_mutex_lock(&tpl_tbm_surface->vblank_list_mutex); - /* unref in tpl list remove callback - (__tpl_tbm_buffer_remove_from_list) */ - tbm_surface_internal_ref(tbm_surface); - __tpl_list_push_back(&tpl_tbm_surface->vblank_list, tbm_surface); - pthread_mutex_unlock(&tpl_tbm_surface->vblank_list_mutex); - } else if (tpl_tbm_surface->present_mode == - TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED && - tpl_tbm_surface->vblank_done == TPL_FALSE) { - /* if can't process previous vblank event, send buffer immediately */ - pthread_mutex_lock(&tpl_tbm_surface->vblank_list_mutex); - /* unref in tpl list remove callback - (__tpl_tbm_buffer_remove_from_list) */ - tbm_surface_internal_ref(tbm_surface); - __tpl_list_push_back(&tpl_tbm_surface->vblank_list, tbm_surface); - tpl_tbm_surface->vblank_done = TPL_TRUE; - pthread_mutex_unlock(&tpl_tbm_surface->vblank_list_mutex); - } else { - tbm_surface_internal_unref(tbm_surface); - if (tbm_surface_queue_enqueue(tbm_queue, - tbm_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("tbm_surface_queue_enqueue failed. tbm_queue(%p) tbm_surface(%p)", - tbm_queue, tbm_surface); - } - } -} - -static int -__tpl_tbm_draw_wait_fd_get(tpl_surface_t *surface, tbm_surface_h tbm_surface) -{ - tpl_tbm_buffer_t *tpl_tbm_buffer; - - TPL_ASSERT(tbm_surface); - TPL_ASSERT(tbm_surface_internal_is_valid(tbm_surface)); - - tpl_tbm_buffer = __tpl_tbm_get_tbm_buffer_from_tbm_surface(tbm_surface); - return tpl_tbm_buffer->wait_sync; -} - -static void -__tpl_tbm_vblank(tpl_surface_t *surface, unsigned int sequence, - unsigned int tv_sec, - unsigned int tv_usec) -{ - tpl_tbm_surface_t *tpl_tbm_surface; - tbm_surface_h tbm_surface; - - TPL_ASSERT(surface); - - tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data; - - TPL_ASSERT(tpl_tbm_surface); - - if ((tpl_tbm_surface->present_mode & - (TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED)) == 0) - return; - - pthread_mutex_lock(&tpl_tbm_surface->vblank_list_mutex); - tbm_surface = __tpl_list_pop_front(&tpl_tbm_surface->vblank_list, - __tpl_tbm_buffer_remove_from_list); - pthread_mutex_unlock(&tpl_tbm_surface->vblank_list_mutex); - - if (tbm_surface_internal_is_valid(tbm_surface)) { - tbm_surface_queue_h tbm_queue = (tbm_surface_queue_h)surface->native_handle; - if (tbm_surface_queue_enqueue(tbm_queue, - tbm_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("tbm_surface_queue_enqueue failed. 
tbm_queue(%p) tbm_surface(%p)", - tbm_queue, tbm_surface); - } - tpl_tbm_surface->vblank_done = TPL_TRUE; - } else { - tpl_tbm_surface->vblank_done = TPL_FALSE; - } - -} - -static tbm_surface_h -__tpl_tbm_draw_wait_buffer_get(tpl_surface_t *surface) -{ - tpl_tbm_surface_t *tpl_tbm_surface; - tbm_surface_h tbm_surface; - - tpl_tbm_surface = surface->backend.data; - pthread_mutex_lock(&tpl_tbm_surface->draw_waiting_mutex); - tbm_surface = __tpl_list_pop_front(&tpl_tbm_surface->draw_waiting_queue, NULL); - pthread_mutex_unlock(&tpl_tbm_surface->draw_waiting_mutex); - - return tbm_surface; -} - -static tpl_result_t -__tpl_tbm_surface_create_swapchain(tpl_surface_t *surface, - tbm_format format, int width, - int height, int buffer_count, int present_mode) -{ - tpl_tbm_surface_t *tpl_tbm_surface = NULL; - - TPL_ASSERT(surface); - - tpl_tbm_surface = (tpl_tbm_surface_t *) surface->backend.data; - TPL_ASSERT(tpl_tbm_surface); - - /* FIXME: vblank has performance problem so replace all present mode to MAILBOX */ - present_mode = TPL_DISPLAY_PRESENT_MODE_MAILBOX; - - /* TODO: check server supported present modes */ - switch (present_mode) { - case TPL_DISPLAY_PRESENT_MODE_MAILBOX: - case TPL_DISPLAY_PRESENT_MODE_IMMEDIATE: - break; - case TPL_DISPLAY_PRESENT_MODE_FIFO: - case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED: - if (__tpl_worker_support_vblank() == TPL_FALSE) { - TPL_ERR("Unsupported present mode: %d, worker not support vblank", - present_mode); - return TPL_ERROR_INVALID_PARAMETER; - } - break; - default: - TPL_ERR("Unsupported present mode: %d", present_mode); - return TPL_ERROR_INVALID_PARAMETER; - } - - tpl_tbm_surface->present_mode = present_mode; - - tpl_tbm_surface->worker_surface.surface = surface; - tpl_tbm_surface->worker_surface.draw_done = __tpl_tbm_draw_done; - tpl_tbm_surface->worker_surface.draw_wait_fd_get = __tpl_tbm_draw_wait_fd_get; - tpl_tbm_surface->worker_surface.vblank = __tpl_tbm_vblank; - tpl_tbm_surface->worker_surface.draw_wait_buffer_get = - __tpl_tbm_draw_wait_buffer_get; - - __tpl_list_init(&tpl_tbm_surface->vblank_list); - __tpl_list_init(&tpl_tbm_surface->draw_waiting_queue); - pthread_mutex_init(&tpl_tbm_surface->vblank_list_mutex, NULL); - pthread_mutex_init(&tpl_tbm_surface->draw_waiting_mutex, NULL); - - __tpl_worker_surface_list_insert(&tpl_tbm_surface->worker_surface); - tpl_tbm_surface->need_worker_clear = TPL_TRUE; - - return TPL_ERROR_NONE; -} - -static tpl_result_t -__tpl_tbm_surface_destroy_swapchain(tpl_surface_t *surface) -{ - tpl_tbm_surface_t *tpl_tbm_surface = NULL; - - TPL_ASSERT(surface); - - tpl_tbm_surface = (tpl_tbm_surface_t *) surface->backend.data; - TPL_ASSERT(tpl_tbm_surface); - - __tpl_worker_surface_list_remove(&tpl_tbm_surface->worker_surface); - - pthread_mutex_lock(&tpl_tbm_surface->vblank_list_mutex); - __tpl_list_fini(&tpl_tbm_surface->vblank_list, NULL); - pthread_mutex_unlock(&tpl_tbm_surface->vblank_list_mutex); - pthread_mutex_destroy(&tpl_tbm_surface->vblank_list_mutex); - - pthread_mutex_lock(&tpl_tbm_surface->draw_waiting_mutex); - __tpl_list_fini(&tpl_tbm_surface->draw_waiting_queue, NULL); - pthread_mutex_unlock(&tpl_tbm_surface->draw_waiting_mutex); - pthread_mutex_destroy(&tpl_tbm_surface->draw_waiting_mutex); - tpl_tbm_surface->need_worker_clear = TPL_FALSE; - - return TPL_ERROR_NONE; -} - -static void -__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, - void *data) -{ - tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)data; - - if (!tpl_tbm_surface) { - TPL_ERR("Invalid parameter. 
tpl_tbm_surface(%p)", tpl_tbm_surface); - return; - } - - TPL_LOG_B("TBM", "tbm_queue(%p) has been reset!", tbm_queue); - - tpl_tbm_surface->need_reset = TPL_TRUE; -} - static tpl_result_t __tpl_tbm_surface_init(tpl_surface_t *surface) { @@ -461,50 +179,12 @@ __tpl_tbm_surface_init(tpl_surface_t *surface) surface->backend.data = (void *)tpl_tbm_surface; - tpl_tbm_surface->need_reset = TPL_FALSE; - - if (surface->type == TPL_SURFACE_TYPE_WINDOW) { - if (__tpl_tbm_display_get_window_info(surface->display, - surface->native_handle, &surface->width, - &surface->height, NULL, 0, 0) != TPL_ERROR_NONE) { - TPL_ERR("Failed to get native window(%p) info.", - surface->native_handle); - goto error; - } - - tbm_surface_queue_add_destroy_cb((tbm_surface_queue_h)surface->native_handle, - __tpl_tbm_surface_queue_notify_cb, - surface); - - /* Set reset_callback to tbm_queue */ - tbm_surface_queue_add_reset_cb((tbm_surface_queue_h)surface->native_handle, - __cb_tbm_queue_reset_callback, - (void *)tpl_tbm_surface); - - TPL_LOG_B("TBM", "[INIT] tpl_surface(%p) tpl_tbm_surface_t(%p) tbm_surface_queue(%p)", - surface, tpl_tbm_surface, surface->native_handle); - - return TPL_ERROR_NONE; - } else if (surface->type == TPL_SURFACE_TYPE_PIXMAP) { - if (__tpl_tbm_display_get_pixmap_info(surface->display, - surface->native_handle, &surface->width, - &surface->height, NULL) != TPL_TRUE) { - TPL_ERR("Failed to get native pixmap(%p) info.", - surface->native_handle); - - goto error; - } + tpl_tbm_surface->tbm_queue = (tbm_surface_queue_h)surface->native_handle; - tbm_surface_internal_ref((tbm_surface_h)surface->native_handle); - - return TPL_ERROR_NONE; - } - -error: - free(tpl_tbm_surface); - surface->backend.data = NULL; + TPL_LOG_B("TBM", "[INIT] tpl_surface(%p) tpl_tbm_surface_t(%p) tbm_surface_queue(%p)", + surface, tpl_tbm_surface, surface->native_handle); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_NONE; } static void @@ -517,25 +197,15 @@ __tpl_tbm_surface_fini(tpl_surface_t *surface) tpl_tbm_surface = (tpl_tbm_surface_t *) surface->backend.data; TPL_ASSERT(tpl_tbm_surface); - if (tpl_tbm_surface->need_worker_clear) - __tpl_tbm_surface_destroy_swapchain(surface); - TPL_ASSERT(surface); TPL_ASSERT(surface->display); TPL_LOG_B("TBM", "[FINI] tpl_surface(%p) tpl_tbm_surface_t(%p) native_handle(%p)", surface, surface->backend.data, surface->native_handle); - if (surface->type == TPL_SURFACE_TYPE_PIXMAP) - tbm_surface_internal_unref((tbm_surface_h)surface->native_handle); - else if (surface->type == TPL_SURFACE_TYPE_WINDOW) { - tbm_surface_queue_remove_destroy_cb( - (tbm_surface_queue_h)surface->native_handle, - __tpl_tbm_surface_queue_notify_cb, surface); - /*TODO: we need fix for dequeued surface*/ - } + tpl_tbm_surface->tbm_queue = NULL; + free(tpl_tbm_surface); - free(surface->backend.data); surface->backend.data = NULL; } @@ -544,12 +214,7 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface, int num_rects, const int *rects, tbm_fd sync_fence) { - tpl_tbm_surface_t *tpl_tbm_surface = NULL; - tpl_tbm_buffer_t *tpl_tbm_buffer = NULL; - tbm_surface_queue_h tbm_queue = NULL; - int ret = 0; - int union_x, union_y; - int union_w, union_h; + tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data; TPL_ASSERT(surface); TPL_ASSERT(surface->display); @@ -565,91 +230,20 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_internal_unref(tbm_surface); - if (surface->type == TPL_SURFACE_TYPE_PIXMAP) { - TPL_ERR("Pixmap cannot post(%p, 
%p)", surface, - surface->native_handle); - return TPL_ERROR_INVALID_PARAMETER; + if (sync_fence != -1) { + tbm_sync_fence_wait(sync_fence, -1); + close(sync_fence); } - if (surface->backend.type == TPL_BACKEND_TBM_VULKAN_WSI) { - tpl_tbm_surface = surface->backend.data; - - tpl_tbm_buffer = __tpl_tbm_get_tbm_buffer_from_tbm_surface(tbm_surface); - tpl_tbm_buffer->wait_sync = sync_fence; - - tbm_surface_internal_ref(tbm_surface); - pthread_mutex_init(&tpl_tbm_surface->draw_waiting_mutex, NULL); - __tpl_list_push_back(&tpl_tbm_surface->draw_waiting_queue, tbm_surface); - pthread_mutex_unlock(&tpl_tbm_surface->draw_waiting_mutex); - __tpl_worker_new_buffer_notify(&tpl_tbm_surface->worker_surface); - } else { - tbm_queue = (tbm_surface_queue_h)surface->native_handle; - - if (!tbm_queue) { - TPL_ERR("tbm_surface_queue is invalid."); - return TPL_ERROR_INVALID_PARAMETER; - } - - if (sync_fence != -1) { - tbm_sync_fence_wait(sync_fence, -1); - close(sync_fence); - } - - /* If there are given damage rects for given tbm_surface, */ - if (num_rects != 0 && rects != NULL) { - int i; - int left = surface->width; - int bottom = surface->height; - int right = 0, top = 0; - - /* Carculate the union region of given damage rectangles */ - for (i = 0; i < num_rects; i++) { - int rect_i = i * 4; - int x = rects[rect_i]; - int y = rects[rect_i + 1]; - int w = rects[rect_i + 2]; - int h = rects[rect_i + 3]; - - left = (x < left) ? x : left; - bottom = (y < bottom) ? y : bottom; - right = ((x + w) > right) ? (x + w) : right; - top = ((y + h) > top) ? (y + h) : top; - } - - /* Calibrate so as not to exceed the range. */ - left = (left < 0) ? 0 : left; - bottom = (bottom < 0) ? 0 : bottom; - right = (right > surface->width) ? surface->width : right; - top = (top > surface->height) ? surface->height : top; - - /* And set its union rect to tbm_surface as its damage region. */ - union_w = right - left; - union_h = top - bottom; - union_x = left; - union_y = top; - } else { - /* If there are no any damage rects, - * set its full size of surface as its damage region. */ - union_w = surface->width; - union_h = surface->height; - union_x = 0; - union_y = 0; - } - - if (!(ret = tbm_surface_internal_set_damage(tbm_surface, union_x, union_y, - union_w, union_h))) - TPL_WARN("Failed to set damage rect to tbm_surface(%p)", tbm_surface); - - if (tbm_surface_queue_enqueue(tbm_queue, tbm_surface) - != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("tbm_surface_queue_enqueue failed. tbm_queue(%p) tbm_surface(%p)", - tbm_queue, tbm_surface); - return TPL_ERROR_INVALID_OPERATION; - } + if (tbm_surface_queue_enqueue(tpl_tbm_surface->tbm_queue, tbm_surface) + != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("tbm_surface_queue_enqueue failed. 
tbm_queue(%p) tbm_surface(%p)", + tpl_tbm_surface->tbm_queue, tbm_surface); + return TPL_ERROR_INVALID_OPERATION; } TPL_LOG_B("TBM", "[ENQ] tpl_surface(%p) tbm_queue(%p) tbm_surface(%p) bo(%d)", - surface, tbm_queue, tbm_surface, + surface, tpl_tbm_surface->tbm_queue, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); return TPL_ERROR_NONE; @@ -658,12 +252,7 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface, static tpl_bool_t __tpl_tbm_surface_validate(tpl_surface_t *surface) { - tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data; - tpl_bool_t ret = TPL_TRUE; - - ret = !tpl_tbm_surface->need_reset; - - return ret; + return TPL_TRUE; } static tbm_surface_h @@ -671,9 +260,7 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, tbm_fd *sync_fence) { tbm_surface_h tbm_surface = NULL; - tbm_surface_queue_h tbm_queue = NULL; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE; - tpl_tbm_buffer_t *tpl_tbm_buffer = NULL; tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data; TPL_ASSERT(surface); @@ -684,11 +271,9 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, if (sync_fence) *sync_fence = -1; - tbm_queue = (tbm_surface_queue_h)surface->native_handle; - TPL_OBJECT_UNLOCK(surface); - if (tbm_surface_queue_can_dequeue(tbm_queue, 1) == 1) - tsq_err = tbm_surface_queue_dequeue(tbm_queue, &tbm_surface); + if (tbm_surface_queue_can_dequeue(tpl_tbm_surface->tbm_queue, 1) == 1) + tsq_err = tbm_surface_queue_dequeue(tpl_tbm_surface->tbm_queue, &tbm_surface); TPL_OBJECT_LOCK(surface); if (!tbm_surface) { @@ -697,100 +282,20 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, return NULL; } - if (surface->backend.type == TPL_BACKEND_TBM_VULKAN_WSI) { - if ((tpl_tbm_buffer = __tpl_tbm_get_tbm_buffer_from_tbm_surface( - tbm_surface)) == NULL) { - tpl_tbm_buffer = (tpl_tbm_buffer_t *) calloc(1, sizeof(tpl_tbm_buffer_t)); - if (!tpl_tbm_buffer) { - TPL_ERR("Mem alloc for tpl_tbm_buffer failed!"); - return NULL; - } - __tpl_tbm_set_tbm_buffer_to_tbm_surface(tbm_surface, tpl_tbm_buffer); - } - } - /* Inc ref count about tbm_surface */ /* It will be dec when before tbm_surface_queue_enqueue called */ tbm_surface_internal_ref(tbm_surface); - tpl_tbm_surface->need_reset = TPL_FALSE; - TPL_LOG_B("TBM", "[DEQ] tpl_surface(%p) tbm_queue(%p) tbm_surface(%p) bo(%d)", - surface, tbm_queue, tbm_surface, + surface, tpl_tbm_surface->tbm_queue, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)", + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); return tbm_surface; } -static tpl_result_t -__tpl_tbm_surface_get_swapchain_buffers(tpl_surface_t *surface, - tbm_surface_h **buffers, - int *buffer_count) -{ - tbm_surface_h buffer = NULL; - tbm_surface_queue_h tbm_queue = NULL; - tbm_surface_h *swapchain_buffers = NULL; - tbm_surface_queue_error_e tsq_err; - tpl_result_t ret = TPL_ERROR_NONE; - int i, queue_size, dequeue_count = 0; - - TPL_ASSERT(surface); - TPL_ASSERT(buffers); - TPL_ASSERT(buffer_count); - - tbm_queue = (tbm_surface_queue_h)surface->native_handle; - TPL_ASSERT(tbm_queue); - - queue_size = tbm_surface_queue_get_size(tbm_queue); - swapchain_buffers = (tbm_surface_h *)calloc(1, - sizeof(tbm_surface_h) * 
queue_size); - if (!swapchain_buffers) { - TPL_ERR("Failed to allocate memory for buffers."); - return TPL_ERROR_OUT_OF_MEMORY; - } - - for (i = 0; i < queue_size; i++) { - tsq_err = tbm_surface_queue_dequeue(tbm_queue, &buffer); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d", - tsq_err); - dequeue_count = i; - ret = TPL_ERROR_OUT_OF_MEMORY; - goto get_buffer_fail; - } - swapchain_buffers[i] = buffer; - } - - for (i = 0 ; i < queue_size; i++) { - tsq_err = tbm_surface_queue_release(tbm_queue, swapchain_buffers[i]); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to release tbm_surface. | tsq_err = %d", tsq_err); - ret = TPL_ERROR_INVALID_OPERATION; - goto release_buffer_fail; - } - } - - *buffers = swapchain_buffers; - *buffer_count = queue_size; - return TPL_ERROR_NONE; - -get_buffer_fail: - for (i = 0 ; i < dequeue_count ; i++) { - tsq_err = tbm_surface_queue_release(tbm_queue, swapchain_buffers[i]); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to release tbm_surface. | tsq_err = %d", tsq_err); - goto release_buffer_fail; - } - } - -release_buffer_fail: - free(swapchain_buffers); - return ret; - -} - tpl_bool_t __tpl_display_choose_backend_tbm(tpl_handle_t native_dpy) { @@ -811,40 +316,6 @@ __tpl_display_choose_backend_tbm(tpl_handle_t native_dpy) return ret; } -static tpl_result_t -__tpl_tbm_display_query_window_supported_buffer_count( - tpl_display_t *display, - tpl_handle_t window, int *min, int *max) -{ - TPL_ASSERT(display); - - if (!display->backend.data) return TPL_ERROR_INVALID_OPERATION; - - if (min) *min = 0; - if (max) *max = 0; /* 0 mean no limit in vulkan */ - - return TPL_ERROR_NONE; -} - -static tpl_result_t -__tpl_tbm_display_query_window_supported_present_modes( - tpl_display_t *display, - tpl_handle_t window, int *modes) -{ - TPL_ASSERT(display); - - if (!display->backend.data) return TPL_ERROR_INVALID_OPERATION; - - if (modes) { - *modes = TPL_DISPLAY_PRESENT_MODE_MAILBOX | TPL_DISPLAY_PRESENT_MODE_IMMEDIATE; - - if (__tpl_worker_support_vblank() == TPL_TRUE) - *modes |= TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED; - } - - - return TPL_ERROR_NONE; -} void __tpl_display_init_backend_tbm(tpl_display_backend_t *backend, @@ -863,11 +334,6 @@ __tpl_display_init_backend_tbm(tpl_display_backend_t *backend, backend->get_pixmap_info = __tpl_tbm_display_get_pixmap_info; backend->get_buffer_from_native_pixmap = __tpl_tbm_display_get_buffer_from_native_pixmap; - backend->query_window_supported_buffer_count = - __tpl_tbm_display_query_window_supported_buffer_count; - backend->query_window_supported_present_modes = - __tpl_tbm_display_query_window_supported_present_modes; - } void @@ -884,13 +350,5 @@ __tpl_surface_init_backend_tbm(tpl_surface_backend_t *backend, backend->validate = __tpl_tbm_surface_validate; backend->dequeue_buffer = __tpl_tbm_surface_dequeue_buffer; backend->enqueue_buffer = __tpl_tbm_surface_enqueue_buffer; - backend->create_swapchain = __tpl_tbm_surface_create_swapchain; - - if (type == TPL_BACKEND_TBM_VULKAN_WSI) { - backend->destroy_swapchain = __tpl_tbm_surface_destroy_swapchain; - - backend->get_swapchain_buffers = - __tpl_tbm_surface_get_swapchain_buffers; - } } -- 2.7.4 From 19e53eaa2ddb24306a878625e570b389e42564cf Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 9 Mar 2020 12:48:19 +0900 Subject: [PATCH 11/16] tpl_wl_egl_thread: Fixed the reset flag management in reset callback. 
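- The reset flag is now set once, unconditionally, before the user's reset_cb is invoked, instead of separately inside the resize and activation-change branches. In outline, the fix moves the assignment out of the individual branches so it runs for every reset cause; a minimal standalone sketch of the resulting control flow (the types and names below are simplified stand-ins, not the patch's own):

#include <stdbool.h>

struct surf { bool resized; bool activation_changed; bool reset; };

static void
on_queue_reset(struct surf *s)
{
	if (s->resized) {
		/* handle size change */
	}

	if (s->activation_changed) {
		/* handle activation state change */
	}

	/* previously set inside each branch above; now set once,
	 * so every reset cause marks the surface as reset */
	s->reset = true;
}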
Change-Id: Idf98816d628fe2dfecc05804345070ad7a53550f Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 980c599..92c7ef0 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -256,7 +256,6 @@ __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, TPL_LOG_T("WL_EGL", "[QUEUE_RESIZE_CB] wayland_egl_surface(%p) tbm_queue(%p) (%dx%d)", wayland_egl_surface, surface_queue, width, height); - wayland_egl_surface->reset = TPL_TRUE; } /* When queue_reset_callback is called, if is_activated is different from @@ -273,9 +272,10 @@ __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, "[DEACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)", wayland_egl_surface, surface_queue); } - wayland_egl_surface->reset = TPL_TRUE; } + wayland_egl_surface->reset = TPL_TRUE; + if (surface->reset_cb) surface->reset_cb(surface->reset_data); } -- 2.7.4 From d0b1d8b1b244cc9c241746f4bde7636c93812f45 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 20 Mar 2020 12:41:48 +0900 Subject: [PATCH 12/16] Package version up to 1.7.3 Change-Id: Id157d776471c4280aa75fe9f10a8e581f0af72d9 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index d4eccf4..3c19064 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 7 -%define TPL_VERSION_PATCH 2 +%define TPL_VERSION_PATCH 3 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From 44a5cd55fe4c3f63044edd10ec7d6fce87c7f2e7 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 20 Mar 2020 12:42:09 +0900 Subject: [PATCH 13/16] wayland-egl-tizen: Package version up to 1.0.1 Change-Id: I35974fbbac80c3169ec5967988e01e93aa9e534d Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index 3c19064..542be2b 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -26,7 +26,7 @@ #WAYLAND-EGL-TIZEN VERSION MACROS %define WL_EGL_TIZEN_VERSION_MAJOR 1 %define WL_EGL_TIZEN_VERSION_MINOR 0 -%define WL_EGL_TIZEN_VERSION_PATCH 0 +%define WL_EGL_TIZEN_VERSION_PATCH 1 %define WL_EGL_TIZEN_VERSION %{WL_EGL_TIZEN_VERSION_MAJOR}.%{WL_EGL_TIZEN_VERSION_MINOR}.%{WL_EGL_TIZEN_VERSION_PATCH} #TPL WINDOW SYSTEM CHECK -- 2.7.4 From 36d17df9abb534c242acb37c29a5703f1efc2117 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 18 Mar 2020 14:50:19 +0900 Subject: [PATCH 14/16] tpl_wayland_egl_thread: Deleted unnecessary sub_thread for vk_display. - vk_sub_thread was created to wait for a buffer to reach render done. - How to wait for render done was never well defined, so this thread operated only as a temporary measure. - In the future, when render done needs to be known for vulkan, the explicit fence fd feature will be used. 
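For reference, waiting on an explicit fence fd comes down to polling the fd for readability; a minimal sketch, not part of this patch, assuming fence_fd is a fence obtained from the driver that becomes readable when the GPU work completes:

#include <errno.h>
#include <poll.h>

/* Returns 1 when the fence has signaled, 0 on timeout, -1 on error. */
static int
wait_render_done(int fence_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fence_fd, .events = POLLIN };
	int ret;

	do {
		ret = poll(&pfd, 1, timeout_ms);
	} while (ret == -1 && errno == EINTR);

	return ret;
}

The caller still owns fence_fd and must close(2) it when finished.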
Change-Id: Ic9284cbfbd90a0fdb35311c701d2a01749d77a47 Signed-off-by: Joonbum Ko --- src/tpl_wayland_egl_thread.c | 273 +------------------------------------------ 1 file changed, 5 insertions(+), 268 deletions(-) diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index 0176383..4950c7d 100755 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -34,7 +34,6 @@ typedef struct _twe_wl_surf_source twe_wl_surf_source; typedef struct _twe_wl_buffer_info twe_wl_buffer_info; typedef struct _twe_tdm_source twe_tdm_source; typedef struct _twe_del_source twe_del_source; -typedef struct _vk_sync_draw_source twe_sync_draw_source; struct _twe_thread_context { GThread *twe_thread; @@ -141,13 +140,6 @@ struct _twe_wl_surf_source { /* for waiting draw done */ tpl_bool_t use_sync_fence; - /* Temporary sub worker thread */ - GThread *vk_sub_thread; - GMainLoop *vk_sub_loop; - GMutex sub_thread_mutex; - GCond sub_thread_cond; - int draw_done_count; - int post_interval; }; @@ -177,9 +169,6 @@ struct _twe_wl_buffer_info { tbm_fd sync_fd; tpl_bool_t is_vk_image; - /* for waiting draw done */ - twe_sync_draw_source *sync_draw_source; - tbm_surface_h tbm_surface; twe_wl_surf_source *surf_source; @@ -188,16 +177,6 @@ struct _twe_wl_buffer_info { unsigned int serial; }; -struct _vk_sync_draw_source { - GSource gsource; - gpointer tag; - int event_fd; - int draw_done_signal_fd; - tbm_fd draw_fence_fd; - tbm_surface_h tbm_surface; - twe_wl_buffer_info *buf_info; -}; - static twe_thread_context *_twe_ctx; static twe_tdm_source * _twe_thread_tdm_source_create(void); @@ -1613,7 +1592,6 @@ _twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source, buf_info->need_to_commit = TPL_TRUE; buf_info->draw_done = TPL_FALSE; buf_info->tbm_surface = tbm_surface; - buf_info->sync_draw_source = NULL; buf_info->sync_fd = -1; buf_info->sync_timeline = -1; buf_info->is_vk_image = surf_source->disp_source->is_vulkan_dpy; @@ -2220,10 +2198,6 @@ _twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source) * to commit or pending, depending on whether vblank_done * after acquire as much as possible. */ while (tbm_surface_queue_can_acquire(surf_source->tbm_queue, 0)) { - /* If its backend is vulkan, it should be checked with draw_done_count. 
- * Because vulkan surface's [enqueue] doesn't mean render done state */ - if (disp_source->is_vulkan_dpy && surf_source->draw_done_count <= 0) - return; tsq_err = tbm_surface_queue_acquire(surf_source->tbm_queue, &tbm_surface); if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { @@ -2266,8 +2240,6 @@ _twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source) tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); - surf_source->draw_done_count--; - switch (surf_source->swapchain_properties.present_mode) { case TPL_DISPLAY_PRESENT_MODE_IMMEDIATE: _twe_thread_wl_vk_surface_commit(surf_source, tbm_surface); @@ -2333,14 +2305,8 @@ _twe_thread_wl_surface_dispatch(GSource *source, GSourceFunc cb, gpointer data) TPL_ERR("Failed to read from event_fd(%d)", surf_source->event_fd); - if (surf_source->disp_source->is_vulkan_dpy && - surf_source->use_sync_fence) { - g_mutex_lock(&surf_source->sub_thread_mutex); - _twe_thread_wl_surface_acquire_and_commit(surf_source); - g_mutex_unlock(&surf_source->sub_thread_mutex); - } else { - _twe_thread_wl_surface_acquire_and_commit(surf_source); - } + _twe_thread_wl_surface_acquire_and_commit(surf_source); + } else { TPL_ERR("eventfd(%d) cannot wake up with other condition. cond(%d)", surf_source->event_fd, cond); @@ -2659,23 +2625,6 @@ _twe_thread_wl_surf_source_destroy(void *source) g_source_unref(&surf_source->gsource); } -static gpointer -_vk_sub_thread_loop(gpointer data) -{ - twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data; - - g_mutex_lock(&surf_source->sub_thread_mutex); - /* Do nothing */ - TPL_DEBUG("vk_sub_thread(%p) vk_sub_loop(%p) run", - surf_source->vk_sub_thread, surf_source->vk_sub_loop); - g_cond_signal(&surf_source->sub_thread_cond); - g_mutex_unlock(&surf_source->sub_thread_mutex); - - g_main_loop_run(surf_source->vk_sub_loop); - - return surf_source->vk_sub_thread; -} - twe_surface_h twe_surface_add(twe_thread* thread, twe_display_h twe_display, @@ -2741,10 +2690,6 @@ twe_surface_add(twe_thread* thread, source->vblank_waiting_buffers = NULL; source->draw_done_buffer = NULL; - source->vk_sub_loop = NULL; - source->vk_sub_thread = NULL; - source->draw_done_count = 0; - source->set_serial_is_used = TPL_FALSE; source->serial = 0; @@ -2868,7 +2813,6 @@ twe_surface_create_swapchain(twe_surface_h twe_surface, { twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface; twe_wl_disp_source *disp_source = NULL; - GMainContext *context; tbm_bufmgr bufmgr = NULL; unsigned int capability; @@ -2997,22 +2941,6 @@ twe_surface_create_swapchain(twe_surface_h twe_surface, "[SWAPCHAIN_CREATE][2/2] w(%d) h(%d) f(%d) p(%d) b_cnt(%d)", width, height, format, present_mode, buffer_count); - /* TODO : Below lines about sub_thread will be deleted. - * It is temporary code for sync_drawing. 
*/ - - g_mutex_init(&surf_source->sub_thread_mutex); - g_cond_init(&surf_source->sub_thread_cond); - - context = g_main_context_new(); - surf_source->vk_sub_loop = g_main_loop_new(context, FALSE); - - g_main_context_unref(context); - g_mutex_lock(&surf_source->sub_thread_mutex); - surf_source->vk_sub_thread = g_thread_new("twe_sub_thread", _vk_sub_thread_loop, - surf_source); - g_cond_wait(&surf_source->sub_thread_cond, &surf_source->sub_thread_mutex); - g_mutex_unlock(&surf_source->sub_thread_mutex); - return TPL_ERROR_NONE; } @@ -3029,29 +2957,6 @@ twe_surface_destroy_swapchain(twe_surface_h twe_surface) TPL_LOG_T(BACKEND, "[SWAPCHAIN_DESTROY] twe_surface(%p) tbm_queue(%p)", twe_surface, surf_source->tbm_queue); - while (surf_source->vk_sub_loop && - !g_main_loop_is_running(surf_source->vk_sub_loop)) { - /* Do nothing */ - } - - if (surf_source->vk_sub_thread) { - TPL_DEBUG("vk_sub_thread(%p) exit.", surf_source->vk_sub_thread); - g_mutex_lock(&surf_source->sub_thread_mutex); - - g_main_loop_quit(surf_source->vk_sub_loop); - /* Waiting for all drawing buffers. */ - g_thread_join(surf_source->vk_sub_thread); - g_main_loop_unref(surf_source->vk_sub_loop); - - g_mutex_unlock(&surf_source->sub_thread_mutex); - - surf_source->vk_sub_thread = NULL; - surf_source->vk_sub_loop = NULL; - - g_mutex_clear(&surf_source->sub_thread_mutex); - g_cond_clear(&surf_source->sub_thread_cond); - } - /* Waiting for vblank to commit all draw done buffers.*/ while (surf_source->vblank_waiting_buffers && !__tpl_list_is_empty(surf_source->vblank_waiting_buffers)) { @@ -3274,181 +3179,13 @@ twe_surface_commit_without_enqueue(twe_surface_h twe_surface, g_mutex_unlock(&surf_source->surf_mutex); } -static gboolean -_twe_thread_sync_draw_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) -{ - twe_sync_draw_source *sync_draw_source = (twe_sync_draw_source *)source; - GIOCondition cond; - - if (g_source_is_destroyed(source)) { - TPL_ERR("del_source(%p) already destroyed.", source); - return G_SOURCE_REMOVE; - } - - cond = g_source_query_unix_fd(source, sync_draw_source->tag); - - if (cond & G_IO_IN) { - ssize_t s; - uint64_t u; - int ret; - uint64_t value = 1; - tbm_surface_h tbm_surface = sync_draw_source->tbm_surface; - twe_wl_surf_source *surf_source = sync_draw_source->buf_info->surf_source; - - tbm_surface_internal_ref(tbm_surface); - - s = read(sync_draw_source->event_fd, &u, sizeof(uint64_t)); - if (s != sizeof(uint64_t)) - TPL_ERR("Failed to read from event_fd(%d)", - sync_draw_source->event_fd); - - TRACE_BEGIN("Fence waiting. BO(%d)", - tbm_bo_export( - tbm_surface_internal_get_bo(tbm_surface, 0))); - /* Below API is blocking call. - * It is waiting for drawing complete in GPU. */ - if (tbm_sync_fence_wait(sync_draw_source->draw_fence_fd, -1) != 1) { - char buf[1024]; - strerror_r(errno, buf, sizeof(buf)); - TPL_ERR("Failed to wait sync fence(%d). | error: %d(%s)", - sync_draw_source->draw_fence_fd, errno, buf); - } - - g_mutex_lock(&surf_source->sub_thread_mutex); - surf_source->draw_done_count++; - g_mutex_unlock(&surf_source->sub_thread_mutex); - - /* Draw done */ - /* Send event to twe_wl_surf_source */ - ret = write(sync_draw_source->draw_done_signal_fd, - &value, sizeof(uint64_t)); - if (ret == -1) { - TPL_ERR("failed to send acquirable event. 
tbm_surface(%p)", - tbm_surface); - } - TRACE_END(); - - tbm_surface_internal_unref(tbm_surface); - } - - return G_SOURCE_REMOVE; -} - -static void -_twe_thread_sync_draw_source_finalize(GSource *source) -{ - twe_sync_draw_source *sync_draw_source = (twe_sync_draw_source *)source; - twe_wl_buffer_info *buf_info = sync_draw_source->buf_info; - - TPL_LOG_T(BACKEND, "gsource(%p) event_fd(%d)", - source, sync_draw_source->event_fd); - - close(sync_draw_source->event_fd); - close(sync_draw_source->draw_fence_fd); - - buf_info->sync_draw_source = NULL; - sync_draw_source->draw_fence_fd = -1; - sync_draw_source->tag = NULL; - sync_draw_source->event_fd = -1; - sync_draw_source->draw_done_signal_fd = -1; - sync_draw_source->buf_info = NULL; - sync_draw_source->tbm_surface = NULL; -} - -static GSourceFuncs _twe_sync_draw_source_funcs = { - .prepare = NULL, - .check = NULL, - .dispatch = _twe_thread_sync_draw_source_dispatch, - .finalize = _twe_thread_sync_draw_source_finalize, -}; - -static twe_sync_draw_source * -_twe_sync_draw_source_attach(twe_wl_surf_source *surf_source, - twe_wl_buffer_info *buf_info, - tbm_fd sync_fd) -{ - twe_sync_draw_source *source = NULL; - - if (!surf_source->vk_sub_thread || !surf_source->vk_sub_loop) { - TPL_ERR("Invalid parameter. vk_sub_thread is not initialized."); - return NULL; - } - - if (!buf_info) { - TPL_ERR("Invalid parameter. buf_info is NULL"); - return NULL; - } - - source = (twe_sync_draw_source *)g_source_new(&_twe_sync_draw_source_funcs, - sizeof(twe_sync_draw_source)); - if (!source) { - TPL_ERR("[THREAD] Failed to create GSource"); - return NULL; - } - - source->event_fd = eventfd(0, EFD_CLOEXEC); - if (source->event_fd < 0) { - TPL_ERR("[THREAD] Failed to create eventfd. errno(%d)", errno); - g_source_unref(&source->gsource); - return NULL; - } - - source->tag = g_source_add_unix_fd(&source->gsource, - source->event_fd, - G_IO_IN); - source->draw_done_signal_fd = surf_source->event_fd; - source->draw_fence_fd = sync_fd; - source->buf_info = buf_info; - source->tbm_surface = buf_info->tbm_surface; - - g_source_attach(&source->gsource, g_main_loop_get_context(surf_source->vk_sub_loop)); - g_source_unref(&source->gsource); - - return source; -} - tpl_result_t twe_surface_set_sync_fd(twe_surface_h twe_surface, tbm_surface_h tbm_surface, tbm_fd sync_fd) { - twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface; - twe_wl_buffer_info *buf_info = NULL; - twe_sync_draw_source *sync_draw_source = NULL; - int ret; - uint64_t value = 1; - - tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO, - (void **)&buf_info); - if (!buf_info) { - TPL_ERR("wl_buffer_info is not existed in tbm_surface(%p)", - tbm_surface); - surf_source->use_sync_fence = TPL_FALSE; - return TPL_ERROR_INVALID_OPERATION; - } - - sync_draw_source = _twe_sync_draw_source_attach(surf_source, - buf_info, sync_fd); - if (!sync_draw_source) { - TPL_ERR("Failed to create and attach sync_draw_source."); - surf_source->use_sync_fence = TPL_FALSE; - return TPL_ERROR_INVALID_OPERATION; - } - - /* Draw done */ - /* Send event to twe_wl_surf_source */ - ret = write(sync_draw_source->event_fd, - &value, sizeof(uint64_t)); - if (ret == -1) { - TPL_ERR("failed to send event to wait sync_fd(%d). 
twe_wl_surf_source(%p)", - sync_fd, surf_source); - g_source_remove_unix_fd(&sync_draw_source->gsource, - sync_draw_source->tag); - g_source_destroy(&sync_draw_source->gsource); - surf_source->use_sync_fence = TPL_FALSE; - return TPL_ERROR_INVALID_OPERATION; - } - - surf_source->use_sync_fence = TPL_TRUE; + TPL_IGNORE(twe_surface); + TPL_IGNORE(tbm_surface); + TPL_IGNORE(sync_fd); return TPL_ERROR_NONE; } -- 2.7.4 From cc2b14875786b34d9782cbd918e20ebbb4a2c07d Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 18 Mar 2020 15:26:59 +0900 Subject: [PATCH 15/16] tpl_wayland_egl_thread: Added struct twe_fence_wait_source to wait draw done fence. Change-Id: I4a55e917c552af29a7005c3c07ef95ebe4200b65 Signed-off-by: Joonbum Ko --- src/tpl_wayland_egl_thread.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index 4950c7d..e8b68ac 100755 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -34,6 +34,7 @@ typedef struct _twe_wl_surf_source twe_wl_surf_source; typedef struct _twe_wl_buffer_info twe_wl_buffer_info; typedef struct _twe_tdm_source twe_tdm_source; typedef struct _twe_del_source twe_del_source; +typedef struct _twe_fence_wait_source twe_fence_wait_source; struct _twe_thread_context { GThread *twe_thread; @@ -177,6 +178,14 @@ struct _twe_wl_buffer_info { unsigned int serial; }; +struct _twe_fence_wait_source { + GSource gsource; + gpointer tag; + tbm_fd fence_fd; + tbm_surface_h tbm_surface; + twe_wl_surf_source *surf_source; +}; + static twe_thread_context *_twe_ctx; static twe_tdm_source * _twe_thread_tdm_source_create(void); -- 2.7.4 From b5c2f21b24acb5edd1581bdf4c8ee3307fb14ad2 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 18 Mar 2020 18:01:00 +0900 Subject: [PATCH 16/16] wayland_egl_thread: Implemented the function waiting sync_fence to render done. - In the case of mesa, tbm_surface with guaranteed 'render done' is not enqueued. - To compensate for this, fence fd is delivered to the platform side from mesa driver, but libtpl-egl did not have the function to use it. - The added function ensures the following operation. 1. When __tpl_wl_egl_surface_enqueue_buffer is called, if sync_fd (!=-1) is received. 2. This is the case when the buffer is not complete. 3. Create gsource to poll sync_fd in twe_thread. 4. Attach it to twe_thread. (poll starting) 5. If G_IO_IN occurs in sync_fd, it is known as render_done and acquire_and_commit is performed. (twe_thread) 6. After completing just one role, the created source will be removed. 
Change-Id: I16558f700df98afd372bbc83613a8633062953c0 Signed-off-by: Joonbum Ko --- .vscode/settings.json | 5 -- src/tpl_wayland_egl_thread.c | 113 ++++++++++++++++++++++++++++++++++++++++--- src/tpl_wayland_egl_thread.h | 2 +- src/tpl_wl_egl_thread.c | 8 ++- 4 files changed, 114 insertions(+), 14 deletions(-) delete mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index d10fd7e..0000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "files.associations": { - "tpl_utils.h": "c" - } -} \ No newline at end of file diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index e8b68ac..8abbd6d 100755 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -1712,8 +1712,7 @@ __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h surface_queue, uint64_t value = 1; int ret; - if (!surf_source->disp_source->is_vulkan_dpy - || !surf_source->use_sync_fence) { + if (!surf_source->use_sync_fence) { ret = write(surf_source->event_fd, &value, sizeof(uint64_t)); if (ret == -1) { TPL_ERR("failed to send acquirable event. twe_wl_surf_source(%p)", @@ -3188,15 +3187,117 @@ twe_surface_commit_without_enqueue(twe_surface_h twe_surface, g_mutex_unlock(&surf_source->surf_mutex); } +static gboolean +_twe_thread_fence_wait_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) +{ + twe_fence_wait_source *wait_source = (twe_fence_wait_source *)source; + twe_wl_surf_source *surf_source = wait_source->surf_source; + tbm_surface_h tbm_surface = wait_source->tbm_surface; + GIOCondition cond = g_source_query_unix_fd(source, wait_source->tag); + + if (cond & G_IO_IN) { + TPL_LOG_T(BACKEND, "[RENDER DONE] wait_source(%p) tbm_surface(%p) fence_fd(%d)", + wait_source, tbm_surface, wait_source->fence_fd); + } else { + /* If an I/O error occurs here, it is not considered a critical error. + * There may be problems with the screen, but it does not affect the operation. */ + TPL_WARN("Invalid GIOCondition occurred. fd(%d) cond(%d)", + wait_source->fence_fd, cond); + } + + /* Since this source is going to be removed, acquire_and_commit must be + * executed even in a situation other than G_IO_IN. + * Nevertheless, there may be room for improvement. */ + _twe_thread_wl_surface_acquire_and_commit(surf_source); + tbm_surface_internal_unref(tbm_surface); + + /* This source is used only once and does not allow reuse. + * So finalize will be executed immediately. 
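+ * (GLib runs finalize once the source has been destroyed and its last + * reference is dropped: g_source_destroy() below detaches it from the + * main context, and g_source_unref() releases the remaining reference.)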
*/ + g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag); + g_source_destroy(&wait_source->gsource); + g_source_unref(&wait_source->gsource); + + return G_SOURCE_REMOVE; +} + +static void +_twe_thread_fence_wait_source_finalize(GSource *source) +{ + twe_fence_wait_source *wait_source = (twe_fence_wait_source *)source; + + TPL_DEBUG("[FINALIZE] wait_source(%p) fence_fd(%d)", + wait_source, wait_source->fence_fd); + + close(wait_source->fence_fd); + + wait_source->fence_fd = -1; + wait_source->surf_source = NULL; + wait_source->tbm_surface = NULL; + wait_source->tag = NULL; +} + +static GSourceFuncs _twe_fence_wait_source_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = _twe_thread_fence_wait_source_dispatch, + .finalize = _twe_thread_fence_wait_source_finalize, +}; + +tpl_result_t +_twe_thread_fence_wait_source_attach(twe_wl_surf_source *surf_source, + tbm_surface_h tbm_surface, tbm_fd sync_fd) +{ + twe_fence_wait_source *wait_source = NULL; + + wait_source = (twe_fence_wait_source *)g_source_new(&_twe_fence_wait_source_funcs, + sizeof(twe_fence_wait_source)); + if (!wait_source) { + TPL_ERR("[WAIT_SOURCE] Failed to create GSource"); + return TPL_ERROR_OUT_OF_MEMORY; + } + + tbm_surface_internal_ref(tbm_surface); + + wait_source->fence_fd = sync_fd; + wait_source->surf_source = surf_source; + wait_source->tbm_surface = tbm_surface; + + wait_source->tag = g_source_add_unix_fd(&wait_source->gsource, + wait_source->fence_fd, + G_IO_IN); + g_source_attach(&wait_source->gsource, g_main_loop_get_context(_twe_ctx->twe_loop)); + + return TPL_ERROR_NONE; +} + tpl_result_t twe_surface_set_sync_fd(twe_surface_h twe_surface, tbm_surface_h tbm_surface, tbm_fd sync_fd) { - TPL_IGNORE(twe_surface); - TPL_IGNORE(tbm_surface); - TPL_IGNORE(sync_fd); + twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface; + tpl_result_t ret = TPL_ERROR_NONE; - return TPL_ERROR_NONE; + if (!surf_source) { + TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!tbm_surface || !tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + ret = _twe_thread_fence_wait_source_attach(surf_source, tbm_surface, sync_fd); + if (ret != TPL_ERROR_NONE) { + TPL_ERR("Failed to attach source with fence_fd(%d) ret(%d)", + sync_fd, ret); + surf_source->use_sync_fence = TPL_FALSE; + return ret; + } + + surf_source->use_sync_fence = TPL_TRUE; + + return ret; } tbm_fd diff --git a/src/tpl_wayland_egl_thread.h b/src/tpl_wayland_egl_thread.h index 067def1..3a17305 100755 --- a/src/tpl_wayland_egl_thread.h +++ b/src/tpl_wayland_egl_thread.h @@ -90,7 +90,7 @@ twe_surface_set_damage_region(tbm_surface_h tbm_surface, tpl_result_t twe_surface_set_sync_fd(twe_surface_h twe_surface, - tbm_surface_h tbm_surface, tbm_fd wait_fd); + tbm_surface_h tbm_surface, tbm_fd sync_fd); tbm_fd twe_surface_create_sync_fd(tbm_surface_h tbm_surface); diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 92c7ef0..e20801a 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -583,8 +583,12 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, } if (sync_fence != -1) { - tbm_sync_fence_wait(sync_fence, -1); - close(sync_fence); + ret = twe_surface_set_sync_fd(wayland_egl_surface->twe_surface, + tbm_surface, sync_fence); + if (ret != TPL_ERROR_NONE) { + TPL_WARN("Failed to set sync fd (%d). 
But it will continue.", + sync_fence); + } } tsq_err = tbm_surface_queue_enqueue(wayland_egl_surface->tbm_queue, -- 2.7.4