From b80b12e7fe1936f18345e72c56241448ae8f4eb2 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 3 Mar 2021 10:57:25 +0900 Subject: [PATCH 01/16] Fixed a problem occured by use_wait_vblank exception. Change-Id: Id7ba6ab41d7fef4441cf3a03ef63074c6d2c0256 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index fba6b66..c9b2592 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -811,17 +811,17 @@ __tpl_wl_egl_display_init(tpl_display_t *display) goto free_display; } - TPL_LOG_T("WL_EGL", - "[INIT DISPLAY] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", - wl_egl_display, - wl_egl_display->thread, - wl_egl_display->wl_display); - - TPL_LOG_T("WL_EGL", - "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)", - wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE", - wl_egl_display->tss ? "TRUE" : "FALSE", - wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE"); + TPL_INFO("[DISPLAY_INIT]", + "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_egl_display, + wl_egl_display->thread, + wl_egl_display->wl_display); + + TPL_INFO("[DISPLAY_INIT]", + "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)", + wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE", + wl_egl_display->tss ? "TRUE" : "FALSE", + wl_egl_display->use_explicit_sync ? 
"TRUE" : "FALSE"); return TPL_ERROR_NONE; @@ -2344,7 +2344,7 @@ __tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_OPERATION; } - TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", + TPL_INFO("[CANCEL_BUFFER]", "wl_egl_surface(%p) tbm_surface(%p) bo(%d)", wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); return TPL_ERROR_NONE; @@ -2479,6 +2479,7 @@ __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message) tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource); tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; wl_egl_surface->render_done_cnt++; @@ -2496,7 +2497,7 @@ __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - if (wl_egl_surface->vblank_done) + if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done) _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer); else __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, @@ -2592,7 +2593,7 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) } if (ready_to_commit) { - if (wl_egl_surface->vblank_done) + if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done) ready_to_commit = TPL_TRUE; else { wl_egl_buffer->status = WAITING_VBLANK; @@ -3086,7 +3087,7 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name); - if (wl_egl_display->tdm_initialized && + if (wl_egl_display->use_wait_vblank && _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE) TPL_ERR("Failed to set wait vblank."); -- 2.7.4 From a7f4ebadd50667726c24a8327c4d2339427b922c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 3 Mar 2021 
11:21:31 +0900 Subject: [PATCH 02/16] Add a new tpl_gthread API to use g_cond_wait_until. Change-Id: I7187b036fa58cbe7182659b015a3cb0d5eb56966 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 12 ++++++++++++ src/tpl_utils_gthread.h | 19 ++++++++++++++++--- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 95a1665..65a1db2 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -410,6 +410,18 @@ tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex) g_cond_wait(gcond, gmutex); } +tpl_result_t +tpl_cond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, + int64_t timeout_ms) +{ + gint64 end_time = g_get_monotonic_time() + + (timeout_ms * G_TIME_SPAN_MILLISECOND); + if (!g_cond_wait_until(gcond, gmutex, end_time)) + return TPL_ERROR_TIME_OUT; + + return TPL_ERROR_NONE; +} + void tpl_gcond_signal(tpl_gcond *gcond) { diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 8c0d066..a1d4ce1 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -154,7 +154,7 @@ tpl_gmutex_unlock(tpl_gmutex *gmutex); /** * wrapping g_cond_init() * - * @param gmutex Pointer to tpl_gcond. + * @param gcond Pointer to tpl_gcond. */ void tpl_gcond_init(tpl_gcond *gcond); @@ -162,7 +162,7 @@ tpl_gcond_init(tpl_gcond *gcond); /** * wrapping g_cond_clear() * - * @param gmutex Pointer to tpl_gcond. + * @param gcond Pointer to tpl_gcond. */ void tpl_gcond_clear(tpl_gcond *gcond); @@ -170,12 +170,25 @@ tpl_gcond_clear(tpl_gcond *gcond); /** * wrapping g_cond_wait() * - * @param gmutex Pointer to tpl_gcond. + * @param gcond Pointer to tpl_gcond. + * @param gmutex Pointer to tpl_gmutex */ void tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex); /** + * wrapping g_cond_wait_until() + * + * @param gcond Pointer to tpl_gcond. + * @param gmutex Pointer to tpl_gmutex. + * @param timeout_ms int64_t time(ms) to wait. 
+ * + * @return tpl_result_t TPL_ERROR_NONE or TPL_ERROR_TIME_OUT + */ +tpl_result_t +tpl_cond_timed_wait(tpl_gcond *gcond, tpl_gmutex *gmutex, int64_t timeout_ms); + +/** * wrapping g_cond_signal() * * @param gmutex Pointer to tpl_gcond. -- 2.7.4 From d76e6e95bdae459e26b18e7d05f68896b22d97f0 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 3 Mar 2021 12:18:49 +0900 Subject: [PATCH 03/16] Use tpl_gcond_timed_wait to prevent deadlock. - There may be cases where the ddk terminates without sending a fence release signal. - In such a case, waiting for signaled with tpl_gcond_wait() may result in deadlock. - The newly added tpl_gcond_timed_wait() is used to force release if a signal does not come within 16ms. Change-Id: I11801bb9f8ff4450a00b12656694940e28121a69 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index c9b2592..36310eb 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -1450,8 +1450,15 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) need_to_cancel = wl_egl_buffer->status == DEQUEUED; - if (wl_egl_buffer->status == WAITING_SIGNALED) - tpl_gcond_wait(&wl_egl_buffer->cond, &wl_egl_buffer->mutex); + if (wl_egl_buffer->status == WAITING_SIGNALED) { + tpl_result_t wait_result = TPL_ERROR_NONE; + wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, + &wl_egl_buffer->mutex, + 16); + if (wait_result == TPL_ERROR_TIME_OUT) + TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", + wl_egl_buffer); + } if (need_to_release) { tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, -- 2.7.4 From cb57bc607f440c4377561bf94dc0a08d86fc904c Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 4 Mar 2021 10:36:02 +0900 Subject: [PATCH 04/16] Add mutex lock/unlock before when clear mutex. 
Change-Id: Ibeed901734d50be64fce2843ee93982e4071a5cf Signed-off-by: Joonbum Ko --- src/tpl_wl_egl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 36310eb..5d7b669 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -1950,6 +1950,8 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex); + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); tpl_gmutex_clear(&wl_egl_surface->surf_mutex); tpl_gcond_clear(&wl_egl_surface->surf_cond); -- 2.7.4 From daac578a4025e5e0d9dd2dd6fd32082c63295e75 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 4 Mar 2021 12:58:28 +0900 Subject: [PATCH 05/16] Add string_to_status to print status logs as string. Change-Id: I2166c284714a70a5c5c82a446e00e9e22472aa6a Signed-off-by: Joonbum Ko --- src/tpl_wl_egl.c | 48 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 5d7b669..a9f5e39 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -122,15 +122,25 @@ struct _tpl_wl_egl_surface { }; typedef enum buffer_status { - RELEASED = 0, - DEQUEUED, - ENQUEUED, - ACQUIRED, - WAITING_SIGNALED, - WAITING_VBLANK, - COMMITTED, + RELEASED = 0, // 0 + DEQUEUED, // 1 + ENQUEUED, // 2 + ACQUIRED, // 3 + WAITING_SIGNALED, // 4 + WAITING_VBLANK, // 5 + COMMITTED, // 6 } buffer_status_t; +static const char *status_to_string[7] = { + "RELEASED", // 0 + "DEQUEUED", // 1 + "ENQUEUED", // 2 + "ACQUIRED", // 3 + "WAITING_SIGNALED", // 4 + "WAITING_VBLANK", // 5 + "COMMITTED", // 6 +}; + struct _tpl_wl_egl_buffer { tbm_surface_h tbm_surface; int bo_name; @@ -785,7 +795,8 @@ __tpl_wl_egl_display_init(tpl_display_t *display) /* Create gthread */ wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", - (tpl_gthread_func)_thread_init, (void 
*)wl_egl_display); + (tpl_gthread_func)_thread_init, + (void *)wl_egl_display); if (!wl_egl_display->thread) { TPL_ERR("Failed to create wl_egl_thread"); goto free_display; @@ -939,9 +950,11 @@ __tpl_wl_egl_display_get_window_info(tpl_display_t *display, if (width) *width = wl_egl_window->width; if (height) *height = wl_egl_window->height; if (format) { - struct tizen_private *tizen_private = (struct tizen_private *)wl_egl_window->driver_private; + struct tizen_private *tizen_private = + (struct tizen_private *)wl_egl_window->driver_private; if (tizen_private && tizen_private->data) { - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)tizen_private->data; *format = wl_egl_surface->format; } else { if (a_size == 8) @@ -1428,9 +1441,10 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); wl_egl_buffer = wl_egl_surface->buffers[idx]; if (wl_egl_buffer) { - TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%d)", + TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)", idx, wl_egl_buffer, - wl_egl_buffer->tbm_surface, wl_egl_buffer->status); + wl_egl_buffer->tbm_surface, + status_to_string[wl_egl_buffer->status]); wl_egl_surface->buffers[idx] = NULL; wl_egl_surface->buffer_cnt--; @@ -2309,9 +2323,11 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_surface->reset = TPL_FALSE; TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", wl_egl_buffer->bo_name); + TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", + wl_egl_buffer->bo_name); TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", - wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name, release_fence ? 
*release_fence : -1); + wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name, + release_fence ? *release_fence : -1); tpl_gmutex_unlock(&wl_egl_buffer->mutex); tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); @@ -3253,10 +3269,10 @@ _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; if (wl_egl_buffer) { TPL_INFO("[INFO]", - "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%d)", + "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", idx, wl_egl_buffer, wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name, - wl_egl_buffer->status); + status_to_string[wl_egl_buffer->status]); } } tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); -- 2.7.4 From 5d4e0d974877e6e1b756663461a356e8dbdf4a7d Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 8 Mar 2021 17:46:31 +0900 Subject: [PATCH 06/16] Rename old one by adding _legacy postfix. Change-Id: I7a69b027438ce99044d0842b971809efa61d49e3 --- src/Makefile.am | 3 +- src/tpl_wl_egl.c | 3279 ------------------------------------ src/tpl_wl_egl_thread.c | 3605 +++++++++++++++++++++++++++++++++------- src/tpl_wl_egl_thread_legacy.c | 868 ++++++++++ 4 files changed, 3878 insertions(+), 3877 deletions(-) delete mode 100644 src/tpl_wl_egl.c create mode 100644 src/tpl_wl_egl_thread_legacy.c diff --git a/src/Makefile.am b/src/Makefile.am index 491d40b..62240c6 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -20,7 +20,8 @@ libtpl_egl_la_SOURCES = tpl.c \ tpl_object.c \ tpl_surface.c \ tpl_utils_hlist.c \ - tpl_utils_map.c + tpl_utils_map.c \ + tpl_utils_gthread.c # Wayland if WITH_WAYLAND diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c deleted file mode 100644 index a9f5e39..0000000 --- a/src/tpl_wl_egl.c +++ /dev/null @@ -1,3279 +0,0 @@ - -#include "tpl_internal.h" - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -#include 
"wayland-egl-tizen/wayland-egl-tizen.h" -#include "wayland-egl-tizen/wayland-egl-tizen-priv.h" - -#include -#include -#include - -#include "tpl_utils_gthread.h" - -static int wl_egl_buffer_key; -#define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key) - -/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */ -#define CLIENT_QUEUE_SIZE 3 -#define BUFFER_ARRAY_SIZE (CLIENT_QUEUE_SIZE * 2) - -typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t; -typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t; -typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t; - -struct _tpl_wl_egl_display { - tpl_gsource *disp_source; - tpl_gthread *thread; - tpl_gmutex wl_event_mutex; - - struct wl_display *wl_display; - struct wl_event_queue *ev_queue; - struct wayland_tbm_client *wl_tbm_client; - int last_error; /* errno of the last wl_display error*/ - - tpl_bool_t wl_initialized; - tpl_bool_t tdm_initialized; - - tdm_client *tdm_client; - tpl_gsource *tdm_source; - int tdm_display_fd; - - tpl_bool_t use_wait_vblank; - tpl_bool_t use_explicit_sync; - tpl_bool_t prepared; - - struct tizen_surface_shm *tss; /* used for surface buffer_flush */ - struct wp_presentation *presentation; /* for presentation feedback */ - struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */ -}; - -struct _tpl_wl_egl_surface { - tpl_gsource *surf_source; - - tbm_surface_queue_h tbm_queue; - - struct wl_egl_window *wl_egl_window; - struct wl_surface *wl_surface; - struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ - struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */ - - tdm_client_vblank *vblank; - - /* surface information */ - int render_done_cnt; - unsigned int serial; - - int width; - int height; - int format; - int latest_transform; - int rotation; - int post_interval; - - tpl_wl_egl_display_t *wl_egl_display; - tpl_surface_t *tpl_surface; 
- - /* wl_egl_buffer array for buffer tracing */ - tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE]; - int buffer_cnt; /* the number of using wl_egl_buffers */ - tpl_gmutex buffers_mutex; - - tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */ - tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */ - - struct { - tpl_gmutex mutex; - int fd; - } commit_sync; - - struct { - tpl_gmutex mutex; - int fd; - } presentation_sync; - - tpl_gmutex surf_mutex; - tpl_gcond surf_cond; - - /* for waiting draw done */ - tpl_bool_t use_render_done_fence; - tpl_bool_t is_activated; - tpl_bool_t reset; /* TRUE if queue reseted by external */ - tpl_bool_t need_to_enqueue; - tpl_bool_t prerotation_capability; - tpl_bool_t vblank_done; - tpl_bool_t set_serial_is_used; -}; - -typedef enum buffer_status { - RELEASED = 0, // 0 - DEQUEUED, // 1 - ENQUEUED, // 2 - ACQUIRED, // 3 - WAITING_SIGNALED, // 4 - WAITING_VBLANK, // 5 - COMMITTED, // 6 -} buffer_status_t; - -static const char *status_to_string[7] = { - "RELEASED", // 0 - "DEQUEUED", // 1 - "ENQUEUED", // 2 - "ACQUIRED", // 3 - "WAITING_SIGNALED", // 4 - "WAITING_VBLANK", // 5 - "COMMITTED", // 6 -}; - -struct _tpl_wl_egl_buffer { - tbm_surface_h tbm_surface; - int bo_name; - - struct wl_proxy *wl_buffer; - int dx, dy; /* position to attach to wl_surface */ - int width, height; /* size to attach to wl_surface */ - - buffer_status_t status; /* for tracing buffer status */ - int idx; /* position index in buffers array of wl_egl_surface */ - - /* for damage region */ - int num_rects; - int *rects; - - /* for wayland_tbm_client_set_buffer_transform */ - int w_transform; - tpl_bool_t w_rotated; - - /* for wl_surface_set_buffer_transform */ - int transform; - - /* for wayland_tbm_client_set_buffer_serial */ - unsigned int serial; - - /* for checking need_to_commit (frontbuffer mode) */ - tpl_bool_t need_to_commit; - - /* for checking draw done */ - tpl_bool_t draw_done; - - - /* to get release event 
via zwp_linux_buffer_release_v1 */ - struct zwp_linux_buffer_release_v1 *buffer_release; - - /* each buffers own its release_fence_fd, until it passes ownership - * to it to EGL */ - int32_t release_fence_fd; - - /* each buffers own its acquire_fence_fd. - * If it use zwp_linux_buffer_release_v1 the ownership of this fd - * will be passed to display server - * Otherwise it will be used as a fence waiting for render done - * on tpl thread */ - int32_t acquire_fence_fd; - - /* Fd to send a signal when wl_surface_commit with this buffer */ - int32_t commit_sync_fd; - - /* Fd to send a siganl when receive the - * presentation feedback from display server */ - int32_t presentation_sync_fd; - - tpl_gsource *waiting_source; - - tpl_gmutex mutex; - tpl_gcond cond; - - tpl_wl_egl_surface_t *wl_egl_surface; -}; - -struct pst_feedback { - /* to get presentation feedback from display server */ - struct wp_presentation_feedback *presentation_feedback; - - int32_t pst_sync_fd; - - int bo_name; - tpl_wl_egl_surface_t *wl_egl_surface; - -}; - -static int -_get_tbm_surface_bo_name(tbm_surface_h tbm_surface); -static void -_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface); -static void -__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer); -static tpl_wl_egl_buffer_t * -_get_wl_egl_buffer(tbm_surface_h tbm_surface); -static int -_write_to_eventfd(int eventfd); -static void -_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface); -static tpl_result_t -_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface); -static void -_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, - tpl_wl_egl_buffer_t *wl_egl_buffer); - -static tpl_bool_t -_check_native_handle_is_wl_display(tpl_handle_t display) -{ - struct wl_interface *wl_egl_native_dpy = *(void **) display; - - if (!wl_egl_native_dpy) { - TPL_ERR("Invalid parameter. 
native_display(%p)", wl_egl_native_dpy); - return TPL_FALSE; - } - - /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value - is a memory address pointing the structure of wl_display_interface. */ - if (wl_egl_native_dpy == &wl_display_interface) - return TPL_TRUE; - - if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name, - strlen(wl_display_interface.name)) == 0) { - return TPL_TRUE; - } - - return TPL_FALSE; -} - -static tpl_bool_t -__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) -{ - tpl_wl_egl_display_t *wl_egl_display = NULL; - tdm_error tdm_err = TDM_ERROR_NONE; - - TPL_IGNORE(message); - - wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - if (!wl_egl_display) { - TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource); - TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); - return TPL_FALSE; - } - - tdm_err = tdm_client_handle_events(wl_egl_display->tdm_client); - - /* If an error occurs in tdm_client_handle_events, it cannot be recovered. - * When tdm_source is no longer available due to an unexpected situation, - * wl_egl_thread must remove it from the thread and destroy it. - * In that case, tdm_vblank can no longer be used for surfaces and displays - * that used this tdm_source. */ - if (tdm_err != TDM_ERROR_NONE) { - TPL_ERR("Error occured in tdm_client_handle_events. 
tdm_err(%d)", - tdm_err); - TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); - - tpl_gsource_destroy(gsource, TPL_FALSE); - - wl_egl_display->tdm_source = NULL; - - return TPL_FALSE; - } - - return TPL_TRUE; -} - -static void -__thread_func_tdm_finalize(tpl_gsource *gsource) -{ - tpl_wl_egl_display_t *wl_egl_display = NULL; - - wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - - TPL_LOG_T("WL_EGL", "tdm_destroy| wl_egl_display(%p) tdm_client(%p)", - wl_egl_display, wl_egl_display->tdm_client); - - if (wl_egl_display->tdm_client) { - tdm_client_destroy(wl_egl_display->tdm_client); - wl_egl_display->tdm_client = NULL; - wl_egl_display->tdm_display_fd = -1; - } - - wl_egl_display->tdm_initialized = TPL_FALSE; -} - -static tpl_gsource_functions tdm_funcs = { - .prepare = NULL, - .check = NULL, - .dispatch = __thread_func_tdm_dispatch, - .finalize = __thread_func_tdm_finalize, -}; - -tpl_result_t -_thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display) -{ - tdm_client *tdm_client = NULL; - int tdm_display_fd = -1; - tdm_error tdm_err = TDM_ERROR_NONE; - - tdm_client = tdm_client_create(&tdm_err); - if (!tdm_client || tdm_err != TDM_ERROR_NONE) { - TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err); - return TPL_ERROR_INVALID_OPERATION; - } - - tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd); - if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) { - TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err); - tdm_client_destroy(tdm_client); - return TPL_ERROR_INVALID_OPERATION; - } - - wl_egl_display->tdm_display_fd = tdm_display_fd; - wl_egl_display->tdm_client = tdm_client; - wl_egl_display->tdm_source = NULL; - wl_egl_display->tdm_initialized = TPL_TRUE; - - TPL_INFO("[TDM_CLIENT_INIT]", - "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)", - wl_egl_display, tdm_client, tdm_display_fd); - - return TPL_ERROR_NONE; -} - -#define IMPL_TIZEN_SURFACE_SHM_VERSION 2 - -static void 
-__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, - uint32_t name, const char *interface, - uint32_t version) -{ - tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; - - if (!strcmp(interface, "tizen_surface_shm")) { - wl_egl_display->tss = wl_registry_bind(wl_registry, - name, - &tizen_surface_shm_interface, - ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ? - version : IMPL_TIZEN_SURFACE_SHM_VERSION)); - } else if (!strcmp(interface, wp_presentation_interface.name)) { - wl_egl_display->presentation = - wl_registry_bind(wl_registry, - name, &wp_presentation_interface, 1); - TPL_DEBUG("bind wp_presentation_interface"); - } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) { - char *env = tpl_getenv("TPL_EFS"); - if (env && atoi(env)) { - wl_egl_display->explicit_sync = - wl_registry_bind(wl_registry, name, - &zwp_linux_explicit_synchronization_v1_interface, 1); - wl_egl_display->use_explicit_sync = TPL_TRUE; - TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface"); - } else { - wl_egl_display->use_explicit_sync = TPL_FALSE; - } - } -} - -static void -__cb_wl_resistry_global_remove_callback(void *data, - struct wl_registry *wl_registry, - uint32_t name) -{ -} - -static const struct wl_registry_listener registry_listener = { - __cb_wl_resistry_global_callback, - __cb_wl_resistry_global_remove_callback -}; - -static void -_wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display, - const char *func_name) -{ - int dpy_err; - char buf[1024]; - strerror_r(errno, buf, sizeof(buf)); - - if (wl_egl_display->last_error == errno) - return; - - TPL_ERR("falied to %s. 
error:%d(%s)", func_name, errno, buf); - - dpy_err = wl_display_get_error(wl_egl_display->wl_display); - if (dpy_err == EPROTO) { - const struct wl_interface *err_interface; - uint32_t err_proxy_id, err_code; - err_code = wl_display_get_protocol_error(wl_egl_display->wl_display, - &err_interface, - &err_proxy_id); - TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d", - err_interface->name, err_code, err_proxy_id); - } - - wl_egl_display->last_error = errno; -} - -tpl_result_t -_thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display) -{ - struct wl_registry *registry = NULL; - struct wl_event_queue *queue = NULL; - struct wl_display *display_wrapper = NULL; - struct wl_proxy *wl_tbm = NULL; - struct wayland_tbm_client *wl_tbm_client = NULL; - int ret; - tpl_result_t result = TPL_ERROR_NONE; - - queue = wl_display_create_queue(wl_egl_display->wl_display); - if (!queue) { - TPL_ERR("Failed to create wl_queue wl_display(%p)", - wl_egl_display->wl_display); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display); - if (!wl_egl_display->ev_queue) { - TPL_ERR("Failed to create wl_queue wl_display(%p)", - wl_egl_display->wl_display); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display); - if (!display_wrapper) { - TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)", - wl_egl_display->wl_display); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue); - - registry = wl_display_get_registry(display_wrapper); - if (!registry) { - TPL_ERR("Failed to create wl_registry"); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - wl_proxy_wrapper_destroy(display_wrapper); - display_wrapper = NULL; - - wl_tbm_client = wayland_tbm_client_init(wl_egl_display->wl_display); - if (!wl_tbm_client) { - TPL_ERR("Failed to 
initialize wl_tbm_client."); - result = TPL_ERROR_INVALID_CONNECTION; - goto fini; - } - - wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client); - if (!wl_tbm) { - TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client); - result = TPL_ERROR_INVALID_CONNECTION; - goto fini; - } - - wl_proxy_set_queue(wl_tbm, wl_egl_display->ev_queue); - wl_egl_display->wl_tbm_client = wl_tbm_client; - - if (wl_registry_add_listener(registry, ®istry_listener, - wl_egl_display)) { - TPL_ERR("Failed to wl_registry_add_listener"); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue); - if (ret == -1) { - _wl_display_print_err(wl_egl_display, "roundtrip_queue"); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - /* set tizen_surface_shm's queue as client's private queue */ - if (wl_egl_display->tss) { - wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss, - wl_egl_display->ev_queue); - TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss); - } - - if (wl_egl_display->presentation) { - wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation, - wl_egl_display->ev_queue); - TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.", - wl_egl_display->presentation); - } - - if (wl_egl_display->explicit_sync) { - wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync, - wl_egl_display->ev_queue); - TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", - wl_egl_display->explicit_sync); - } - - wl_egl_display->wl_initialized = TPL_TRUE; - - TPL_INFO("[WAYLAND_INIT]", - "wl_egl_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)", - wl_egl_display, wl_egl_display->wl_display, - wl_egl_display->wl_tbm_client, wl_egl_display->ev_queue); - TPL_INFO("[WAYLAND_INIT]", - "tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)", - wl_egl_display->tss, wl_egl_display->presentation, - wl_egl_display->explicit_sync); - 
-fini: - if (display_wrapper) - wl_proxy_wrapper_destroy(display_wrapper); - if (registry) - wl_registry_destroy(registry); - if (queue) - wl_event_queue_destroy(queue); - - return result; -} - -void -_thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display) -{ - /* If wl_egl_display is in prepared state, cancel it */ - if (wl_egl_display->prepared) { - wl_display_cancel_read(wl_egl_display->wl_display); - wl_egl_display->prepared = TPL_FALSE; - } - - if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, - wl_egl_display->ev_queue) == -1) { - _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); - } - - if (wl_egl_display->tss) { - TPL_INFO("[TIZEN_SURFACE_SHM_DESTROY]", - "wl_egl_display(%p) tizen_surface_shm(%p) fini.", - wl_egl_display, wl_egl_display->tss); - tizen_surface_shm_destroy(wl_egl_display->tss); - wl_egl_display->tss = NULL; - } - - if (wl_egl_display->presentation) { - TPL_INFO("[WP_PRESENTATION_DESTROY]", - "wl_egl_display(%p) wp_presentation(%p) fini.", - wl_egl_display, wl_egl_display->presentation); - wp_presentation_destroy(wl_egl_display->presentation); - wl_egl_display->presentation = NULL; - } - - if (wl_egl_display->explicit_sync) { - TPL_INFO("[EXPLICIT_SYNC_DESTROY]", - "wl_egl_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.", - wl_egl_display, wl_egl_display->explicit_sync); - zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync); - wl_egl_display->explicit_sync = NULL; - } - - if (wl_egl_display->wl_tbm_client) { - struct wl_proxy *wl_tbm = NULL; - - wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm( - wl_egl_display->wl_tbm_client); - if (wl_tbm) { - wl_proxy_set_queue(wl_tbm, NULL); - } - - TPL_INFO("[WL_TBM_DEINIT]", - "wl_egl_display(%p) wl_tbm_client(%p)", - wl_egl_display, wl_egl_display->wl_tbm_client); - wayland_tbm_client_deinit(wl_egl_display->wl_tbm_client); - wl_egl_display->wl_tbm_client = NULL; - } - - wl_event_queue_destroy(wl_egl_display->ev_queue); - 
- wl_egl_display->wl_initialized = TPL_FALSE; - - TPL_INFO("[DISPLAY_FINI]", "wl_egl_display(%p) wl_display(%p)", - wl_egl_display, wl_egl_display->wl_display); -} - -static void* -_thread_init(void *data) -{ - tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; - - if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) { - TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)", - wl_egl_display, wl_egl_display->wl_display); - } - - if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) { - TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED"); - } - - return wl_egl_display; -} - -static tpl_bool_t -__thread_func_disp_prepare(tpl_gsource *gsource) -{ - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - - /* If this wl_egl_display is already prepared, - * do nothing in this function. */ - if (wl_egl_display->prepared) - return TPL_FALSE; - - /* If there is a last_error, there is no need to poll, - * so skip directly to dispatch. - * prepare -> dispatch */ - if (wl_egl_display->last_error) - return TPL_TRUE; - - while (wl_display_prepare_read_queue(wl_egl_display->wl_display, - wl_egl_display->ev_queue) != 0) { - if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, - wl_egl_display->ev_queue) == -1) { - _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); - } - } - - wl_egl_display->prepared = TPL_TRUE; - - wl_display_flush(wl_egl_display->wl_display); - - return TPL_FALSE; -} - -static tpl_bool_t -__thread_func_disp_check(tpl_gsource *gsource) -{ - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - tpl_bool_t ret = TPL_FALSE; - - if (!wl_egl_display->prepared) - return ret; - - /* If prepared, but last_error is set, - * cancel_read is executed and FALSE is returned. - * That can lead to G_SOURCE_REMOVE by calling disp_prepare again - * and skipping disp_check from prepare to disp_dispatch. 
- * check -> prepare -> dispatch -> G_SOURCE_REMOVE */ - if (wl_egl_display->prepared && wl_egl_display->last_error) { - wl_display_cancel_read(wl_egl_display->wl_display); - return ret; - } - - if (tpl_gsource_check_io_condition(gsource)) { - if (wl_display_read_events(wl_egl_display->wl_display) == -1) - _wl_display_print_err(wl_egl_display, "read_event"); - ret = TPL_TRUE; - } else { - wl_display_cancel_read(wl_egl_display->wl_display); - ret = TPL_FALSE; - } - - wl_egl_display->prepared = TPL_FALSE; - - return ret; -} - -static tpl_bool_t -__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) -{ - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - - TPL_IGNORE(message); - - /* If there is last_error, SOURCE_REMOVE should be returned - * to remove the gsource from the main loop. - * This is because wl_egl_display is not valid since last_error was set.*/ - if (wl_egl_display->last_error) { - return TPL_FALSE; - } - - tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); - if (tpl_gsource_check_io_condition(gsource)) { - if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, - wl_egl_display->ev_queue) == -1) { - _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); - } - } - - wl_display_flush(wl_egl_display->wl_display); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - - return TPL_TRUE; -} - -static void -__thread_func_disp_finalize(tpl_gsource *gsource) -{ - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - - if (wl_egl_display->wl_initialized) - _thread_wl_display_fini(wl_egl_display); - - TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)", - wl_egl_display, gsource); - - return; -} - - -static tpl_gsource_functions disp_funcs = { - .prepare = __thread_func_disp_prepare, - .check = __thread_func_disp_check, - .dispatch = __thread_func_disp_dispatch, - .finalize = __thread_func_disp_finalize, -}; - -static 
tpl_result_t -__tpl_wl_egl_display_init(tpl_display_t *display) -{ - tpl_wl_egl_display_t *wl_egl_display = NULL; - - TPL_ASSERT(display); - - /* Do not allow default display in wayland. */ - if (!display->native_handle) { - TPL_ERR("Invalid native handle for display."); - return TPL_ERROR_INVALID_PARAMETER; - } - - if (!_check_native_handle_is_wl_display(display->native_handle)) { - TPL_ERR("native_handle(%p) is not wl_display", display->native_handle); - return TPL_ERROR_INVALID_PARAMETER; - } - - wl_egl_display = (tpl_wl_egl_display_t *) calloc(1, - sizeof(tpl_wl_egl_display_t)); - if (!wl_egl_display) { - TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t."); - return TPL_ERROR_OUT_OF_MEMORY; - } - - display->backend.data = wl_egl_display; - display->bufmgr_fd = -1; - - wl_egl_display->tdm_initialized = TPL_FALSE; - wl_egl_display->wl_initialized = TPL_FALSE; - - wl_egl_display->ev_queue = NULL; - wl_egl_display->wl_display = (struct wl_display *)display->native_handle; - wl_egl_display->last_error = 0; - wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled - wl_egl_display->prepared = TPL_FALSE; - - /* Wayland Interfaces */ - wl_egl_display->tss = NULL; - wl_egl_display->presentation = NULL; - wl_egl_display->explicit_sync = NULL; - wl_egl_display->wl_tbm_client = NULL; - - wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled - { - char *env = tpl_getenv("TPL_WAIT_VBLANK"); - if (env && !atoi(env)) { - wl_egl_display->use_wait_vblank = TPL_FALSE; - } - } - - tpl_gmutex_init(&wl_egl_display->wl_event_mutex); - - /* Create gthread */ - wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", - (tpl_gthread_func)_thread_init, - (void *)wl_egl_display); - if (!wl_egl_display->thread) { - TPL_ERR("Failed to create wl_egl_thread"); - goto free_display; - } - - wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread, - (void *)wl_egl_display, - wl_display_get_fd(wl_egl_display->wl_display), - &disp_funcs, 
SOURCE_TYPE_NORMAL); - if (!wl_egl_display->disp_source) { - TPL_ERR("Failed to add native_display(%p) to thread(%p)", - display->native_handle, - wl_egl_display->thread); - goto free_display; - } - - wl_egl_display->tdm_source = tpl_gsource_create(wl_egl_display->thread, - (void *)wl_egl_display, - wl_egl_display->tdm_display_fd, - &tdm_funcs, SOURCE_TYPE_NORMAL); - if (!wl_egl_display->tdm_source) { - TPL_ERR("Failed to create tdm_gsource\n"); - goto free_display; - } - - TPL_INFO("[DISPLAY_INIT]", - "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", - wl_egl_display, - wl_egl_display->thread, - wl_egl_display->wl_display); - - TPL_INFO("[DISPLAY_INIT]", - "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)", - wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE", - wl_egl_display->tss ? "TRUE" : "FALSE", - wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE"); - - return TPL_ERROR_NONE; - -free_display: - if (wl_egl_display->thread) { - if (wl_egl_display->tdm_source) - tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); - if (wl_egl_display->disp_source) - tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); - - tpl_gthread_destroy(wl_egl_display->thread); - } - - wl_egl_display->thread = NULL; - free(wl_egl_display); - - display->backend.data = NULL; - return TPL_ERROR_INVALID_OPERATION; -} - -static void -__tpl_wl_egl_display_fini(tpl_display_t *display) -{ - tpl_wl_egl_display_t *wl_egl_display; - - TPL_ASSERT(display); - - wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data; - if (wl_egl_display) { - TPL_INFO("[DISPLAY_FINI]", - "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", - wl_egl_display, - wl_egl_display->thread, - wl_egl_display->wl_display); - - if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) { - tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); - wl_egl_display->tdm_source = NULL; - } - - if (wl_egl_display->disp_source) { - tpl_gsource_destroy(wl_egl_display->disp_source, 
TPL_TRUE); - wl_egl_display->disp_source = NULL; - } - - if (wl_egl_display->thread) { - tpl_gthread_destroy(wl_egl_display->thread); - wl_egl_display->thread = NULL; - } - - tpl_gmutex_clear(&wl_egl_display->wl_event_mutex); - - free(wl_egl_display); - } - - display->backend.data = NULL; -} - -static tpl_result_t -__tpl_wl_egl_display_query_config(tpl_display_t *display, - tpl_surface_type_t surface_type, - int red_size, int green_size, - int blue_size, int alpha_size, - int color_depth, int *native_visual_id, - tpl_bool_t *is_slow) -{ - TPL_ASSERT(display); - - if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 && - green_size == 8 && blue_size == 8 && - (color_depth == 32 || color_depth == 24)) { - - if (alpha_size == 8) { - if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888; - if (is_slow) *is_slow = TPL_FALSE; - return TPL_ERROR_NONE; - } - if (alpha_size == 0) { - if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888; - if (is_slow) *is_slow = TPL_FALSE; - return TPL_ERROR_NONE; - } - } - - return TPL_ERROR_INVALID_PARAMETER; -} - -static tpl_result_t -__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id, - int alpha_size) -{ - TPL_IGNORE(display); - TPL_IGNORE(visual_id); - TPL_IGNORE(alpha_size); - return TPL_ERROR_NONE; -} - -static tpl_result_t -__tpl_wl_egl_display_get_window_info(tpl_display_t *display, - tpl_handle_t window, int *width, - int *height, tbm_format *format, - int depth, int a_size) -{ - tpl_result_t ret = TPL_ERROR_NONE; - struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window; - - TPL_ASSERT(display); - TPL_ASSERT(window); - - if (!wl_egl_window) { - TPL_ERR("Invalid parameter. 
tpl_handle_t(%p)", window); - return TPL_ERROR_INVALID_PARAMETER; - } - - if (width) *width = wl_egl_window->width; - if (height) *height = wl_egl_window->height; - if (format) { - struct tizen_private *tizen_private = - (struct tizen_private *)wl_egl_window->driver_private; - if (tizen_private && tizen_private->data) { - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)tizen_private->data; - *format = wl_egl_surface->format; - } else { - if (a_size == 8) - *format = TBM_FORMAT_ARGB8888; - else - *format = TBM_FORMAT_XRGB8888; - } - } - - return ret; -} - -static tpl_result_t -__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display, - tpl_handle_t pixmap, int *width, - int *height, tbm_format *format) -{ - tbm_surface_h tbm_surface = NULL; - - if (!pixmap) { - TPL_ERR("Invalid parameter. tpl_handle_t(%p)", pixmap); - return TPL_ERROR_INVALID_PARAMETER; - } - - tbm_surface = wayland_tbm_server_get_surface(NULL, - (struct wl_resource *)pixmap); - if (!tbm_surface) { - TPL_ERR("Failed to get tbm_surface from wayland_tbm."); - return TPL_ERROR_INVALID_PARAMETER; - } - - if (width) *width = tbm_surface_get_width(tbm_surface); - if (height) *height = tbm_surface_get_height(tbm_surface); - if (format) *format = tbm_surface_get_format(tbm_surface); - - return TPL_ERROR_NONE; -} - -static tbm_surface_h -__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) -{ - tbm_surface_h tbm_surface = NULL; - - TPL_ASSERT(pixmap); - - tbm_surface = wayland_tbm_server_get_surface(NULL, - (struct wl_resource *)pixmap); - if (!tbm_surface) { - TPL_ERR("Failed to get tbm_surface_h from wayland_tbm."); - return NULL; - } - - return tbm_surface; -} - -tpl_bool_t -__tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy) -{ - struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy; - TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE); - - /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value - is 
a memory address pointing the structure of wl_display_interface. */ - if (wl_egl_native_dpy == &wl_display_interface) - return TPL_TRUE; - - if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name, - strlen(wl_display_interface.name)) == 0) { - return TPL_TRUE; - } - - return TPL_FALSE; -} - -/* -- BEGIN -- wl_egl_window callback functions */ -static void -__cb_destroy_callback(void *private) -{ - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - - if (!tizen_private) { - TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface"); - return; - } - - wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - if (wl_egl_surface) { - TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.", - wl_egl_surface->wl_egl_window); - TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface."); - - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - wl_egl_surface->wl_egl_window->destroy_window_callback = NULL; - wl_egl_surface->wl_egl_window->resize_callback = NULL; - wl_egl_surface->wl_egl_window->driver_private = NULL; - wl_egl_surface->wl_egl_window = NULL; - wl_egl_surface->wl_surface = NULL; - - tizen_private->set_window_serial_callback = NULL; - tizen_private->rotate_callback = NULL; - tizen_private->get_rotation_capability = NULL; - tizen_private->set_frontbuffer_callback = NULL; - tizen_private->create_commit_sync_fd = NULL; - tizen_private->create_presentation_sync_fd = NULL; - tizen_private->data = NULL; - - free(tizen_private); - tizen_private = NULL; - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } -} - -static void -__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); - - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - int cur_w, cur_h, req_w, req_h, 
format; - - if (!wl_egl_surface) { - TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", - wl_egl_window); - return; - } - - format = wl_egl_surface->format; - cur_w = wl_egl_surface->width; - cur_h = wl_egl_surface->height; - req_w = wl_egl_window->width; - req_h = wl_egl_window->height; - - TPL_INFO("[WINDOW_RESIZE]", - "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)", - wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h); - - if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format) - != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue); - return; - } -} -/* -- END -- wl_egl_window callback functions */ - -/* -- BEGIN -- wl_egl_window tizen private callback functions */ - -/* There is no usecase for using prerotation callback below */ -static void -__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); - - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - int rotation = tizen_private->rotation; - - if (!wl_egl_surface) { - TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", - wl_egl_window); - return; - } - - TPL_INFO("[WINDOW_ROTATE]", - "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)", - wl_egl_surface, wl_egl_window, - wl_egl_surface->rotation, rotation); - - wl_egl_surface->rotation = rotation; -} - -/* There is no usecase for using prerotation callback below */ -static int -__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window, - void *private) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); - - int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE; - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - - if (!wl_egl_surface) 
{ - TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", - wl_egl_window); - return rotation_capability; - } - - if (wl_egl_surface->prerotation_capability == TPL_TRUE) - rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED; - else - rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED; - - - return rotation_capability; -} - -static void -__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window, - void *private, unsigned int serial) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); - - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - - if (!wl_egl_surface) { - TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", - wl_egl_window); - return; - } - - wl_egl_surface->set_serial_is_used = TPL_TRUE; - wl_egl_surface->serial = serial; -} - -static int -__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); - - int commit_sync_fd = -1; - - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - - if (!wl_egl_surface) { - TPL_ERR("Invalid parameter. 
wl_egl_surface(%p) is NULL", wl_egl_surface); - return -1; - } - - tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); - - if (wl_egl_surface->commit_sync.fd != -1) { - commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); - TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)", - wl_egl_surface->commit_sync.fd, commit_sync_fd); - TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)", - wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd); - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); - return commit_sync_fd; - } - - wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC); - if (wl_egl_surface->commit_sync.fd == -1) { - TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)", wl_egl_surface); - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); - return -1; - } - - commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); - - TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)", - wl_egl_surface->commit_sync.fd, commit_sync_fd); - TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)", - wl_egl_surface, commit_sync_fd); - - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); - - return commit_sync_fd; -} - -static int -__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); - - int presentation_sync_fd = -1; - - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - - if (!wl_egl_surface) { - TPL_ERR("Invalid parameter. 
wl_egl_surface is NULL"); - return -1; - } - - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - if (wl_egl_surface->presentation_sync.fd != -1) { - presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); - TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)", - wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", - wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - return presentation_sync_fd; - } - - wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC); - if (wl_egl_surface->presentation_sync.fd == -1) { - TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)", wl_egl_surface); - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - return -1; - } - - presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); - TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)", - wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", - wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - - return presentation_sync_fd; -} -/* -- END -- wl_egl_window tizen private callback functions */ - -/* -- BEGIN -- tizen_surface_shm_flusher_listener */ -static void __cb_tss_flusher_flush_callback(void *data, - struct tizen_surface_shm_flusher *tss_flusher) -{ - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - - TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->tbm_queue); - - tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to flush 
tbm_queue(%p)", wl_egl_surface->tbm_queue); - return; - } -} - -static void __cb_tss_flusher_free_flush_callback(void *data, - struct tizen_surface_shm_flusher *tss_flusher) -{ - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - - TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->tbm_queue); - - tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue); - return; - } -} - -static const struct tizen_surface_shm_flusher_listener -tss_flusher_listener = { - __cb_tss_flusher_flush_callback, - __cb_tss_flusher_free_flush_callback -}; -/* -- END -- tizen_surface_shm_flusher_listener */ - - -/* -- BEGIN -- tbm_surface_queue callback funstions */ -static void -__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, - void *data) -{ - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tpl_wl_egl_display_t *wl_egl_display = NULL; - tpl_surface_t *surface = NULL; - tpl_bool_t is_activated = TPL_FALSE; - int width, height; - - wl_egl_surface = (tpl_wl_egl_surface_t *)data; - TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - - wl_egl_display = wl_egl_surface->wl_egl_display; - TPL_CHECK_ON_NULL_RETURN(wl_egl_display); - - surface = wl_egl_surface->tpl_surface; - TPL_CHECK_ON_NULL_RETURN(surface); - - /* When the queue is resized, change the reset flag to TPL_TRUE to reflect - * the changed window size at the next frame. 
*/ - width = tbm_surface_queue_get_width(tbm_queue); - height = tbm_surface_queue_get_height(tbm_queue); - if (surface->width != width || surface->height != height) { - TPL_INFO("[QUEUE_RESIZE]", - "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)", - wl_egl_surface, tbm_queue, - surface->width, surface->height, width, height); - } - - /* When queue_reset_callback is called, if is_activated is different from - * its previous state change the reset flag to TPL_TRUE to get a new buffer - * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ - is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client, - wl_egl_surface->tbm_queue); - if (wl_egl_surface->is_activated != is_activated) { - if (is_activated) { - TPL_INFO("[ACTIVATED]", - "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); - } else { - TPL_LOG_T("[DEACTIVATED]", - " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); - } - } - - wl_egl_surface->reset = TPL_TRUE; - - if (surface->reset_cb) - surface->reset_cb(surface->reset_data); -} - -static void -__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue, - void *data) -{ - TPL_IGNORE(tbm_queue); - - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; - TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - - tpl_gsource_send_message(wl_egl_surface->surf_source, 2); - - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); -} -/* -- END -- tbm_surface_queue callback funstions */ - -static void -_thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) -{ - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - - TPL_INFO("[SURFACE_FINI]", - "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", - wl_egl_surface, wl_egl_surface->wl_egl_window, - 
wl_egl_surface->wl_surface); - - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - - if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) { - while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) { - struct pst_feedback *pst_feedback = - (struct pst_feedback *)__tpl_list_pop_front( - wl_egl_surface->presentation_feedbacks, NULL); - if (pst_feedback) { - _write_to_eventfd(pst_feedback->pst_sync_fd); - close(pst_feedback->pst_sync_fd); - pst_feedback->pst_sync_fd = -1; - - wp_presentation_feedback_destroy(pst_feedback->presentation_feedback); - pst_feedback->presentation_feedback = NULL; - - free(pst_feedback); - } - } - - __tpl_list_free(wl_egl_surface->presentation_feedbacks, NULL); - wl_egl_surface->presentation_feedbacks = NULL; - } - - if (wl_egl_surface->presentation_sync.fd != -1) { - _write_to_eventfd(wl_egl_surface->presentation_sync.fd); - close(wl_egl_surface->presentation_sync.fd); - wl_egl_surface->presentation_sync.fd = -1; - } - - if (wl_egl_surface->vblank_waiting_buffers) { - __tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL); - wl_egl_surface->vblank_waiting_buffers = NULL; - } - - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - - - { - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - int idx = 0; - tpl_bool_t need_to_release = TPL_FALSE; - tpl_bool_t need_to_cancel = TPL_FALSE; - - while (wl_egl_surface->buffer_cnt) { - tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - wl_egl_buffer = wl_egl_surface->buffers[idx]; - if (wl_egl_buffer) { - TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)", - idx, wl_egl_buffer, - wl_egl_buffer->tbm_surface, - status_to_string[wl_egl_buffer->status]); - - wl_egl_surface->buffers[idx] = NULL; - wl_egl_surface->buffer_cnt--; - } else { - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); - idx++; - continue; - } - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); - 
- tpl_gmutex_lock(&wl_egl_buffer->mutex); - - need_to_release = (wl_egl_buffer->status == ACQUIRED || - wl_egl_buffer->status == WAITING_SIGNALED || - wl_egl_buffer->status == WAITING_VBLANK || - wl_egl_buffer->status == COMMITTED); - - need_to_cancel = wl_egl_buffer->status == DEQUEUED; - - if (wl_egl_buffer->status == WAITING_SIGNALED) { - tpl_result_t wait_result = TPL_ERROR_NONE; - wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, - &wl_egl_buffer->mutex, - 16); - if (wait_result == TPL_ERROR_TIME_OUT) - TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", - wl_egl_buffer); - } - - if (need_to_release) { - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, - wl_egl_buffer->tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", - wl_egl_buffer->tbm_surface, tsq_err); - } - - if (need_to_cancel) { - tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, - wl_egl_buffer->tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)", - wl_egl_buffer->tbm_surface, tsq_err); - } - - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - - if (need_to_release || need_to_cancel) - tbm_surface_internal_unref(wl_egl_buffer->tbm_surface); - - idx++; - } - } - - if (wl_egl_surface->surface_sync) { - TPL_INFO("[SURFACE_SYNC_DESTROY]", - "wl_egl_surface(%p) surface_sync(%p)", - wl_egl_surface, wl_egl_surface->surface_sync); - zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync); - wl_egl_surface->surface_sync = NULL; - } - - if (wl_egl_surface->tss_flusher) { - TPL_INFO("[FLUSHER_DESTROY]", - "wl_egl_surface(%p) tss_flusher(%p)", - wl_egl_surface, wl_egl_surface->tss_flusher); - tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher); - wl_egl_surface->tss_flusher = NULL; - } - - if (wl_egl_surface->vblank) { - TPL_INFO("[VBLANK_DESTROY]", - "wl_egl_surface(%p) vblank(%p)", - wl_egl_surface, 
wl_egl_surface->vblank); - tdm_client_vblank_destroy(wl_egl_surface->vblank); - wl_egl_surface->vblank = NULL; - } - - if (wl_egl_surface->tbm_queue) { - TPL_INFO("[TBM_QUEUE_DESTROY]", - "wl_egl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->tbm_queue); - tbm_surface_queue_destroy(wl_egl_surface->tbm_queue); - wl_egl_surface->tbm_queue = NULL; - } - - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); -} - -static tpl_bool_t -__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) -{ - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - - wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); - - /* Initialize surface */ - if (message == 1) { - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - TPL_DEBUG("wl_egl_surface(%p) initialize message received!", - wl_egl_surface); - _thread_wl_egl_surface_init(wl_egl_surface); - tpl_gcond_signal(&wl_egl_surface->surf_cond); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } else if (message == 2) { - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - TPL_DEBUG("wl_egl_surface(%p) acquirable message received!", - wl_egl_surface); - _thread_surface_queue_acquire(wl_egl_surface); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } - - return TPL_TRUE; -} - -static void -__thread_func_surf_finalize(tpl_gsource *gsource) -{ - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - - wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); - TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - - _thread_wl_egl_surface_fini(wl_egl_surface); - - TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%p)", - gsource, wl_egl_surface); -} - -static tpl_gsource_functions surf_funcs = { - .prepare = NULL, - .check = NULL, - .dispatch = __thread_func_surf_dispatch, - .finalize = __thread_func_surf_finalize, -}; - -static tpl_result_t -__tpl_wl_egl_surface_init(tpl_surface_t *surface) -{ - tpl_wl_egl_display_t *wl_egl_display = NULL; - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tpl_gsource *surf_source = 
NULL; - - struct wl_egl_window *wl_egl_window = - (struct wl_egl_window *)surface->native_handle; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); - TPL_ASSERT(surface->native_handle); - - wl_egl_display = - (tpl_wl_egl_display_t *)surface->display->backend.data; - if (!wl_egl_display) { - TPL_ERR("Invalid parameter. wl_egl_display(%p)", - wl_egl_display); - return TPL_ERROR_INVALID_PARAMETER; - } - - wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1, - sizeof(tpl_wl_egl_surface_t)); - if (!wl_egl_surface) { - TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t."); - return TPL_ERROR_OUT_OF_MEMORY; - } - - surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface, - -1, &surf_funcs, SOURCE_TYPE_NORMAL); - if (!surf_source) { - TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)", - wl_egl_surface); - goto surf_source_create_fail; - } - - surface->backend.data = (void *)wl_egl_surface; - surface->width = wl_egl_window->width; - surface->height = wl_egl_window->height; - surface->rotation = 0; - - wl_egl_surface->tpl_surface = surface; - wl_egl_surface->width = wl_egl_window->width; - wl_egl_surface->height = wl_egl_window->height; - wl_egl_surface->format = surface->format; - - wl_egl_surface->surf_source = surf_source; - wl_egl_surface->wl_egl_window = wl_egl_window; - wl_egl_surface->wl_surface = wl_egl_window->surface; - - wl_egl_surface->wl_egl_display = wl_egl_display; - - wl_egl_surface->reset = TPL_FALSE; - wl_egl_surface->is_activated = TPL_FALSE; - wl_egl_surface->need_to_enqueue = TPL_TRUE; - wl_egl_surface->prerotation_capability = TPL_FALSE; - wl_egl_surface->vblank_done = TPL_TRUE; - wl_egl_surface->use_render_done_fence = TPL_FALSE; - wl_egl_surface->set_serial_is_used = TPL_FALSE; - - wl_egl_surface->latest_transform = 0; - wl_egl_surface->render_done_cnt = 0; - wl_egl_surface->serial = 0; - - wl_egl_surface->vblank = NULL; - 
wl_egl_surface->tss_flusher = NULL; - wl_egl_surface->surface_sync = NULL; - - wl_egl_surface->post_interval = surface->post_interval; - - wl_egl_surface->commit_sync.fd = -1; - wl_egl_surface->presentation_sync.fd = -1; - - { - int i = 0; - for (i = 0; i < BUFFER_ARRAY_SIZE; i++) - wl_egl_surface->buffers[i] = NULL; - wl_egl_surface->buffer_cnt = 0; - } - - { - struct tizen_private *tizen_private = NULL; - - if (wl_egl_window->driver_private) - tizen_private = (struct tizen_private *)wl_egl_window->driver_private; - else { - tizen_private = tizen_private_create(); - wl_egl_window->driver_private = (void *)tizen_private; - } - - if (tizen_private) { - tizen_private->data = (void *)wl_egl_surface; - tizen_private->rotate_callback = (void *)__cb_rotate_callback; - tizen_private->get_rotation_capability = (void *) - __cb_get_rotation_capability; - tizen_private->set_window_serial_callback = (void *) - __cb_set_window_serial_callback; - tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd; - tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd; - - wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback; - wl_egl_window->resize_callback = (void *)__cb_resize_callback; - } - } - - tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex); - tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex); - - tpl_gmutex_init(&wl_egl_surface->buffers_mutex); - - tpl_gmutex_init(&wl_egl_surface->surf_mutex); - tpl_gcond_init(&wl_egl_surface->surf_cond); - - /* Initialize in thread */ - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - tpl_gsource_send_message(wl_egl_surface->surf_source, 1); - tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - - TPL_ASSERT(wl_egl_surface->tbm_queue); - - TPL_INFO("[SURFACE_INIT]", - "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)", - surface, wl_egl_surface, wl_egl_surface->surf_source); - - return 
TPL_ERROR_NONE; - -surf_source_create_fail: - free(wl_egl_surface); - surface->backend.data = NULL; - return TPL_ERROR_INVALID_OPERATION; -} - -static tbm_surface_queue_h -_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface, - struct wayland_tbm_client *wl_tbm_client, - int num_buffers) -{ - tbm_surface_queue_h tbm_queue = NULL; - tbm_bufmgr bufmgr = NULL; - unsigned int capability; - - struct wl_surface *wl_surface = wl_egl_surface->wl_surface; - int width = wl_egl_surface->width; - int height = wl_egl_surface->height; - int format = wl_egl_surface->format; - - if (!wl_tbm_client || !wl_surface) { - TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)", - wl_tbm_client, wl_surface); - return NULL; - } - - bufmgr = tbm_bufmgr_init(-1); - capability = tbm_bufmgr_get_capability(bufmgr); - tbm_bufmgr_deinit(bufmgr); - - if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) { - tbm_queue = wayland_tbm_client_create_surface_queue_tiled( - wl_tbm_client, - wl_surface, - num_buffers, - width, - height, - format); - } else { - tbm_queue = wayland_tbm_client_create_surface_queue( - wl_tbm_client, - wl_surface, - num_buffers, - width, - height, - format); - } - - if (!tbm_queue) { - TPL_ERR("Failed to create tbm_queue. 
wl_tbm_client(%p)", - wl_tbm_client); - return NULL; - } - - if (tbm_surface_queue_set_modes( - tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) != - TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", - tbm_queue); - tbm_surface_queue_destroy(tbm_queue); - return NULL; - } - - if (tbm_surface_queue_add_reset_cb( - tbm_queue, - __cb_tbm_queue_reset_callback, - (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", - tbm_queue); - tbm_surface_queue_destroy(tbm_queue); - return NULL; - } - - if (tbm_surface_queue_add_acquirable_cb( - tbm_queue, - __cb_tbm_queue_acquirable_callback, - (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", - tbm_queue); - tbm_surface_queue_destroy(tbm_queue); - return NULL; - } - - return tbm_queue; -} - -static tdm_client_vblank* -_thread_create_tdm_client_vblank(tdm_client *tdm_client) -{ - tdm_client_vblank *vblank = NULL; - tdm_client_output *tdm_output = NULL; - tdm_error tdm_err = TDM_ERROR_NONE; - - if (!tdm_client) { - TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client); - return NULL; - } - - tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err); - if (!tdm_output || tdm_err != TDM_ERROR_NONE) { - TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err); - return NULL; - } - - vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err); - if (!vblank || tdm_err != TDM_ERROR_NONE) { - TPL_ERR("Failed to create vblank. 
tdm_err(%d)", tdm_err); - return NULL; - } - - tdm_client_vblank_set_enable_fake(vblank, 1); - tdm_client_vblank_set_sync(vblank, 0); - - return vblank; -} - -static void -_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) -{ - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - - wl_egl_surface->tbm_queue = _thread_create_tbm_queue( - wl_egl_surface, - wl_egl_display->wl_tbm_client, - CLIENT_QUEUE_SIZE); - if (!wl_egl_surface->tbm_queue) { - TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)", - wl_egl_surface, wl_egl_display->wl_tbm_client); - return; - } - - TPL_INFO("[QUEUE_CREATION]", - "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)", - wl_egl_surface, wl_egl_surface->wl_surface, - wl_egl_display->wl_tbm_client); - TPL_INFO("[QUEUE_CREATION]", - "tbm_queue(%p) size(%d x %d) X %d format(%d)", - wl_egl_surface->tbm_queue, - wl_egl_surface->width, - wl_egl_surface->height, - CLIENT_QUEUE_SIZE, - wl_egl_surface->format); - - wl_egl_surface->vblank = _thread_create_tdm_client_vblank( - wl_egl_display->tdm_client); - if (wl_egl_surface->vblank) { - TPL_INFO("[VBLANK_INIT]", - "wl_egl_surface(%p) tdm_client(%p) vblank(%p)", - wl_egl_surface, wl_egl_display->tdm_client, - wl_egl_surface->vblank); - } - - if (wl_egl_display->tss) { - wl_egl_surface->tss_flusher = - tizen_surface_shm_get_flusher(wl_egl_display->tss, - wl_egl_surface->wl_surface); - } - - if (wl_egl_surface->tss_flusher) { - tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher, - &tss_flusher_listener, - wl_egl_surface); - TPL_INFO("[FLUSHER_INIT]", - "wl_egl_surface(%p) tss_flusher(%p)", - wl_egl_surface, wl_egl_surface->tss_flusher); - } - - if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) { - wl_egl_surface->surface_sync = - zwp_linux_explicit_synchronization_v1_get_synchronization( - wl_egl_display->explicit_sync, wl_egl_surface->wl_surface); - if (wl_egl_surface->surface_sync) { - 
TPL_INFO("[EXPLICIT_SYNC_INIT]", - "wl_egl_surface(%p) surface_sync(%p)", - wl_egl_surface, wl_egl_surface->surface_sync); - } else { - TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)", - wl_egl_surface); - wl_egl_display->use_explicit_sync = TPL_FALSE; - } - } - - wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc(); - wl_egl_surface->presentation_feedbacks = __tpl_list_alloc(); -} - -static void -__tpl_wl_egl_surface_fini(tpl_surface_t *surface) -{ - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tpl_wl_egl_display_t *wl_egl_display = NULL; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - - TPL_CHECK_ON_FALSE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW); - - wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data; - TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - - wl_egl_display = wl_egl_surface->wl_egl_display; - TPL_CHECK_ON_NULL_RETURN(wl_egl_display); - - TPL_INFO("[SURFACE_FINI][BEGIN]", - "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", - wl_egl_surface, - wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue); - - if (wl_egl_surface->surf_source) - tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); - wl_egl_surface->surf_source = NULL; - - _print_buffer_lists(wl_egl_surface); - - if (wl_egl_surface->wl_egl_window) { - struct tizen_private *tizen_private = NULL; - struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; - TPL_INFO("[WL_EGL_WINDOW_FINI]", - "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", - wl_egl_surface, wl_egl_window, - wl_egl_surface->wl_surface); - tizen_private = (struct tizen_private *)wl_egl_window->driver_private; - if (tizen_private) { - tizen_private->set_window_serial_callback = NULL; - tizen_private->rotate_callback = NULL; - tizen_private->get_rotation_capability = NULL; - tizen_private->create_presentation_sync_fd = NULL; - tizen_private->create_commit_sync_fd = NULL; - tizen_private->set_frontbuffer_callback = NULL; - tizen_private->merge_sync_fds = NULL; - 
tizen_private->data = NULL; - free(tizen_private); - - wl_egl_window->driver_private = NULL; - } - - wl_egl_window->destroy_window_callback = NULL; - wl_egl_window->resize_callback = NULL; - - wl_egl_surface->wl_egl_window = NULL; - } - - wl_egl_surface->wl_surface = NULL; - wl_egl_surface->wl_egl_display = NULL; - wl_egl_surface->tpl_surface = NULL; - - tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); - tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex); - - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex); - - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - tpl_gmutex_clear(&wl_egl_surface->surf_mutex); - tpl_gcond_clear(&wl_egl_surface->surf_cond); - - TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface); - - free(wl_egl_surface); - surface->backend.data = NULL; -} - -static tpl_result_t -__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface, - tpl_bool_t set) -{ - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - - TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); - - wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; - - TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); - - TPL_INFO("[SET_PREROTATION_CAPABILITY]", - "wl_egl_surface(%p) prerotation capability set to [%s]", - wl_egl_surface, (set ? 
"TRUE" : "FALSE")); - - wl_egl_surface->prerotation_capability = set; - return TPL_ERROR_NONE; -} - -static tpl_result_t -__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface, - int post_interval) -{ - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - - TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); - - wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; - - TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); - - TPL_INFO("[SET_POST_INTERVAL]", - "wl_egl_surface(%p) post_interval(%d -> %d)", - wl_egl_surface, wl_egl_surface->post_interval, post_interval); - - wl_egl_surface->post_interval = post_interval; - - return TPL_ERROR_NONE; -} - -static tpl_bool_t -__tpl_wl_egl_surface_validate(tpl_surface_t *surface) -{ - tpl_bool_t retval = TPL_TRUE; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; - - retval = !(wl_egl_surface->reset); - - return retval; -} - -static void -__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) -{ - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; - - if (width) - *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); - if (height) - *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); -} - -#define CAN_DEQUEUE_TIMEOUT_MS 10000 - -tpl_result_t -_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) -{ - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - - _print_buffer_lists(wl_egl_surface); - - if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue)) - != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)", - wl_egl_surface->tbm_queue, tsq_err); - return TPL_ERROR_INVALID_OPERATION; - } - - { - int i; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - for (i = 0; i < BUFFER_ARRAY_SIZE; i++) { - 
tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - wl_egl_buffer = wl_egl_surface->buffers[i]; - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); - if (wl_egl_buffer && wl_egl_buffer->status == COMMITTED) { - wl_egl_buffer->status = RELEASED; - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, - wl_egl_buffer->tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", - wl_egl_buffer->tbm_surface, tsq_err); - tbm_surface_internal_unref(wl_egl_buffer->tbm_surface); - } - } - } - - TPL_INFO("[FORCE_FLUSH]", - "wl_egl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->tbm_queue); - - return TPL_ERROR_NONE; -} - -static void -_wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer, - tpl_wl_egl_surface_t *wl_egl_surface) -{ - struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; - struct tizen_private *tizen_private = - (struct tizen_private *)wl_egl_window->driver_private; - - TPL_ASSERT(tizen_private); - - wl_egl_buffer->draw_done = TPL_FALSE; - wl_egl_buffer->need_to_commit = TPL_TRUE; - - wl_egl_buffer->acquire_fence_fd = -1; - wl_egl_buffer->release_fence_fd = -1; - wl_egl_buffer->commit_sync_fd = -1; - wl_egl_buffer->presentation_sync_fd = -1; - - wl_egl_buffer->buffer_release = NULL; - - wl_egl_buffer->transform = tizen_private->transform; - - if (wl_egl_buffer->w_transform != tizen_private->window_transform) { - wl_egl_buffer->w_transform = tizen_private->window_transform; - wl_egl_buffer->w_rotated = TPL_TRUE; - } - - if (wl_egl_surface->set_serial_is_used) { - wl_egl_buffer->serial = wl_egl_surface->serial; - } else { - wl_egl_buffer->serial = ++tizen_private->serial; - } - - if (wl_egl_buffer->rects) { - free(wl_egl_buffer->rects); - wl_egl_buffer->rects = NULL; - wl_egl_buffer->num_rects = 0; - } -} - -static tpl_wl_egl_buffer_t * -_get_wl_egl_buffer(tbm_surface_h tbm_surface) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - 
tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER, - (void **)&wl_egl_buffer); - return wl_egl_buffer; -} - -static tpl_wl_egl_buffer_t * -_wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface, - tbm_surface_h tbm_surface) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; - - wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); - - if (!wl_egl_buffer) { - wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t)); - TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL); - - tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER, - (tbm_data_free)__cb_wl_egl_buffer_free); - tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER, - wl_egl_buffer); - - wl_egl_buffer->wl_buffer = NULL; - wl_egl_buffer->tbm_surface = tbm_surface; - wl_egl_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface); - wl_egl_buffer->wl_egl_surface = wl_egl_surface; - - wl_egl_buffer->status = RELEASED; - - wl_egl_buffer->dx = wl_egl_window->dx; - wl_egl_buffer->dy = wl_egl_window->dy; - wl_egl_buffer->width = tbm_surface_get_width(tbm_surface); - wl_egl_buffer->height = tbm_surface_get_height(tbm_surface); - - tpl_gmutex_init(&wl_egl_buffer->mutex); - tpl_gcond_init(&wl_egl_buffer->cond); - - tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - { - int i; - for (i = 0; i < BUFFER_ARRAY_SIZE; i++) - if (wl_egl_surface->buffers[i] == NULL) break; - - wl_egl_surface->buffer_cnt++; - wl_egl_surface->buffers[i] = wl_egl_buffer; - wl_egl_buffer->idx = i; - } - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); - - TPL_INFO("[WL_EGL_BUFFER_CREATE]", - "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)", - wl_egl_surface, wl_egl_buffer, tbm_surface, - wl_egl_buffer->bo_name); - } - - _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface); - - return wl_egl_buffer; -} - -static tbm_surface_h -__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, - 
int32_t *release_fence) -{ - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->display->backend.data); - TPL_OBJECT_CHECK_RETURN(surface, NULL); - - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)surface->display->backend.data; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_bool_t is_activated = 0; - int bo_name = 0; - tbm_surface_h tbm_surface = NULL; - - TPL_OBJECT_UNLOCK(surface); - tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( - wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS); - TPL_OBJECT_LOCK(surface); - - /* After the can dequeue state, lock the wl_event_mutex to prevent other - * events from being processed in wayland_egl_thread - * during below dequeue procedure. */ - tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); - - if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { - TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", - wl_egl_surface->tbm_queue, surface); - if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) { - TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)", - wl_egl_surface->tbm_queue, surface); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return NULL; - } else { - tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - } - } - - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)", - wl_egl_surface->tbm_queue, surface); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return NULL; - } - - /* wayland client can check their states (ACTIVATED or DEACTIVATED) with - * below function [wayland_tbm_client_queue_check_activate()]. - * This function has to be called before tbm_surface_queue_dequeue() - * in order to know what state the buffer will be dequeued next. 
- * - * ACTIVATED state means non-composite mode. Client can get buffers which - can be displayed directly(without compositing). - * DEACTIVATED state means composite mode. Client's buffer will be displayed - by compositor(E20) with compositing. - */ - is_activated = wayland_tbm_client_queue_check_activate( - wl_egl_display->wl_tbm_client, - wl_egl_surface->tbm_queue); - - wl_egl_surface->is_activated = is_activated; - - surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); - surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); - wl_egl_surface->width = surface->width; - wl_egl_surface->height = surface->height; - - if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { - /* If surface->frontbuffer is already set in frontbuffer mode, - * it will return that frontbuffer if it is still activated, - * otherwise dequeue the new buffer after initializing - * surface->frontbuffer to NULL. */ - if (is_activated && !wl_egl_surface->reset) { - bo_name = _get_tbm_surface_bo_name(surface->frontbuffer); - - TPL_LOG_T("WL_EGL", - "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", - surface->frontbuffer, bo_name); - TRACE_ASYNC_BEGIN((int)surface->frontbuffer, - "[DEQ]~[ENQ] BO_NAME:%d", - bo_name); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return surface->frontbuffer; - } else { - surface->frontbuffer = NULL; - wl_egl_surface->need_to_enqueue = TPL_TRUE; - } - } else { - surface->frontbuffer = NULL; - } - - tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue, - &tbm_surface); - if (!tbm_surface) { - TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d", - wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return NULL; - } - - tbm_surface_internal_ref(tbm_surface); - - wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface); - TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer."); 
- - tpl_gmutex_lock(&wl_egl_buffer->mutex); - wl_egl_buffer->status = DEQUEUED; - - /* If wl_egl_buffer->release_fence_fd is -1, - * the tbm_surface can be used immediately. - * If not, user(EGL) have to wait until signaled. */ - if (release_fence) { - if (wl_egl_surface->surface_sync) { - *release_fence = wl_egl_buffer->release_fence_fd; - TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)", - wl_egl_surface, wl_egl_buffer, *release_fence); - } else { - *release_fence = -1; - } - } - - if (surface->is_frontbuffer_mode && is_activated) - surface->frontbuffer = tbm_surface; - - wl_egl_surface->reset = TPL_FALSE; - - TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", - wl_egl_buffer->bo_name); - TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", - wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name, - release_fence ? *release_fence : -1); - - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - - return tbm_surface; -} - -static tpl_result_t -__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface) -{ - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Invalid buffer. 
tbm_surface(%p)", tbm_surface); - return TPL_ERROR_INVALID_PARAMETER; - } - - wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); - if (wl_egl_buffer) { - tpl_gmutex_lock(&wl_egl_buffer->mutex); - wl_egl_buffer->status = RELEASED; - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - } - - tbm_surface_internal_unref(tbm_surface); - - tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to release tbm_surface(%p) surface(%p)", - tbm_surface, surface); - return TPL_ERROR_INVALID_OPERATION; - } - - TPL_INFO("[CANCEL_BUFFER]", "wl_egl_surface(%p) tbm_surface(%p) bo(%d)", - wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); - - return TPL_ERROR_NONE; -} - -static tpl_result_t -__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface, - int num_rects, const int *rects, int32_t acquire_fence) -{ - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(tbm_surface); - TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); - - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *) surface->backend.data; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - int bo_name = -1; - - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", - tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - return TPL_ERROR_INVALID_PARAMETER; - } - - bo_name = _get_tbm_surface_bo_name(tbm_surface); - - TRACE_MARK("[ENQ] BO_NAME:%d", bo_name); - - TPL_LOG_T("WL_EGL", - "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", - wl_egl_surface, tbm_surface, bo_name, acquire_fence); - - wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); - tpl_gmutex_lock(&wl_egl_buffer->mutex); - - /* If there are received region information, save it to 
wl_egl_buffer */ - if (num_rects && rects) { - if (wl_egl_buffer->rects != NULL) { - free(wl_egl_buffer->rects); - wl_egl_buffer->rects = NULL; - wl_egl_buffer->num_rects = 0; - } - - wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects)); - wl_egl_buffer->num_rects = num_rects; - - if (!wl_egl_buffer->rects) { - TPL_ERR("Failed to allocate memory fo damage rects info."); - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - return TPL_ERROR_OUT_OF_MEMORY; - } - - memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects); - } - - if (!wl_egl_surface->need_to_enqueue || - !wl_egl_buffer->need_to_commit) { - TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", - ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - return TPL_ERROR_NONE; - } - - /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and - * commit if surface->frontbuffer that is already set and the tbm_surface - * client want to enqueue are the same. - */ - if (surface->is_frontbuffer_mode) { - /* The first buffer to be activated in frontbuffer mode must be - * committed. Subsequence frames do not need to be committed because - * the buffer is already displayed. 
- */ - if (surface->frontbuffer == tbm_surface) - wl_egl_surface->need_to_enqueue = TPL_FALSE; - - if (acquire_fence != -1) { - close(acquire_fence); - acquire_fence = -1; - } - } - - if (wl_egl_buffer->acquire_fence_fd != -1) - close(wl_egl_buffer->acquire_fence_fd); - - wl_egl_buffer->acquire_fence_fd = acquire_fence; - wl_egl_buffer->status = ENQUEUED; - - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - if (wl_egl_surface->presentation_sync.fd != -1) { - wl_egl_buffer->presentation_sync_fd = wl_egl_surface->presentation_sync.fd; - wl_egl_surface->presentation_sync.fd = -1; - } - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - - tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); - if (wl_egl_surface->commit_sync.fd != -1) { - wl_egl_buffer->commit_sync_fd = wl_egl_surface->commit_sync.fd; - wl_egl_surface->commit_sync.fd = -1; - TRACE_ASYNC_BEGIN(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - } - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); - - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - - tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - tbm_surface_internal_unref(tbm_surface); - TPL_ERR("Failed to enqueue tbm_surface(%p). 
wl_egl_surface(%p) tsq_err=%d", - tbm_surface, wl_egl_surface, tsq_err); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - return TPL_ERROR_INVALID_OPERATION; - } - - tbm_surface_internal_unref(tbm_surface); - - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - - return TPL_ERROR_NONE; -} - -static tpl_bool_t -__thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = - (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource); - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; - - wl_egl_surface->render_done_cnt++; - - TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)", - wl_egl_buffer->acquire_fence_fd); - - TPL_DEBUG("[RENDER DONE] wl_egl_buffer(%p) tbm_surface(%p)", - wl_egl_buffer, tbm_surface); - - tpl_gmutex_lock(&wl_egl_buffer->mutex); - tpl_gcond_signal(&wl_egl_buffer->cond); - wl_egl_buffer->status = WAITING_VBLANK; - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - - if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done) - _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer); - else - __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, - wl_egl_buffer); - - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - - return TPL_FALSE; -} - -static void -__thread_func_waiting_source_finalize(tpl_gsource *gsource) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = - (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource); - - TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)", - wl_egl_buffer, wl_egl_buffer->waiting_source, - wl_egl_buffer->acquire_fence_fd); - - close(wl_egl_buffer->acquire_fence_fd); - wl_egl_buffer->acquire_fence_fd = -1; - wl_egl_buffer->waiting_source = NULL; -} - -static tpl_gsource_functions 
buffer_funcs = { - .prepare = NULL, - .check = NULL, - .dispatch = __thread_func_waiting_source_dispatch, - .finalize = __thread_func_waiting_source_finalize, -}; - -static tpl_result_t -_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) -{ - tbm_surface_h tbm_surface = NULL; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - tpl_bool_t ready_to_commit = TPL_FALSE; - - while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) { - tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue, - &tbm_surface); - if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to acquire from tbm_queue(%p)", - wl_egl_surface->tbm_queue); - return TPL_ERROR_INVALID_OPERATION; - } - - tbm_surface_internal_ref(tbm_surface); - - wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); - TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, - "wl_egl_buffer sould be not NULL"); - - tpl_gmutex_lock(&wl_egl_buffer->mutex); - - wl_egl_buffer->status = ACQUIRED; - - if (wl_egl_buffer->wl_buffer == NULL) { - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - wl_egl_buffer->wl_buffer = - (struct wl_proxy *)wayland_tbm_client_create_buffer( - wl_egl_display->wl_tbm_client, tbm_surface); - - if (!wl_egl_buffer->wl_buffer) { - TPL_WARN("Failed to create wl_buffer. 
wl_tbm_client(%p) tbm_surface(%p)", - wl_egl_display->wl_tbm_client, tbm_surface); - } - } - - if (wl_egl_buffer->acquire_fence_fd != -1) { - if (wl_egl_surface->surface_sync) - ready_to_commit = TPL_TRUE; - else { - if (wl_egl_buffer->waiting_source) { - tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); - wl_egl_buffer->waiting_source = NULL; - } - - wl_egl_buffer->waiting_source = - tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer, - wl_egl_buffer->acquire_fence_fd, &buffer_funcs, - SOURCE_TYPE_DISPOSABLE); - wl_egl_buffer->status = WAITING_SIGNALED; - - TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)", - wl_egl_buffer->acquire_fence_fd); - - ready_to_commit = TPL_FALSE; - } - } - - if (ready_to_commit) { - if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done) - ready_to_commit = TPL_TRUE; - else { - wl_egl_buffer->status = WAITING_VBLANK; - __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, wl_egl_buffer); - ready_to_commit = TPL_FALSE; - } - } - - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - - if (ready_to_commit) - _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer); - } - - return TPL_ERROR_NONE; -} - -/* -- BEGIN -- tdm_client vblank callback function */ -static void -__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, - unsigned int sequence, unsigned int tv_sec, - unsigned int tv_usec, void *user_data) -{ - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - - TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK"); - TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface); - - if (error == TDM_ERROR_TIMEOUT) - TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. 
wl_egl_surface(%p)", - wl_egl_surface); - - wl_egl_surface->vblank_done = TPL_TRUE; - - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front( - wl_egl_surface->vblank_waiting_buffers, - NULL); - if (wl_egl_buffer) - _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); -} -/* -- END -- tdm_client vblank callback function */ - -static void -__cb_buffer_fenced_release(void *data, - struct zwp_linux_buffer_release_v1 *release, int32_t fence) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; - tbm_surface_h tbm_surface = NULL; - - TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer); - - tbm_surface = wl_egl_buffer->tbm_surface; - - if (tbm_surface_internal_is_valid(tbm_surface)) { - if (wl_egl_buffer->status == COMMITTED) { - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tbm_surface_queue_error_e tsq_err; - - tpl_gmutex_lock(&wl_egl_buffer->mutex); - - zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); - wl_egl_buffer->buffer_release = NULL; - - wl_egl_buffer->release_fence_fd = fence; - wl_egl_buffer->status = RELEASED; - - TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", - _get_tbm_surface_bo_name(tbm_surface), - fence); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - - TPL_LOG_T("WL_EGL", - "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", - wl_egl_buffer->wl_buffer, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface), - fence); - - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); - - tbm_surface_internal_unref(tbm_surface); - } - } else { - TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); - } -} - -static void 
-__cb_buffer_immediate_release(void *data, - struct zwp_linux_buffer_release_v1 *release) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; - tbm_surface_h tbm_surface = NULL; - - TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer); - - tbm_surface = wl_egl_buffer->tbm_surface; - - if (tbm_surface_internal_is_valid(tbm_surface)) { - if (wl_egl_buffer->status == COMMITTED) { - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tbm_surface_queue_error_e tsq_err; - - tpl_gmutex_lock(&wl_egl_buffer->mutex); - - zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); - wl_egl_buffer->buffer_release = NULL; - - wl_egl_buffer->release_fence_fd = -1; - wl_egl_buffer->status = RELEASED; - - TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - - TPL_LOG_T("WL_EGL", - "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)", - wl_egl_buffer->wl_buffer, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); - - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); - - tbm_surface_internal_unref(tbm_surface); - } - } else { - TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); - } -} - -static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = { - __cb_buffer_fenced_release, - __cb_buffer_immediate_release, -}; - -static void -__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; - tbm_surface_h tbm_surface = NULL; - - TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer) - - tbm_surface = wl_egl_buffer->tbm_surface; - - if (tbm_surface_internal_is_valid(tbm_surface)) { - tpl_wl_egl_surface_t *wl_egl_surface = 
wl_egl_buffer->wl_egl_surface; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; - - tpl_gmutex_lock(&wl_egl_buffer->mutex); - - if (wl_egl_buffer->status == COMMITTED) { - - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); - - wl_egl_buffer->status = RELEASED; - - TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - - TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", - wl_egl_buffer->wl_buffer, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); - } - - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - - if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) - tbm_surface_internal_unref(tbm_surface); - } else { - TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); - } -} - -static const struct wl_buffer_listener wl_buffer_release_listener = { - (void *)__cb_wl_buffer_release, -}; - -static void -__cb_presentation_feedback_sync_output(void *data, - struct wp_presentation_feedback *presentation_feedback, - struct wl_output *output) -{ - TPL_IGNORE(data); - TPL_IGNORE(presentation_feedback); - TPL_IGNORE(output); - /* Nothing to do */ -} - -static void -__cb_presentation_feedback_presented(void *data, - struct wp_presentation_feedback *presentation_feedback, - uint32_t tv_sec_hi, - uint32_t tv_sec_lo, - uint32_t tv_nsec, - uint32_t refresh_nsec, - uint32_t seq_hi, - uint32_t seq_lo, - uint32_t flags) -{ - TPL_IGNORE(tv_sec_hi); - TPL_IGNORE(tv_sec_lo); - TPL_IGNORE(tv_nsec); - TPL_IGNORE(refresh_nsec); - TPL_IGNORE(seq_hi); - TPL_IGNORE(seq_lo); - TPL_IGNORE(flags); - - struct pst_feedback *pst_feedback = (struct pst_feedback *)data; - tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface; - - 
tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - - TPL_DEBUG("[FEEDBACK][PRESENTED] pst_feedback(%p) presentation_feedback(%p) bo(%d)", - pst_feedback, presentation_feedback, pst_feedback->bo_name); - - if (pst_feedback->pst_sync_fd != -1) { - int ret = _write_to_eventfd(pst_feedback->pst_sync_fd); - if (ret == -1) { - TPL_ERR("Failed to send presentation_sync signal to fd(%d)", - pst_feedback->pst_sync_fd); - } - - TRACE_ASYNC_END(pst_feedback->pst_sync_fd, - "[PRESENTATION_SYNC] bo(%d)", - pst_feedback->bo_name); - - close(pst_feedback->pst_sync_fd); - pst_feedback->pst_sync_fd = -1; - } - - wp_presentation_feedback_destroy(presentation_feedback); - - pst_feedback->presentation_feedback = NULL; - pst_feedback->wl_egl_surface = NULL; - pst_feedback->bo_name = 0; - - __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback, - TPL_FIRST, NULL); - - free(pst_feedback); - - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); -} - -static void -__cb_presentation_feedback_discarded(void *data, - struct wp_presentation_feedback *presentation_feedback) -{ - struct pst_feedback *pst_feedback = (struct pst_feedback *)data; - tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface; - - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - - TPL_DEBUG("[FEEDBACK][DISCARDED] pst_feedback(%p) presentation_feedback(%p) bo(%d)", - pst_feedback, presentation_feedback, pst_feedback->bo_name); - - if (pst_feedback->pst_sync_fd != -1) { - int ret = _write_to_eventfd(pst_feedback->pst_sync_fd); - if (ret == -1) { - TPL_ERR("Failed to send presentation_sync signal to fd(%d)", - pst_feedback->pst_sync_fd); - } - - TRACE_ASYNC_END(pst_feedback->pst_sync_fd, - "[PRESENTATION_SYNC] bo(%d)", - pst_feedback->bo_name); - - close(pst_feedback->pst_sync_fd); - pst_feedback->pst_sync_fd = -1; - } - - wp_presentation_feedback_destroy(presentation_feedback); - - pst_feedback->presentation_feedback = NULL; - pst_feedback->wl_egl_surface = 
NULL; - pst_feedback->bo_name = 0; - - __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback, - TPL_FIRST, NULL); - - free(pst_feedback); - - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); -} - -static const struct wp_presentation_feedback_listener feedback_listener = { - __cb_presentation_feedback_sync_output, /* sync_output feedback -*/ - __cb_presentation_feedback_presented, - __cb_presentation_feedback_discarded -}; - -static tpl_result_t -_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface) -{ - tdm_error tdm_err = TDM_ERROR_NONE; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - - if (wl_egl_surface->vblank == NULL) { - wl_egl_surface->vblank = - _thread_create_tdm_client_vblank(wl_egl_display->tdm_client); - if (!wl_egl_surface->vblank) { - TPL_WARN("Failed to create vblank. wl_egl_surface(%p)", - wl_egl_surface); - return TPL_ERROR_OUT_OF_MEMORY; - } - } - - tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank, - wl_egl_surface->post_interval, - __cb_tdm_client_vblank, - (void *)wl_egl_surface); - - if (tdm_err == TDM_ERROR_NONE) { - wl_egl_surface->vblank_done = TPL_FALSE; - TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK"); - } else { - TPL_ERR("Failed to tdm_client_vblank_wait. 
tdm_err(%d)", tdm_err); - return TPL_ERROR_INVALID_OPERATION; - } - - return TPL_ERROR_NONE; -} - -static void -_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, - tpl_wl_egl_buffer_t *wl_egl_buffer) -{ - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - struct wl_surface *wl_surface = wl_egl_surface->wl_surface; - struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; - uint32_t version; - - TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, - "wl_egl_buffer sould be not NULL"); - - if (wl_egl_buffer->wl_buffer == NULL) { - wl_egl_buffer->wl_buffer = - (struct wl_proxy *)wayland_tbm_client_create_buffer( - wl_egl_display->wl_tbm_client, - wl_egl_buffer->tbm_surface); - } - TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL, - "[FATAL] Failed to create wl_buffer"); - - wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer, - &wl_buffer_release_listener, wl_egl_buffer); - - version = wl_proxy_get_version((struct wl_proxy *)wl_surface); - - /* create presentation feedback and add listener */ - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) { - - struct pst_feedback *pst_feedback = NULL; - pst_feedback = (struct pst_feedback *) calloc(1, sizeof(struct pst_feedback)); - if (pst_feedback) { - pst_feedback->presentation_feedback = - wp_presentation_feedback(wl_egl_display->presentation, - wl_surface); - - pst_feedback->wl_egl_surface = wl_egl_surface; - pst_feedback->bo_name = wl_egl_buffer->bo_name; - - pst_feedback->pst_sync_fd = wl_egl_buffer->presentation_sync_fd; - wl_egl_buffer->presentation_sync_fd = -1; - - wp_presentation_feedback_add_listener(pst_feedback->presentation_feedback, - &feedback_listener, pst_feedback); - __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, pst_feedback); - TRACE_ASYNC_BEGIN(pst_feedback->pst_sync_fd, - "[PRESENTATION_SYNC] bo(%d)", - pst_feedback->bo_name); - } 
else { - TPL_ERR("Failed to create presentation feedback. wl_egl_buffer(%p)", - wl_egl_buffer); - _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); - close(wl_egl_buffer->presentation_sync_fd); - wl_egl_buffer->presentation_sync_fd = -1; - } - } - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - - if (wl_egl_buffer->w_rotated == TPL_TRUE) { - wayland_tbm_client_set_buffer_transform( - wl_egl_display->wl_tbm_client, - (void *)wl_egl_buffer->wl_buffer, - wl_egl_buffer->w_transform); - wl_egl_buffer->w_rotated = TPL_FALSE; - } - - if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) { - wl_egl_surface->latest_transform = wl_egl_buffer->transform; - wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform); - } - - if (wl_egl_window) { - wl_egl_window->attached_width = wl_egl_buffer->width; - wl_egl_window->attached_height = wl_egl_buffer->height; - } - - wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer, - wl_egl_buffer->dx, wl_egl_buffer->dy); - - if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) { - if (version < 4) { - wl_surface_damage(wl_surface, - wl_egl_buffer->dx, wl_egl_buffer->dy, - wl_egl_buffer->width, wl_egl_buffer->height); - } else { - wl_surface_damage_buffer(wl_surface, - 0, 0, - wl_egl_buffer->width, wl_egl_buffer->height); - } - } else { - int i; - for (i = 0; i < wl_egl_buffer->num_rects; i++) { - int inverted_y = - wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] + - wl_egl_buffer->rects[i * 4 + 3]); - if (version < 4) { - wl_surface_damage(wl_surface, - wl_egl_buffer->rects[i * 4 + 0], - inverted_y, - wl_egl_buffer->rects[i * 4 + 2], - wl_egl_buffer->rects[i * 4 + 3]); - } else { - wl_surface_damage_buffer(wl_surface, - wl_egl_buffer->rects[i * 4 + 0], - inverted_y, - wl_egl_buffer->rects[i * 4 + 2], - wl_egl_buffer->rects[i * 4 + 3]); - } - } - } - - wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client, - (void *)wl_egl_buffer->wl_buffer, - 
wl_egl_buffer->serial); - - if (wl_egl_display->use_explicit_sync && - wl_egl_surface->surface_sync) { - - zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync, - wl_egl_buffer->acquire_fence_fd); - TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)", - wl_egl_surface, wl_egl_buffer->tbm_surface, wl_egl_buffer->acquire_fence_fd); - close(wl_egl_buffer->acquire_fence_fd); - wl_egl_buffer->acquire_fence_fd = -1; - - wl_egl_buffer->buffer_release = - zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync); - if (!wl_egl_buffer->buffer_release) { - TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface); - } else { - zwp_linux_buffer_release_v1_add_listener( - wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer); - TPL_DEBUG("add explicit_sync_release_listener."); - } - } - - wl_surface_commit(wl_surface); - - wl_display_flush(wl_egl_display->wl_display); - - TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - wl_egl_buffer->bo_name); - - wl_egl_buffer->need_to_commit = TPL_FALSE; - wl_egl_buffer->status = COMMITTED; - - TPL_LOG_T("WL_EGL", - "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)", - wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface, - wl_egl_buffer->bo_name); - - if (wl_egl_display->use_wait_vblank && - _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE) - TPL_ERR("Failed to set wait vblank."); - - tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); - - if (wl_egl_buffer->commit_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); - if (ret == -1) { - TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd); - } - - TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)", - wl_egl_buffer->bo_name); - TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)", - wl_egl_surface, 
wl_egl_buffer->commit_sync_fd); - - close(wl_egl_buffer->commit_sync_fd); - wl_egl_buffer->commit_sync_fd = -1; - } - - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); -} - -static int -_write_to_eventfd(int eventfd) -{ - uint64_t value = 1; - int ret; - - if (eventfd == -1) { - TPL_ERR("Invalid fd(-1)"); - return -1; - } - - ret = write(eventfd, &value, sizeof(uint64_t)); - if (ret == -1) { - TPL_ERR("failed to write to fd(%d)", eventfd); - return ret; - } - - return ret; -} - -void -__tpl_display_init_backend_wl_egl_thread2(tpl_display_backend_t *backend) -{ - TPL_ASSERT(backend); - - backend->type = TPL_BACKEND_WAYLAND_THREAD; - backend->data = NULL; - - backend->init = __tpl_wl_egl_display_init; - backend->fini = __tpl_wl_egl_display_fini; - backend->query_config = __tpl_wl_egl_display_query_config; - backend->filter_config = __tpl_wl_egl_display_filter_config; - backend->get_window_info = __tpl_wl_egl_display_get_window_info; - backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info; - backend->get_buffer_from_native_pixmap = - __tpl_wl_egl_display_get_buffer_from_native_pixmap; -} - -void -__tpl_surface_init_backend_wl_egl_thread2(tpl_surface_backend_t *backend) -{ - TPL_ASSERT(backend); - - backend->type = TPL_BACKEND_WAYLAND_THREAD; - backend->data = NULL; - - backend->init = __tpl_wl_egl_surface_init; - backend->fini = __tpl_wl_egl_surface_fini; - backend->validate = __tpl_wl_egl_surface_validate; - backend->cancel_dequeued_buffer = - __tpl_wl_egl_surface_cancel_dequeued_buffer; - backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer; - backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer; - backend->set_rotation_capability = - __tpl_wl_egl_surface_set_rotation_capability; - backend->set_post_interval = - __tpl_wl_egl_surface_set_post_interval; - backend->get_size = - __tpl_wl_egl_surface_get_size; -} - -static void -__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) -{ - tpl_wl_egl_surface_t *wl_egl_surface = 
wl_egl_buffer->wl_egl_surface; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - - TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", - wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); - - tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) { - wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL; - wl_egl_surface->buffer_cnt--; - - wl_egl_buffer->idx = -1; - } - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); - - wl_display_flush(wl_egl_display->wl_display); - - if (wl_egl_buffer->wl_buffer) - wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, - (void *)wl_egl_buffer->wl_buffer); - - if (wl_egl_buffer->commit_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); - if (ret == -1) - TPL_ERR("Failed to send commit_sync signal to fd(%d)", - wl_egl_buffer->commit_sync_fd); - close(wl_egl_buffer->commit_sync_fd); - wl_egl_buffer->commit_sync_fd = -1; - } - - if (wl_egl_buffer->presentation_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); - if (ret == -1) - TPL_ERR("Failed to send presentation_sync signal to fd(%d)", - wl_egl_buffer->presentation_sync_fd); - close(wl_egl_buffer->presentation_sync_fd); - wl_egl_buffer->presentation_sync_fd = -1; - } - - if (wl_egl_buffer->rects) { - free(wl_egl_buffer->rects); - wl_egl_buffer->rects = NULL; - wl_egl_buffer->num_rects = 0; - } - - wl_egl_buffer->tbm_surface = NULL; - wl_egl_buffer->bo_name = -1; - - free(wl_egl_buffer); -} - -static int -_get_tbm_surface_bo_name(tbm_surface_h tbm_surface) -{ - return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); -} - -static void -_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) -{ - int idx = 0; - - tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", - wl_egl_surface, 
wl_egl_surface->buffer_cnt); - for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { - tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; - if (wl_egl_buffer) { - TPL_INFO("[INFO]", - "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", - idx, wl_egl_buffer, wl_egl_buffer->tbm_surface, - wl_egl_buffer->bo_name, - status_to_string[wl_egl_buffer->status]); - } - } - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); -} diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index feb420e..1cd44b8 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1,828 +1,3159 @@ -#define inline __inline__ - -#undef inline #include "tpl_internal.h" #include #include #include +#include #include #include #include #include -#include "tpl_wayland_egl_thread.h" +#include +#include +#include +#include + +#include + +#include "wayland-egl-tizen/wayland-egl-tizen.h" +#include "wayland-egl-tizen/wayland-egl-tizen-priv.h" + +#include +#include +#include + +#include "tpl_utils_gthread.h" + +static int wl_egl_buffer_key; +#define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key) /* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. 
*/ #define CLIENT_QUEUE_SIZE 3 +#define BUFFER_ARRAY_SIZE (CLIENT_QUEUE_SIZE * 2) + +typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t; +typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t; +typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t; + +struct _tpl_wl_egl_display { + tpl_gsource *disp_source; + tpl_gthread *thread; + tpl_gmutex wl_event_mutex; -typedef struct _tpl_wayland_egl_display tpl_wayland_egl_display_t; -typedef struct _tpl_wayland_egl_surface tpl_wayland_egl_surface_t; + struct wl_display *wl_display; + struct wl_event_queue *ev_queue; + struct wayland_tbm_client *wl_tbm_client; + int last_error; /* errno of the last wl_display error*/ -struct _tpl_wayland_egl_display { - twe_thread *wl_egl_thread; - twe_display_h twe_display; + tpl_bool_t wl_initialized; + tpl_bool_t tdm_initialized; + + tdm_client *tdm_client; + tpl_gsource *tdm_source; + int tdm_display_fd; + + tpl_bool_t use_wait_vblank; + tpl_bool_t use_explicit_sync; + tpl_bool_t prepared; + + struct tizen_surface_shm *tss; /* used for surface buffer_flush */ + struct wp_presentation *presentation; /* for presentation feedback */ + struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */ }; -struct _tpl_wayland_egl_surface { - tpl_object_t base; - twe_surface_h twe_surface; - tbm_surface_queue_h tbm_queue; - tpl_bool_t is_activated; - tpl_bool_t reset; /* TRUE if queue reseted by external */ - tpl_bool_t need_to_enqueue; +struct _tpl_wl_egl_surface { + tpl_gsource *surf_source; + + tbm_surface_queue_h tbm_queue; + + struct wl_egl_window *wl_egl_window; + struct wl_surface *wl_surface; + struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ + struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */ + + tdm_client_vblank *vblank; + + /* surface information */ + int render_done_cnt; + unsigned int serial; + + int width; + int height; + int format; + int latest_transform; + int rotation; + 
int post_interval; + + tpl_wl_egl_display_t *wl_egl_display; + tpl_surface_t *tpl_surface; + + /* wl_egl_buffer array for buffer tracing */ + tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE]; + int buffer_cnt; /* the number of using wl_egl_buffers */ + tpl_gmutex buffers_mutex; + + tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */ + tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */ + + struct { + tpl_gmutex mutex; + int fd; + } commit_sync; + + struct { + tpl_gmutex mutex; + int fd; + } presentation_sync; + + tpl_gmutex surf_mutex; + tpl_gcond surf_cond; + + /* for waiting draw done */ + tpl_bool_t use_render_done_fence; + tpl_bool_t is_activated; + tpl_bool_t reset; /* TRUE if queue reseted by external */ + tpl_bool_t need_to_enqueue; + tpl_bool_t prerotation_capability; + tpl_bool_t vblank_done; + tpl_bool_t set_serial_is_used; }; -static tpl_result_t -__tpl_wl_egl_display_init(tpl_display_t *display) -{ - tpl_wayland_egl_display_t *wayland_egl_display = NULL; +typedef enum buffer_status { + RELEASED = 0, // 0 + DEQUEUED, // 1 + ENQUEUED, // 2 + ACQUIRED, // 3 + WAITING_SIGNALED, // 4 + WAITING_VBLANK, // 5 + COMMITTED, // 6 +} buffer_status_t; + +static const char *status_to_string[7] = { + "RELEASED", // 0 + "DEQUEUED", // 1 + "ENQUEUED", // 2 + "ACQUIRED", // 3 + "WAITING_SIGNALED", // 4 + "WAITING_VBLANK", // 5 + "COMMITTED", // 6 +}; - TPL_ASSERT(display); +struct _tpl_wl_egl_buffer { + tbm_surface_h tbm_surface; + int bo_name; - /* Do not allow default display in wayland. 
*/ - if (!display->native_handle) { - TPL_ERR("Invalid native handle for display."); - return TPL_ERROR_INVALID_PARAMETER; - } + struct wl_proxy *wl_buffer; + int dx, dy; /* position to attach to wl_surface */ + int width, height; /* size to attach to wl_surface */ - wayland_egl_display = (tpl_wayland_egl_display_t *) calloc(1, - sizeof(tpl_wayland_egl_display_t)); - if (!wayland_egl_display) { - TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_display_t."); - return TPL_ERROR_OUT_OF_MEMORY; - } + buffer_status_t status; /* for tracing buffer status */ + int idx; /* position index in buffers array of wl_egl_surface */ - display->backend.data = wayland_egl_display; - display->bufmgr_fd = -1; + /* for damage region */ + int num_rects; + int *rects; - if (twe_check_native_handle_is_wl_display(display->native_handle)) { - wayland_egl_display->wl_egl_thread = twe_thread_create(); - if (!wayland_egl_display->wl_egl_thread) { - TPL_ERR("Failed to create twe_thread."); - goto free_display; - } + /* for wayland_tbm_client_set_buffer_transform */ + int w_transform; + tpl_bool_t w_rotated; - wayland_egl_display->twe_display = - twe_display_add(wayland_egl_display->wl_egl_thread, - display->native_handle, - display->backend.type); - if (!wayland_egl_display->twe_display) { - TPL_ERR("Failed to add native_display(%p) to thread(%p)", - display->native_handle, - wayland_egl_display->wl_egl_thread); - goto free_display; - } + /* for wl_surface_set_buffer_transform */ + int transform; - } else { - TPL_ERR("Invalid native handle for display."); - goto free_display; - } + /* for wayland_tbm_client_set_buffer_serial */ + unsigned int serial; - TPL_LOG_T("WL_EGL", - "[INIT DISPLAY] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)", - wayland_egl_display, - wayland_egl_display->wl_egl_thread, - wayland_egl_display->twe_display); + /* for checking need_to_commit (frontbuffer mode) */ + tpl_bool_t need_to_commit; - return TPL_ERROR_NONE; + /* for checking draw done */ + 
tpl_bool_t draw_done; -free_display: - if (wayland_egl_display->twe_display) - twe_display_del(wayland_egl_display->twe_display); - if (wayland_egl_display->wl_egl_thread) - twe_thread_destroy(wayland_egl_display->wl_egl_thread); - wayland_egl_display->wl_egl_thread = NULL; - wayland_egl_display->twe_display = NULL; - - free(wayland_egl_display); - display->backend.data = NULL; - return TPL_ERROR_INVALID_OPERATION; -} -static void -__tpl_wl_egl_display_fini(tpl_display_t *display) -{ - tpl_wayland_egl_display_t *wayland_egl_display; + /* to get release event via zwp_linux_buffer_release_v1 */ + struct zwp_linux_buffer_release_v1 *buffer_release; - TPL_ASSERT(display); + /* each buffers own its release_fence_fd, until it passes ownership + * to it to EGL */ + int32_t release_fence_fd; - wayland_egl_display = (tpl_wayland_egl_display_t *)display->backend.data; - if (wayland_egl_display) { + /* each buffers own its acquire_fence_fd. + * If it use zwp_linux_buffer_release_v1 the ownership of this fd + * will be passed to display server + * Otherwise it will be used as a fence waiting for render done + * on tpl thread */ + int32_t acquire_fence_fd; - TPL_LOG_T("WL_EGL", - "[FINI] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)", - wayland_egl_display, - wayland_egl_display->wl_egl_thread, - wayland_egl_display->twe_display); + /* Fd to send a signal when wl_surface_commit with this buffer */ + int32_t commit_sync_fd; - if (wayland_egl_display->twe_display) { - tpl_result_t ret = TPL_ERROR_NONE; - ret = twe_display_del(wayland_egl_display->twe_display); - if (ret != TPL_ERROR_NONE) - TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)", - wayland_egl_display->twe_display, - wayland_egl_display->wl_egl_thread); - wayland_egl_display->twe_display = NULL; - } + /* Fd to send a siganl when receive the + * presentation feedback from display server */ + int32_t presentation_sync_fd; - if (wayland_egl_display->wl_egl_thread) { - 
twe_thread_destroy(wayland_egl_display->wl_egl_thread); - wayland_egl_display->wl_egl_thread = NULL; - } + tpl_gsource *waiting_source; - free(wayland_egl_display); - } + tpl_gmutex mutex; + tpl_gcond cond; - display->backend.data = NULL; -} + tpl_wl_egl_surface_t *wl_egl_surface; +}; -static tpl_result_t -__tpl_wl_egl_display_query_config(tpl_display_t *display, - tpl_surface_type_t surface_type, - int red_size, int green_size, - int blue_size, int alpha_size, - int color_depth, int *native_visual_id, - tpl_bool_t *is_slow) -{ - TPL_ASSERT(display); +struct pst_feedback { + /* to get presentation feedback from display server */ + struct wp_presentation_feedback *presentation_feedback; - if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 && - green_size == 8 && blue_size == 8 && - (color_depth == 32 || color_depth == 24)) { + int32_t pst_sync_fd; - if (alpha_size == 8) { - if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888; - if (is_slow) *is_slow = TPL_FALSE; - return TPL_ERROR_NONE; - } - if (alpha_size == 0) { - if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888; - if (is_slow) *is_slow = TPL_FALSE; - return TPL_ERROR_NONE; - } - } + int bo_name; + tpl_wl_egl_surface_t *wl_egl_surface; - return TPL_ERROR_INVALID_PARAMETER; -} +}; +static int +_get_tbm_surface_bo_name(tbm_surface_h tbm_surface); +static void +_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface); +static void +__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer); +static tpl_wl_egl_buffer_t * +_get_wl_egl_buffer(tbm_surface_h tbm_surface); +static int +_write_to_eventfd(int eventfd); +static void +_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface); static tpl_result_t -__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id, - int alpha_size) -{ - TPL_IGNORE(display); - TPL_IGNORE(visual_id); - TPL_IGNORE(alpha_size); - return TPL_ERROR_NONE; -} +_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface); +static 
void +_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, + tpl_wl_egl_buffer_t *wl_egl_buffer); -static tpl_result_t -__tpl_wl_egl_display_get_window_info(tpl_display_t *display, - tpl_handle_t window, int *width, - int *height, tbm_format *format, - int depth, int a_size) +static tpl_bool_t +_check_native_handle_is_wl_display(tpl_handle_t display) { - tpl_result_t ret = TPL_ERROR_NONE; + struct wl_interface *wl_egl_native_dpy = *(void **) display; - TPL_ASSERT(display); - TPL_ASSERT(window); + if (!wl_egl_native_dpy) { + TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy); + return TPL_FALSE; + } - if ((ret = twe_get_native_window_info(window, width, height, format, a_size)) - != TPL_ERROR_NONE) { - TPL_ERR("Failed to get size info of native_window(%p)", window); + /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value + is a memory address pointing the structure of wl_display_interface. */ + if (wl_egl_native_dpy == &wl_display_interface) + return TPL_TRUE; + + if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name, + strlen(wl_display_interface.name)) == 0) { + return TPL_TRUE; } - return ret; + return TPL_FALSE; } -static tpl_result_t -__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display, - tpl_handle_t pixmap, int *width, - int *height, tbm_format *format) +static tpl_bool_t +__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) { - tbm_surface_h tbm_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; - tbm_surface = twe_get_native_buffer_from_pixmap(pixmap); - if (!tbm_surface) { - TPL_ERR("Failed to get tbm_surface_h from native pixmap."); - return TPL_ERROR_INVALID_OPERATION; + TPL_IGNORE(message); + + wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + if (!wl_egl_display) { + TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource); + TPL_WARN("tdm_source(%p) will be removed from 
thread.", gsource); + return TPL_FALSE; } - if (width) *width = tbm_surface_get_width(tbm_surface); - if (height) *height = tbm_surface_get_height(tbm_surface); - if (format) *format = tbm_surface_get_format(tbm_surface); + tdm_err = tdm_client_handle_events(wl_egl_display->tdm_client); - return TPL_ERROR_NONE; -} + /* If an error occurs in tdm_client_handle_events, it cannot be recovered. + * When tdm_source is no longer available due to an unexpected situation, + * wl_egl_thread must remove it from the thread and destroy it. + * In that case, tdm_vblank can no longer be used for surfaces and displays + * that used this tdm_source. */ + if (tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)", + tdm_err); + TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); -static tbm_surface_h -__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) -{ - tbm_surface_h tbm_surface = NULL; + tpl_gsource_destroy(gsource, TPL_FALSE); - TPL_ASSERT(pixmap); + wl_egl_display->tdm_source = NULL; - tbm_surface = twe_get_native_buffer_from_pixmap(pixmap); - if (!tbm_surface) { - TPL_ERR("Failed to get tbm_surface_h from wayland_tbm."); - return NULL; + return TPL_FALSE; } - return tbm_surface; + return TPL_TRUE; } static void -__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, - void *data) +__thread_func_tdm_finalize(tpl_gsource *gsource) { - tpl_surface_t *surface = NULL; - tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; - tpl_bool_t is_activated = TPL_FALSE; - int width, height; + tpl_wl_egl_display_t *wl_egl_display = NULL; - surface = (tpl_surface_t *)data; - TPL_CHECK_ON_NULL_RETURN(surface); + wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; - TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface); + TPL_LOG_T("WL_EGL", "tdm_destroy| wl_egl_display(%p) tdm_client(%p)", + wl_egl_display, 
wl_egl_display->tdm_client); - /* When the queue is resized, change the reset flag to TPL_TRUE to reflect - * the changed window size at the next frame. */ - width = tbm_surface_queue_get_width(surface_queue); - height = tbm_surface_queue_get_height(surface_queue); - if (surface->width != width || surface->height != height) { - TPL_LOG_T("WL_EGL", - "[QUEUE_RESIZE_CB] wayland_egl_surface(%p) tbm_queue(%p) (%dx%d)", - wayland_egl_surface, surface_queue, width, height); + if (wl_egl_display->tdm_client) { + tdm_client_destroy(wl_egl_display->tdm_client); + wl_egl_display->tdm_client = NULL; + wl_egl_display->tdm_display_fd = -1; } - /* When queue_reset_callback is called, if is_activated is different from - * its previous state change the reset flag to TPL_TRUE to get a new buffer - * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ - is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface); - if (wayland_egl_surface->is_activated != is_activated) { - if (is_activated) { - TPL_LOG_T("WL_EGL", - "[ACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)", - wayland_egl_surface, surface_queue); + wl_egl_display->tdm_initialized = TPL_FALSE; +} + +static tpl_gsource_functions tdm_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_tdm_dispatch, + .finalize = __thread_func_tdm_finalize, +}; + +tpl_result_t +_thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display) +{ + tdm_client *tdm_client = NULL; + int tdm_display_fd = -1; + tdm_error tdm_err = TDM_ERROR_NONE; + + tdm_client = tdm_client_create(&tdm_err); + if (!tdm_client || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err); + return TPL_ERROR_INVALID_OPERATION; + } + + tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd); + if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err); + tdm_client_destroy(tdm_client); + return TPL_ERROR_INVALID_OPERATION; + } 
+ + wl_egl_display->tdm_display_fd = tdm_display_fd; + wl_egl_display->tdm_client = tdm_client; + wl_egl_display->tdm_source = NULL; + wl_egl_display->tdm_initialized = TPL_TRUE; + + TPL_INFO("[TDM_CLIENT_INIT]", + "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_egl_display, tdm_client, tdm_display_fd); + + return TPL_ERROR_NONE; +} + +#define IMPL_TIZEN_SURFACE_SHM_VERSION 2 + +static void +__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, + uint32_t name, const char *interface, + uint32_t version) +{ + tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; + + if (!strcmp(interface, "tizen_surface_shm")) { + wl_egl_display->tss = wl_registry_bind(wl_registry, + name, + &tizen_surface_shm_interface, + ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ? + version : IMPL_TIZEN_SURFACE_SHM_VERSION)); + } else if (!strcmp(interface, wp_presentation_interface.name)) { + wl_egl_display->presentation = + wl_registry_bind(wl_registry, + name, &wp_presentation_interface, 1); + TPL_DEBUG("bind wp_presentation_interface"); + } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) { + char *env = tpl_getenv("TPL_EFS"); + if (env && atoi(env)) { + wl_egl_display->explicit_sync = + wl_registry_bind(wl_registry, name, + &zwp_linux_explicit_synchronization_v1_interface, 1); + wl_egl_display->use_explicit_sync = TPL_TRUE; + TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface"); } else { - TPL_LOG_T("WL_EGL", - "[DEACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)", - wayland_egl_surface, surface_queue); + wl_egl_display->use_explicit_sync = TPL_FALSE; } } +} - wayland_egl_surface->reset = TPL_TRUE; - - if (surface->reset_cb) - surface->reset_cb(surface->reset_data); +static void +__cb_wl_resistry_global_remove_callback(void *data, + struct wl_registry *wl_registry, + uint32_t name) +{ } -void __cb_window_rotate_callback(void *data) +static const struct wl_registry_listener registry_listener = { + 
__cb_wl_resistry_global_callback, + __cb_wl_resistry_global_remove_callback +}; + +static void +_wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display, + const char *func_name) { - tpl_surface_t *surface = (tpl_surface_t *)data; - tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; - int rotation; + int dpy_err; + char buf[1024]; + strerror_r(errno, buf, sizeof(buf)); - if (!surface) { - TPL_ERR("Inavlid parameter. surface is NULL."); + if (wl_egl_display->last_error == errno) return; - } - wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; - if (!wayland_egl_surface) { - TPL_ERR("Invalid parameter. surface->backend.data is NULL"); - return; + TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf); + + dpy_err = wl_display_get_error(wl_egl_display->wl_display); + if (dpy_err == EPROTO) { + const struct wl_interface *err_interface; + uint32_t err_proxy_id, err_code; + err_code = wl_display_get_protocol_error(wl_egl_display->wl_display, + &err_interface, + &err_proxy_id); + TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d", + err_interface->name, err_code, err_proxy_id); } - rotation = twe_surface_get_rotation(wayland_egl_surface->twe_surface); - - surface->rotation = rotation; + wl_egl_display->last_error = errno; } -static tpl_result_t -__tpl_wl_egl_surface_init(tpl_surface_t *surface) +tpl_result_t +_thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display) { - tpl_wayland_egl_display_t *wayland_egl_display = NULL; - tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; - tbm_surface_queue_h tbm_queue = NULL; - twe_surface_h twe_surface = NULL; - tpl_result_t ret = TPL_ERROR_NONE; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); - TPL_ASSERT(surface->native_handle); + struct wl_registry *registry = NULL; + struct wl_event_queue *queue = NULL; + struct wl_display *display_wrapper = NULL; + struct wl_proxy *wl_tbm = NULL; + struct 
wayland_tbm_client *wl_tbm_client = NULL; + int ret; + tpl_result_t result = TPL_ERROR_NONE; + + queue = wl_display_create_queue(wl_egl_display->wl_display); + if (!queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_egl_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } - wayland_egl_display = - (tpl_wayland_egl_display_t *)surface->display->backend.data; - if (!wayland_egl_display) { - TPL_ERR("Invalid parameter. wayland_egl_display(%p)", - wayland_egl_display); - return TPL_ERROR_INVALID_PARAMETER; + wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display); + if (!wl_egl_display->ev_queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_egl_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; } - wayland_egl_surface = (tpl_wayland_egl_surface_t *) calloc(1, - sizeof(tpl_wayland_egl_surface_t)); - if (!wayland_egl_surface) { - TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_surface_t."); - return TPL_ERROR_OUT_OF_MEMORY; + display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display); + if (!display_wrapper) { + TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)", + wl_egl_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; } - surface->backend.data = (void *)wayland_egl_surface; + wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue); - if (__tpl_object_init(&wayland_egl_surface->base, - TPL_OBJECT_SURFACE, - NULL) != TPL_ERROR_NONE) { - TPL_ERR("Failed to initialize backend surface's base object!"); - goto object_init_fail; + registry = wl_display_get_registry(display_wrapper); + if (!registry) { + TPL_ERR("Failed to create wl_registry"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; } - twe_surface = twe_surface_add(wayland_egl_display->wl_egl_thread, - wayland_egl_display->twe_display, - surface->native_handle, - surface->format, surface->num_buffers); - if (!twe_surface) { - TPL_ERR("Failed to 
add native_window(%p) to thread(%p)", - surface->native_handle, wayland_egl_display->wl_egl_thread); - goto create_twe_surface_fail; - } + wl_proxy_wrapper_destroy(display_wrapper); + display_wrapper = NULL; - tbm_queue = twe_surface_get_tbm_queue(twe_surface); - if (!tbm_queue) { - TPL_ERR("Failed to get tbm_queue from twe_surface(%p)", twe_surface); - goto queue_create_fail; + wl_tbm_client = wayland_tbm_client_init(wl_egl_display->wl_display); + if (!wl_tbm_client) { + TPL_ERR("Failed to initialize wl_tbm_client."); + result = TPL_ERROR_INVALID_CONNECTION; + goto fini; } - /* Set reset_callback to tbm_queue */ - if (tbm_surface_queue_add_reset_cb(tbm_queue, - __cb_tbm_surface_queue_reset_callback, - (void *)surface)) { - TPL_ERR("TBM surface queue add reset cb failed!"); - goto add_reset_cb_fail; + wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client); + if (!wl_tbm) { + TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client); + result = TPL_ERROR_INVALID_CONNECTION; + goto fini; } - wayland_egl_surface->reset = TPL_FALSE; - wayland_egl_surface->twe_surface = twe_surface; - wayland_egl_surface->tbm_queue = tbm_queue; - wayland_egl_surface->is_activated = TPL_FALSE; - wayland_egl_surface->need_to_enqueue = TPL_TRUE; + wl_proxy_set_queue(wl_tbm, wl_egl_display->ev_queue); + wl_egl_display->wl_tbm_client = wl_tbm_client; - surface->width = tbm_surface_queue_get_width(tbm_queue); - surface->height = tbm_surface_queue_get_height(tbm_queue); - surface->rotation = twe_surface_get_rotation(twe_surface); + if (wl_registry_add_listener(registry, ®istry_listener, + wl_egl_display)) { + TPL_ERR("Failed to wl_registry_add_listener"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } - ret = twe_surface_set_rotate_callback(twe_surface, (void *)surface, - (tpl_surface_cb_func_t)__cb_window_rotate_callback); - if (ret != TPL_ERROR_NONE) { - TPL_WARN("Failed to register rotate callback."); + ret = 
wl_display_roundtrip_queue(wl_egl_display->wl_display, queue); + if (ret == -1) { + _wl_display_print_err(wl_egl_display, "roundtrip_queue"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; } - TPL_LOG_T("WL_EGL", - "[INIT1/2]tpl_surface(%p) tpl_wayland_egl_surface(%p) twe_surface(%p)", - surface, wayland_egl_surface, twe_surface); - TPL_LOG_T("WL_EGL", - "[INIT2/2]size(%dx%d)rot(%d)|tbm_queue(%p)|native_window(%p)", - surface->width, surface->height, surface->rotation, - tbm_queue, surface->native_handle); + /* set tizen_surface_shm's queue as client's private queue */ + if (wl_egl_display->tss) { + wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss, + wl_egl_display->ev_queue); + TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss); + } - return TPL_ERROR_NONE; + if (wl_egl_display->presentation) { + wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation, + wl_egl_display->ev_queue); + TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.", + wl_egl_display->presentation); + } -add_reset_cb_fail: -queue_create_fail: - twe_surface_del(twe_surface); -create_twe_surface_fail: -object_init_fail: - free(wayland_egl_surface); - surface->backend.data = NULL; - return TPL_ERROR_INVALID_OPERATION; + if (wl_egl_display->explicit_sync) { + wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync, + wl_egl_display->ev_queue); + TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", + wl_egl_display->explicit_sync); + } + + wl_egl_display->wl_initialized = TPL_TRUE; + + TPL_INFO("[WAYLAND_INIT]", + "wl_egl_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)", + wl_egl_display, wl_egl_display->wl_display, + wl_egl_display->wl_tbm_client, wl_egl_display->ev_queue); + TPL_INFO("[WAYLAND_INIT]", + "tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)", + wl_egl_display->tss, wl_egl_display->presentation, + wl_egl_display->explicit_sync); + +fini: + if (display_wrapper) + 
wl_proxy_wrapper_destroy(display_wrapper); + if (registry) + wl_registry_destroy(registry); + if (queue) + wl_event_queue_destroy(queue); + + return result; } -static void -__tpl_wl_egl_surface_fini(tpl_surface_t *surface) +void +_thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display) { - tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; - tpl_wayland_egl_display_t *wayland_egl_display = NULL; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); + /* If wl_egl_display is in prepared state, cancel it */ + if (wl_egl_display->prepared) { + wl_display_cancel_read(wl_egl_display->wl_display); + wl_egl_display->prepared = TPL_FALSE; + } - wayland_egl_surface = (tpl_wayland_egl_surface_t *) surface->backend.data; - TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface); + if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, + wl_egl_display->ev_queue) == -1) { + _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); + } - TPL_OBJECT_LOCK(wayland_egl_surface); + if (wl_egl_display->tss) { + TPL_INFO("[TIZEN_SURFACE_SHM_DESTROY]", + "wl_egl_display(%p) tizen_surface_shm(%p) fini.", + wl_egl_display, wl_egl_display->tss); + tizen_surface_shm_destroy(wl_egl_display->tss); + wl_egl_display->tss = NULL; + } - wayland_egl_display = (tpl_wayland_egl_display_t *) - surface->display->backend.data; + if (wl_egl_display->presentation) { + TPL_INFO("[WP_PRESENTATION_DESTROY]", + "wl_egl_display(%p) wp_presentation(%p) fini.", + wl_egl_display, wl_egl_display->presentation); + wp_presentation_destroy(wl_egl_display->presentation); + wl_egl_display->presentation = NULL; + } - if (wayland_egl_display == NULL) { - TPL_ERR("check failed: wayland_egl_display == NULL"); - TPL_OBJECT_UNLOCK(wayland_egl_surface); - return; + if (wl_egl_display->explicit_sync) { + TPL_INFO("[EXPLICIT_SYNC_DESTROY]", + "wl_egl_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.", + wl_egl_display, wl_egl_display->explicit_sync); + 
zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync); + wl_egl_display->explicit_sync = NULL; } - if (surface->type == TPL_SURFACE_TYPE_WINDOW) { - TPL_LOG_T("WL_EGL", - "[FINI] wayland_egl_surface(%p) native_window(%p) twe_surface(%p)", - wayland_egl_surface, surface->native_handle, - wayland_egl_surface->twe_surface); + if (wl_egl_display->wl_tbm_client) { + struct wl_proxy *wl_tbm = NULL; - if (twe_surface_del(wayland_egl_surface->twe_surface) - != TPL_ERROR_NONE) { - TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", - wayland_egl_surface->twe_surface, - wayland_egl_display->wl_egl_thread); + wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm( + wl_egl_display->wl_tbm_client); + if (wl_tbm) { + wl_proxy_set_queue(wl_tbm, NULL); } - wayland_egl_surface->twe_surface = NULL; - wayland_egl_surface->tbm_queue = NULL; + TPL_INFO("[WL_TBM_DEINIT]", + "wl_egl_display(%p) wl_tbm_client(%p)", + wl_egl_display, wl_egl_display->wl_tbm_client); + wayland_tbm_client_deinit(wl_egl_display->wl_tbm_client); + wl_egl_display->wl_tbm_client = NULL; } - TPL_OBJECT_UNLOCK(wayland_egl_surface); - __tpl_object_fini(&wayland_egl_surface->base); - free(wayland_egl_surface); - surface->backend.data = NULL; + wl_event_queue_destroy(wl_egl_display->ev_queue); + + wl_egl_display->wl_initialized = TPL_FALSE; + + TPL_INFO("[DISPLAY_FINI]", "wl_egl_display(%p) wl_display(%p)", + wl_egl_display, wl_egl_display->wl_display); } -static tpl_result_t -__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface, - tpl_bool_t set) +static void* +_thread_init(void *data) { - tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; - if (!surface) { - TPL_ERR("Invalid parameter. 
tpl_surface(%p)", surface); - return TPL_ERROR_INVALID_PARAMETER; + if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) { + TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)", + wl_egl_display, wl_egl_display->wl_display); } - wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; - if (!wayland_egl_surface) { - TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)", - surface, wayland_egl_surface); - return TPL_ERROR_INVALID_PARAMETER; + if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) { + TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED"); } - if (!wayland_egl_surface->twe_surface) { - TPL_ERR("Invalid parameter. wayland_egl_surface(%p) twe_surface(%p)", - wayland_egl_surface, wayland_egl_surface->twe_surface); - return TPL_ERROR_INVALID_PARAMETER; + return wl_egl_display; +} + +static tpl_bool_t +__thread_func_disp_prepare(tpl_gsource *gsource) +{ + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + + /* If this wl_egl_display is already prepared, + * do nothing in this function. */ + if (wl_egl_display->prepared) + return TPL_FALSE; + + /* If there is a last_error, there is no need to poll, + * so skip directly to dispatch. 
+ * prepare -> dispatch */ + if (wl_egl_display->last_error) + return TPL_TRUE; + + while (wl_display_prepare_read_queue(wl_egl_display->wl_display, + wl_egl_display->ev_queue) != 0) { + if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, + wl_egl_display->ev_queue) == -1) { + _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); + } } - twe_surface_set_rotation_capablity(wayland_egl_surface->twe_surface, - set); + wl_egl_display->prepared = TPL_TRUE; - return TPL_ERROR_NONE; + wl_display_flush(wl_egl_display->wl_display); + + return TPL_FALSE; } -static tpl_result_t -__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface, - int post_interval) +static tpl_bool_t +__thread_func_disp_check(tpl_gsource *gsource) { - tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + tpl_bool_t ret = TPL_FALSE; + + if (!wl_egl_display->prepared) + return ret; + + /* If prepared, but last_error is set, + * cancel_read is executed and FALSE is returned. + * That can lead to G_SOURCE_REMOVE by calling disp_prepare again + * and skipping disp_check from prepare to disp_dispatch. + * check -> prepare -> dispatch -> G_SOURCE_REMOVE */ + if (wl_egl_display->prepared && wl_egl_display->last_error) { + wl_display_cancel_read(wl_egl_display->wl_display); + return ret; + } - if (!surface) { - TPL_ERR("Invalid parameter. tpl_surface(%p)", surface); - return TPL_ERROR_INVALID_PARAMETER; + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_read_events(wl_egl_display->wl_display) == -1) + _wl_display_print_err(wl_egl_display, "read_event"); + ret = TPL_TRUE; + } else { + wl_display_cancel_read(wl_egl_display->wl_display); + ret = TPL_FALSE; } - wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; - if (!wayland_egl_surface) { - TPL_ERR("Invalid parameter. 
surface(%p) wayland_egl_surface(%p)", - surface, wayland_egl_surface); - return TPL_ERROR_INVALID_PARAMETER; + wl_egl_display->prepared = TPL_FALSE; + + return ret; +} + +static tpl_bool_t +__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + + TPL_IGNORE(message); + + /* If there is last_error, SOURCE_REMOVE should be returned + * to remove the gsource from the main loop. + * This is because wl_egl_display is not valid since last_error was set.*/ + if (wl_egl_display->last_error) { + return TPL_FALSE; } - if (!wayland_egl_surface->twe_surface) { - TPL_ERR("Invalid parameter. wayland_egl_surface(%p) twe_surface(%p)", - wayland_egl_surface, wayland_egl_surface->twe_surface); - return TPL_ERROR_INVALID_PARAMETER; + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, + wl_egl_display->ev_queue) == -1) { + _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); + } } - twe_surface_set_post_interval(wayland_egl_surface->twe_surface, - post_interval); + wl_display_flush(wl_egl_display->wl_display); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return TPL_ERROR_NONE; + return TPL_TRUE; } -static tpl_result_t -__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface, - int num_rects, const int *rects, tbm_fd sync_fence) +static void +__thread_func_disp_finalize(tpl_gsource *gsource) { - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - TPL_ASSERT(tbm_surface); - TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - tpl_wayland_egl_surface_t *wayland_egl_surface = - (tpl_wayland_egl_surface_t *) surface->backend.data; - tbm_surface_queue_error_e tsq_err; - tpl_result_t ret = 
TPL_ERROR_NONE; - int bo_name = 0; + if (wl_egl_display->wl_initialized) + _thread_wl_display_fini(wl_egl_display); - TPL_OBJECT_LOCK(wayland_egl_surface); + TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)", + wl_egl_display, gsource); - bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); + return; +} - if (!wayland_egl_surface) { - TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)", - surface, wayland_egl_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wayland_egl_surface); + +static tpl_gsource_functions disp_funcs = { + .prepare = __thread_func_disp_prepare, + .check = __thread_func_disp_check, + .dispatch = __thread_func_disp_dispatch, + .finalize = __thread_func_disp_finalize, +}; + +static tpl_result_t +__tpl_wl_egl_display_init(tpl_display_t *display) +{ + tpl_wl_egl_display_t *wl_egl_display = NULL; + + TPL_ASSERT(display); + + /* Do not allow default display in wayland. */ + if (!display->native_handle) { + TPL_ERR("Invalid native handle for display."); return TPL_ERROR_INVALID_PARAMETER; } - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", - tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wayland_egl_surface); + if (!_check_native_handle_is_wl_display(display->native_handle)) { + TPL_ERR("native_handle(%p) is not wl_display", display->native_handle); return TPL_ERROR_INVALID_PARAMETER; } + wl_egl_display = (tpl_wl_egl_display_t *) calloc(1, + sizeof(tpl_wl_egl_display_t)); + if (!wl_egl_display) { + TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + display->backend.data = wl_egl_display; + display->bufmgr_fd = -1; + + wl_egl_display->tdm_initialized = TPL_FALSE; + wl_egl_display->wl_initialized = TPL_FALSE; + + wl_egl_display->ev_queue = NULL; + wl_egl_display->wl_display = 
(struct wl_display *)display->native_handle; + wl_egl_display->last_error = 0; + wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled + wl_egl_display->prepared = TPL_FALSE; + + /* Wayland Interfaces */ + wl_egl_display->tss = NULL; + wl_egl_display->presentation = NULL; + wl_egl_display->explicit_sync = NULL; + wl_egl_display->wl_tbm_client = NULL; + + wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled + { + char *env = tpl_getenv("TPL_WAIT_VBLANK"); + if (env && !atoi(env)) { + wl_egl_display->use_wait_vblank = TPL_FALSE; + } + } + + tpl_gmutex_init(&wl_egl_display->wl_event_mutex); + + /* Create gthread */ + wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", + (tpl_gthread_func)_thread_init, + (void *)wl_egl_display); + if (!wl_egl_display->thread) { + TPL_ERR("Failed to create wl_egl_thread"); + goto free_display; + } + + wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread, + (void *)wl_egl_display, + wl_display_get_fd(wl_egl_display->wl_display), + &disp_funcs, SOURCE_TYPE_NORMAL); + if (!wl_egl_display->disp_source) { + TPL_ERR("Failed to add native_display(%p) to thread(%p)", + display->native_handle, + wl_egl_display->thread); + goto free_display; + } + + wl_egl_display->tdm_source = tpl_gsource_create(wl_egl_display->thread, + (void *)wl_egl_display, + wl_egl_display->tdm_display_fd, + &tdm_funcs, SOURCE_TYPE_NORMAL); + if (!wl_egl_display->tdm_source) { + TPL_ERR("Failed to create tdm_gsource\n"); + goto free_display; + } + + TPL_INFO("[DISPLAY_INIT]", + "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_egl_display, + wl_egl_display->thread, + wl_egl_display->wl_display); + + TPL_INFO("[DISPLAY_INIT]", + "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)", + wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE", + wl_egl_display->tss ? "TRUE" : "FALSE", + wl_egl_display->use_explicit_sync ? 
"TRUE" : "FALSE"); + + return TPL_ERROR_NONE; + +free_display: + if (wl_egl_display->thread) { + if (wl_egl_display->tdm_source) + tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); + if (wl_egl_display->disp_source) + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); + + tpl_gthread_destroy(wl_egl_display->thread); + } + + wl_egl_display->thread = NULL; + free(wl_egl_display); + + display->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; +} + +static void +__tpl_wl_egl_display_fini(tpl_display_t *display) +{ + tpl_wl_egl_display_t *wl_egl_display; + + TPL_ASSERT(display); + + wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data; + if (wl_egl_display) { + TPL_INFO("[DISPLAY_FINI]", + "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_egl_display, + wl_egl_display->thread, + wl_egl_display->wl_display); + + if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) { + tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); + wl_egl_display->tdm_source = NULL; + } + + if (wl_egl_display->disp_source) { + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); + wl_egl_display->disp_source = NULL; + } + + if (wl_egl_display->thread) { + tpl_gthread_destroy(wl_egl_display->thread); + wl_egl_display->thread = NULL; + } + + tpl_gmutex_clear(&wl_egl_display->wl_event_mutex); + + free(wl_egl_display); + } + + display->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_egl_display_query_config(tpl_display_t *display, + tpl_surface_type_t surface_type, + int red_size, int green_size, + int blue_size, int alpha_size, + int color_depth, int *native_visual_id, + tpl_bool_t *is_slow) +{ + TPL_ASSERT(display); + + if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 && + green_size == 8 && blue_size == 8 && + (color_depth == 32 || color_depth == 24)) { + + if (alpha_size == 8) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + 
} + if (alpha_size == 0) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + } + + return TPL_ERROR_INVALID_PARAMETER; +} + +static tpl_result_t +__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id, + int alpha_size) +{ + TPL_IGNORE(display); + TPL_IGNORE(visual_id); + TPL_IGNORE(alpha_size); + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_egl_display_get_window_info(tpl_display_t *display, + tpl_handle_t window, int *width, + int *height, tbm_format *format, + int depth, int a_size) +{ + tpl_result_t ret = TPL_ERROR_NONE; + struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window; + + TPL_ASSERT(display); + TPL_ASSERT(window); + + if (!wl_egl_window) { + TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (width) *width = wl_egl_window->width; + if (height) *height = wl_egl_window->height; + if (format) { + struct tizen_private *tizen_private = + (struct tizen_private *)wl_egl_window->driver_private; + if (tizen_private && tizen_private->data) { + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)tizen_private->data; + *format = wl_egl_surface->format; + } else { + if (a_size == 8) + *format = TBM_FORMAT_ARGB8888; + else + *format = TBM_FORMAT_XRGB8888; + } + } + + return ret; +} + +static tpl_result_t +__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display, + tpl_handle_t pixmap, int *width, + int *height, tbm_format *format) +{ + tbm_surface_h tbm_surface = NULL; + + if (!pixmap) { + TPL_ERR("Invalid parameter. 
tpl_handle_t(%p)", pixmap); + return TPL_ERROR_INVALID_PARAMETER; + } + + tbm_surface = wayland_tbm_server_get_surface(NULL, + (struct wl_resource *)pixmap); + if (!tbm_surface) { + TPL_ERR("Failed to get tbm_surface from wayland_tbm."); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (width) *width = tbm_surface_get_width(tbm_surface); + if (height) *height = tbm_surface_get_height(tbm_surface); + if (format) *format = tbm_surface_get_format(tbm_surface); + + return TPL_ERROR_NONE; +} + +static tbm_surface_h +__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) +{ + tbm_surface_h tbm_surface = NULL; + + TPL_ASSERT(pixmap); + + tbm_surface = wayland_tbm_server_get_surface(NULL, + (struct wl_resource *)pixmap); + if (!tbm_surface) { + TPL_ERR("Failed to get tbm_surface_h from wayland_tbm."); + return NULL; + } + + return tbm_surface; +} + +tpl_bool_t +__tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy) +{ + struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE); + + /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value + is a memory address pointing the structure of wl_display_interface. */ + if (wl_egl_native_dpy == &wl_display_interface) + return TPL_TRUE; + + if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name, + strlen(wl_display_interface.name)) == 0) { + return TPL_TRUE; + } + + return TPL_FALSE; +} + +/* -- BEGIN -- wl_egl_window callback functions */ +static void +__cb_destroy_callback(void *private) +{ + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + if (!tizen_private) { + TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface"); + return; + } + + wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + if (wl_egl_surface) { + TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] 
wl_egl_window(%p) is destroyed.", + wl_egl_surface->wl_egl_window); + TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface."); + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + wl_egl_surface->wl_egl_window->destroy_window_callback = NULL; + wl_egl_surface->wl_egl_window->resize_callback = NULL; + wl_egl_surface->wl_egl_window->driver_private = NULL; + wl_egl_surface->wl_egl_window = NULL; + wl_egl_surface->wl_surface = NULL; + + tizen_private->set_window_serial_callback = NULL; + tizen_private->rotate_callback = NULL; + tizen_private->get_rotation_capability = NULL; + tizen_private->set_frontbuffer_callback = NULL; + tizen_private->create_commit_sync_fd = NULL; + tizen_private->create_presentation_sync_fd = NULL; + tizen_private->data = NULL; + + free(tizen_private); + tizen_private = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } +} + +static void +__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + int cur_w, cur_h, req_w, req_h, format; + + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return; + } + + format = wl_egl_surface->format; + cur_w = wl_egl_surface->width; + cur_h = wl_egl_surface->height; + req_w = wl_egl_window->width; + req_h = wl_egl_window->height; + + TPL_INFO("[WINDOW_RESIZE]", + "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)", + wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h); + + if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format) + != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue); + return; + } +} +/* -- END -- wl_egl_window callback functions */ + +/* -- BEGIN -- wl_egl_window tizen private 
callback functions */ + +/* There is no usecase for using prerotation callback below */ +static void +__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + int rotation = tizen_private->rotation; + + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return; + } + + TPL_INFO("[WINDOW_ROTATE]", + "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)", + wl_egl_surface, wl_egl_window, + wl_egl_surface->rotation, rotation); + + wl_egl_surface->rotation = rotation; +} + +/* There is no usecase for using prerotation callback below */ +static int +__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window, + void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE; + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return rotation_capability; + } + + if (wl_egl_surface->prerotation_capability == TPL_TRUE) + rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED; + else + rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED; + + + return rotation_capability; +} + +static void +__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window, + void *private, unsigned int serial) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid 
wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return; + } + + wl_egl_surface->set_serial_is_used = TPL_TRUE; + wl_egl_surface->serial = serial; +} + +static int +__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + int commit_sync_fd = -1; + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface); + return -1; + } + + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); + + if (wl_egl_surface->commit_sync.fd != -1) { + commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); + TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)", + wl_egl_surface->commit_sync.fd, commit_sync_fd); + TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)", + wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd); + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + return commit_sync_fd; + } + + wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC); + if (wl_egl_surface->commit_sync.fd == -1) { + TPL_ERR("Failed to create commit_sync_fd. 
wl_egl_surface(%p)", wl_egl_surface); + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + return -1; + } + + commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); + + TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)", + wl_egl_surface->commit_sync.fd, commit_sync_fd); + TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)", + wl_egl_surface, commit_sync_fd); + + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + + return commit_sync_fd; +} + +static int +__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + int presentation_sync_fd = -1; + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. wl_egl_surface is NULL"); + return -1; + } + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + if (wl_egl_surface->presentation_sync.fd != -1) { + presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); + TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)", + wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", + wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + return presentation_sync_fd; + } + + wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC); + if (wl_egl_surface->presentation_sync.fd == -1) { + TPL_ERR("Failed to create presentation_sync_fd. 
wl_egl_surface(%p)", wl_egl_surface); + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + return -1; + } + + presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); + TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)", + wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", + wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + + return presentation_sync_fd; +} +/* -- END -- wl_egl_window tizen private callback functions */ + +/* -- BEGIN -- tizen_surface_shm_flusher_listener */ +static void __cb_tss_flusher_flush_callback(void *data, + struct tizen_surface_shm_flusher *tss_flusher) +{ + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); + + tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue); + return; + } +} + +static void __cb_tss_flusher_free_flush_callback(void *data, + struct tizen_surface_shm_flusher *tss_flusher) +{ + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); + + tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue); + return; + } +} + +static const struct tizen_surface_shm_flusher_listener +tss_flusher_listener = { + __cb_tss_flusher_flush_callback, + 
__cb_tss_flusher_free_flush_callback +}; +/* -- END -- tizen_surface_shm_flusher_listener */ + + +/* -- BEGIN -- tbm_surface_queue callback funstions */ +static void +__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, + void *data) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = NULL; + tpl_surface_t *surface = NULL; + tpl_bool_t is_activated = TPL_FALSE; + int width, height; + + wl_egl_surface = (tpl_wl_egl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + wl_egl_display = wl_egl_surface->wl_egl_display; + TPL_CHECK_ON_NULL_RETURN(wl_egl_display); + + surface = wl_egl_surface->tpl_surface; + TPL_CHECK_ON_NULL_RETURN(surface); + + /* When the queue is resized, change the reset flag to TPL_TRUE to reflect + * the changed window size at the next frame. */ + width = tbm_surface_queue_get_width(tbm_queue); + height = tbm_surface_queue_get_height(tbm_queue); + if (surface->width != width || surface->height != height) { + TPL_INFO("[QUEUE_RESIZE]", + "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)", + wl_egl_surface, tbm_queue, + surface->width, surface->height, width, height); + } + + /* When queue_reset_callback is called, if is_activated is different from + * its previous state change the reset flag to TPL_TRUE to get a new buffer + * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. 
*/ + is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client, + wl_egl_surface->tbm_queue); + if (wl_egl_surface->is_activated != is_activated) { + if (is_activated) { + TPL_INFO("[ACTIVATED]", + "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); + } else { + TPL_LOG_T("[DEACTIVATED]", + " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); + } + } + + wl_egl_surface->reset = TPL_TRUE; + + if (surface->reset_cb) + surface->reset_cb(surface->reset_data); +} + +static void +__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue, + void *data) +{ + TPL_IGNORE(tbm_queue); + + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + + tpl_gsource_send_message(wl_egl_surface->surf_source, 2); + + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); +} +/* -- END -- tbm_surface_queue callback funstions */ + +static void +_thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + + TPL_INFO("[SURFACE_FINI]", + "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", + wl_egl_surface, wl_egl_surface->wl_egl_window, + wl_egl_surface->wl_surface); + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + + if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) { + while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) { + struct pst_feedback *pst_feedback = + (struct pst_feedback *)__tpl_list_pop_front( + wl_egl_surface->presentation_feedbacks, NULL); + if (pst_feedback) { + _write_to_eventfd(pst_feedback->pst_sync_fd); + close(pst_feedback->pst_sync_fd); + pst_feedback->pst_sync_fd = -1; + + 
wp_presentation_feedback_destroy(pst_feedback->presentation_feedback); + pst_feedback->presentation_feedback = NULL; + + free(pst_feedback); + } + } + + __tpl_list_free(wl_egl_surface->presentation_feedbacks, NULL); + wl_egl_surface->presentation_feedbacks = NULL; + } + + if (wl_egl_surface->presentation_sync.fd != -1) { + _write_to_eventfd(wl_egl_surface->presentation_sync.fd); + close(wl_egl_surface->presentation_sync.fd); + wl_egl_surface->presentation_sync.fd = -1; + } + + if (wl_egl_surface->vblank_waiting_buffers) { + __tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL); + wl_egl_surface->vblank_waiting_buffers = NULL; + } + + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + + + { + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + int idx = 0; + tpl_bool_t need_to_release = TPL_FALSE; + tpl_bool_t need_to_cancel = TPL_FALSE; + + while (wl_egl_surface->buffer_cnt) { + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + wl_egl_buffer = wl_egl_surface->buffers[idx]; + if (wl_egl_buffer) { + TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)", + idx, wl_egl_buffer, + wl_egl_buffer->tbm_surface, + status_to_string[wl_egl_buffer->status]); + + wl_egl_surface->buffers[idx] = NULL; + wl_egl_surface->buffer_cnt--; + } else { + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + idx++; + continue; + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + + tpl_gmutex_lock(&wl_egl_buffer->mutex); + + need_to_release = (wl_egl_buffer->status == ACQUIRED || + wl_egl_buffer->status == WAITING_SIGNALED || + wl_egl_buffer->status == WAITING_VBLANK || + wl_egl_buffer->status == COMMITTED); + + need_to_cancel = wl_egl_buffer->status == DEQUEUED; + + if (wl_egl_buffer->status == WAITING_SIGNALED) { + tpl_result_t wait_result = TPL_ERROR_NONE; + wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, + &wl_egl_buffer->mutex, + 16); + if (wait_result == TPL_ERROR_TIME_OUT) + 
TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", + wl_egl_buffer); + } + + if (need_to_release) { + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + wl_egl_buffer->tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", + wl_egl_buffer->tbm_surface, tsq_err); + } + + if (need_to_cancel) { + tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, + wl_egl_buffer->tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)", + wl_egl_buffer->tbm_surface, tsq_err); + } + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + if (need_to_release || need_to_cancel) + tbm_surface_internal_unref(wl_egl_buffer->tbm_surface); + + idx++; + } + } + + if (wl_egl_surface->surface_sync) { + TPL_INFO("[SURFACE_SYNC_DESTROY]", + "wl_egl_surface(%p) surface_sync(%p)", + wl_egl_surface, wl_egl_surface->surface_sync); + zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync); + wl_egl_surface->surface_sync = NULL; + } + + if (wl_egl_surface->tss_flusher) { + TPL_INFO("[FLUSHER_DESTROY]", + "wl_egl_surface(%p) tss_flusher(%p)", + wl_egl_surface, wl_egl_surface->tss_flusher); + tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher); + wl_egl_surface->tss_flusher = NULL; + } + + if (wl_egl_surface->vblank) { + TPL_INFO("[VBLANK_DESTROY]", + "wl_egl_surface(%p) vblank(%p)", + wl_egl_surface, wl_egl_surface->vblank); + tdm_client_vblank_destroy(wl_egl_surface->vblank); + wl_egl_surface->vblank = NULL; + } + + if (wl_egl_surface->tbm_queue) { + TPL_INFO("[TBM_QUEUE_DESTROY]", + "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); + tbm_surface_queue_destroy(wl_egl_surface->tbm_queue); + wl_egl_surface->tbm_queue = NULL; + } + + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); +} + +static tpl_bool_t +__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) +{ + 
tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); + + /* Initialize surface */ + if (message == 1) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + TPL_DEBUG("wl_egl_surface(%p) initialize message received!", + wl_egl_surface); + _thread_wl_egl_surface_init(wl_egl_surface); + tpl_gcond_signal(&wl_egl_surface->surf_cond); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } else if (message == 2) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + TPL_DEBUG("wl_egl_surface(%p) acquirable message received!", + wl_egl_surface); + _thread_surface_queue_acquire(wl_egl_surface); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } + + return TPL_TRUE; +} + +static void +__thread_func_surf_finalize(tpl_gsource *gsource) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + _thread_wl_egl_surface_fini(wl_egl_surface); + + TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%p)", + gsource, wl_egl_surface); +} + +static tpl_gsource_functions surf_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_surf_dispatch, + .finalize = __thread_func_surf_finalize, +}; + +static tpl_result_t +__tpl_wl_egl_surface_init(tpl_surface_t *surface) +{ + tpl_wl_egl_display_t *wl_egl_display = NULL; + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tpl_gsource *surf_source = NULL; + + struct wl_egl_window *wl_egl_window = + (struct wl_egl_window *)surface->native_handle; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); + TPL_ASSERT(surface->native_handle); + + wl_egl_display = + (tpl_wl_egl_display_t *)surface->display->backend.data; + if (!wl_egl_display) { + TPL_ERR("Invalid parameter. 
wl_egl_display(%p)", + wl_egl_display); + return TPL_ERROR_INVALID_PARAMETER; + } + + wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1, + sizeof(tpl_wl_egl_surface_t)); + if (!wl_egl_surface) { + TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface, + -1, &surf_funcs, SOURCE_TYPE_NORMAL); + if (!surf_source) { + TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)", + wl_egl_surface); + goto surf_source_create_fail; + } + + surface->backend.data = (void *)wl_egl_surface; + surface->width = wl_egl_window->width; + surface->height = wl_egl_window->height; + surface->rotation = 0; + + wl_egl_surface->tpl_surface = surface; + wl_egl_surface->width = wl_egl_window->width; + wl_egl_surface->height = wl_egl_window->height; + wl_egl_surface->format = surface->format; + + wl_egl_surface->surf_source = surf_source; + wl_egl_surface->wl_egl_window = wl_egl_window; + wl_egl_surface->wl_surface = wl_egl_window->surface; + + wl_egl_surface->wl_egl_display = wl_egl_display; + + wl_egl_surface->reset = TPL_FALSE; + wl_egl_surface->is_activated = TPL_FALSE; + wl_egl_surface->need_to_enqueue = TPL_TRUE; + wl_egl_surface->prerotation_capability = TPL_FALSE; + wl_egl_surface->vblank_done = TPL_TRUE; + wl_egl_surface->use_render_done_fence = TPL_FALSE; + wl_egl_surface->set_serial_is_used = TPL_FALSE; + + wl_egl_surface->latest_transform = 0; + wl_egl_surface->render_done_cnt = 0; + wl_egl_surface->serial = 0; + + wl_egl_surface->vblank = NULL; + wl_egl_surface->tss_flusher = NULL; + wl_egl_surface->surface_sync = NULL; + + wl_egl_surface->post_interval = surface->post_interval; + + wl_egl_surface->commit_sync.fd = -1; + wl_egl_surface->presentation_sync.fd = -1; + + { + int i = 0; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) + wl_egl_surface->buffers[i] = NULL; + wl_egl_surface->buffer_cnt = 0; + } + + { + struct tizen_private *tizen_private 
= NULL; + + if (wl_egl_window->driver_private) + tizen_private = (struct tizen_private *)wl_egl_window->driver_private; + else { + tizen_private = tizen_private_create(); + wl_egl_window->driver_private = (void *)tizen_private; + } + + if (tizen_private) { + tizen_private->data = (void *)wl_egl_surface; + tizen_private->rotate_callback = (void *)__cb_rotate_callback; + tizen_private->get_rotation_capability = (void *) + __cb_get_rotation_capability; + tizen_private->set_window_serial_callback = (void *) + __cb_set_window_serial_callback; + tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd; + tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd; + + wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback; + wl_egl_window->resize_callback = (void *)__cb_resize_callback; + } + } + + tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex); + tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex); + + tpl_gmutex_init(&wl_egl_surface->buffers_mutex); + + tpl_gmutex_init(&wl_egl_surface->surf_mutex); + tpl_gcond_init(&wl_egl_surface->surf_cond); + + /* Initialize in thread */ + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + tpl_gsource_send_message(wl_egl_surface->surf_source, 1); + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + + TPL_ASSERT(wl_egl_surface->tbm_queue); + + TPL_INFO("[SURFACE_INIT]", + "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)", + surface, wl_egl_surface, wl_egl_surface->surf_source); + + return TPL_ERROR_NONE; + +surf_source_create_fail: + free(wl_egl_surface); + surface->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; +} + +static tbm_surface_queue_h +_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface, + struct wayland_tbm_client *wl_tbm_client, + int num_buffers) +{ + tbm_surface_queue_h tbm_queue = NULL; + tbm_bufmgr bufmgr = NULL; + unsigned int capability; + + struct 
wl_surface *wl_surface = wl_egl_surface->wl_surface; + int width = wl_egl_surface->width; + int height = wl_egl_surface->height; + int format = wl_egl_surface->format; + + if (!wl_tbm_client || !wl_surface) { + TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)", + wl_tbm_client, wl_surface); + return NULL; + } + + bufmgr = tbm_bufmgr_init(-1); + capability = tbm_bufmgr_get_capability(bufmgr); + tbm_bufmgr_deinit(bufmgr); + + if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) { + tbm_queue = wayland_tbm_client_create_surface_queue_tiled( + wl_tbm_client, + wl_surface, + num_buffers, + width, + height, + format); + } else { + tbm_queue = wayland_tbm_client_create_surface_queue( + wl_tbm_client, + wl_surface, + num_buffers, + width, + height, + format); + } + + if (!tbm_queue) { + TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)", + wl_tbm_client); + return NULL; + } + + if (tbm_surface_queue_set_modes( + tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) != + TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; + } + + if (tbm_surface_queue_add_reset_cb( + tbm_queue, + __cb_tbm_queue_reset_callback, + (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; + } + + if (tbm_surface_queue_add_acquirable_cb( + tbm_queue, + __cb_tbm_queue_acquirable_callback, + (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; + } + + return tbm_queue; +} + +static tdm_client_vblank* +_thread_create_tdm_client_vblank(tdm_client *tdm_client) +{ + tdm_client_vblank *vblank = NULL; + tdm_client_output *tdm_output = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; + + if 
(!tdm_client) { + TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client); + return NULL; + } + + tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err); + if (!tdm_output || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err); + return NULL; + } + + vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err); + if (!vblank || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err); + return NULL; + } + + tdm_client_vblank_set_enable_fake(vblank, 1); + tdm_client_vblank_set_sync(vblank, 0); + + return vblank; +} + +static void +_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + wl_egl_surface->tbm_queue = _thread_create_tbm_queue( + wl_egl_surface, + wl_egl_display->wl_tbm_client, + CLIENT_QUEUE_SIZE); + if (!wl_egl_surface->tbm_queue) { + TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)", + wl_egl_surface, wl_egl_display->wl_tbm_client); + return; + } + + TPL_INFO("[QUEUE_CREATION]", + "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, + wl_egl_display->wl_tbm_client); + TPL_INFO("[QUEUE_CREATION]", + "tbm_queue(%p) size(%d x %d) X %d format(%d)", + wl_egl_surface->tbm_queue, + wl_egl_surface->width, + wl_egl_surface->height, + CLIENT_QUEUE_SIZE, + wl_egl_surface->format); + + wl_egl_surface->vblank = _thread_create_tdm_client_vblank( + wl_egl_display->tdm_client); + if (wl_egl_surface->vblank) { + TPL_INFO("[VBLANK_INIT]", + "wl_egl_surface(%p) tdm_client(%p) vblank(%p)", + wl_egl_surface, wl_egl_display->tdm_client, + wl_egl_surface->vblank); + } + + if (wl_egl_display->tss) { + wl_egl_surface->tss_flusher = + tizen_surface_shm_get_flusher(wl_egl_display->tss, + wl_egl_surface->wl_surface); + } + + if (wl_egl_surface->tss_flusher) { + tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher, + 
&tss_flusher_listener, + wl_egl_surface); + TPL_INFO("[FLUSHER_INIT]", + "wl_egl_surface(%p) tss_flusher(%p)", + wl_egl_surface, wl_egl_surface->tss_flusher); + } + + if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) { + wl_egl_surface->surface_sync = + zwp_linux_explicit_synchronization_v1_get_synchronization( + wl_egl_display->explicit_sync, wl_egl_surface->wl_surface); + if (wl_egl_surface->surface_sync) { + TPL_INFO("[EXPLICIT_SYNC_INIT]", + "wl_egl_surface(%p) surface_sync(%p)", + wl_egl_surface, wl_egl_surface->surface_sync); + } else { + TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)", + wl_egl_surface); + wl_egl_display->use_explicit_sync = TPL_FALSE; + } + } + + wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc(); + wl_egl_surface->presentation_feedbacks = __tpl_list_alloc(); +} + +static void +__tpl_wl_egl_surface_fini(tpl_surface_t *surface) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = NULL; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + + TPL_CHECK_ON_FALSE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW); + + wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + wl_egl_display = wl_egl_surface->wl_egl_display; + TPL_CHECK_ON_NULL_RETURN(wl_egl_display); + + TPL_INFO("[SURFACE_FINI][BEGIN]", + "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, + wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue); + + if (wl_egl_surface->surf_source) + tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); + wl_egl_surface->surf_source = NULL; + + _print_buffer_lists(wl_egl_surface); + + if (wl_egl_surface->wl_egl_window) { + struct tizen_private *tizen_private = NULL; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + TPL_INFO("[WL_EGL_WINDOW_FINI]", + "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", + wl_egl_surface, wl_egl_window, + 
wl_egl_surface->wl_surface); + tizen_private = (struct tizen_private *)wl_egl_window->driver_private; + if (tizen_private) { + tizen_private->set_window_serial_callback = NULL; + tizen_private->rotate_callback = NULL; + tizen_private->get_rotation_capability = NULL; + tizen_private->create_presentation_sync_fd = NULL; + tizen_private->create_commit_sync_fd = NULL; + tizen_private->set_frontbuffer_callback = NULL; + tizen_private->merge_sync_fds = NULL; + tizen_private->data = NULL; + free(tizen_private); + + wl_egl_window->driver_private = NULL; + } + + wl_egl_window->destroy_window_callback = NULL; + wl_egl_window->resize_callback = NULL; + + wl_egl_surface->wl_egl_window = NULL; + } + + wl_egl_surface->wl_surface = NULL; + wl_egl_surface->wl_egl_display = NULL; + wl_egl_surface->tpl_surface = NULL; + + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex); + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex); + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + tpl_gmutex_clear(&wl_egl_surface->surf_mutex); + tpl_gcond_clear(&wl_egl_surface->surf_cond); + + TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface); + + free(wl_egl_surface); + surface->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface, + tpl_bool_t set) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); + + wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + + TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); + + TPL_INFO("[SET_PREROTATION_CAPABILITY]", + "wl_egl_surface(%p) prerotation capability set to [%s]", + 
wl_egl_surface, (set ? "TRUE" : "FALSE")); + + wl_egl_surface->prerotation_capability = set; + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface, + int post_interval) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); + + wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + + TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); + + TPL_INFO("[SET_POST_INTERVAL]", + "wl_egl_surface(%p) post_interval(%d -> %d)", + wl_egl_surface, wl_egl_surface->post_interval, post_interval); + + wl_egl_surface->post_interval = post_interval; + + return TPL_ERROR_NONE; +} + +static tpl_bool_t +__tpl_wl_egl_surface_validate(tpl_surface_t *surface) +{ + tpl_bool_t retval = TPL_TRUE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + + retval = !(wl_egl_surface->reset); + + return retval; +} + +static void +__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) +{ + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + + if (width) + *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); + if (height) + *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); +} + +#define CAN_DEQUEUE_TIMEOUT_MS 10000 + +tpl_result_t +_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + _print_buffer_lists(wl_egl_surface); + + if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue)) + != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)", + wl_egl_surface->tbm_queue, tsq_err); + return TPL_ERROR_INVALID_OPERATION; + } + + { + int i; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) { + 
tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + wl_egl_buffer = wl_egl_surface->buffers[i]; + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + if (wl_egl_buffer && wl_egl_buffer->status == COMMITTED) { + wl_egl_buffer->status = RELEASED; + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + wl_egl_buffer->tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", + wl_egl_buffer->tbm_surface, tsq_err); + tbm_surface_internal_unref(wl_egl_buffer->tbm_surface); + } + } + } + + TPL_INFO("[FORCE_FLUSH]", + "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); + + return TPL_ERROR_NONE; +} + +static void +_wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer, + tpl_wl_egl_surface_t *wl_egl_surface) +{ + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + struct tizen_private *tizen_private = + (struct tizen_private *)wl_egl_window->driver_private; + + TPL_ASSERT(tizen_private); + + wl_egl_buffer->draw_done = TPL_FALSE; + wl_egl_buffer->need_to_commit = TPL_TRUE; + + wl_egl_buffer->acquire_fence_fd = -1; + wl_egl_buffer->release_fence_fd = -1; + wl_egl_buffer->commit_sync_fd = -1; + wl_egl_buffer->presentation_sync_fd = -1; + + wl_egl_buffer->buffer_release = NULL; + + wl_egl_buffer->transform = tizen_private->transform; + + if (wl_egl_buffer->w_transform != tizen_private->window_transform) { + wl_egl_buffer->w_transform = tizen_private->window_transform; + wl_egl_buffer->w_rotated = TPL_TRUE; + } + + if (wl_egl_surface->set_serial_is_used) { + wl_egl_buffer->serial = wl_egl_surface->serial; + } else { + wl_egl_buffer->serial = ++tizen_private->serial; + } + + if (wl_egl_buffer->rects) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } +} + +static tpl_wl_egl_buffer_t * +_get_wl_egl_buffer(tbm_surface_h tbm_surface) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + 
tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER, + (void **)&wl_egl_buffer); + return wl_egl_buffer; +} + +static tpl_wl_egl_buffer_t * +_wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface, + tbm_surface_h tbm_surface) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + + if (!wl_egl_buffer) { + wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t)); + TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL); + + tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER, + (tbm_data_free)__cb_wl_egl_buffer_free); + tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER, + wl_egl_buffer); + + wl_egl_buffer->wl_buffer = NULL; + wl_egl_buffer->tbm_surface = tbm_surface; + wl_egl_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface); + wl_egl_buffer->wl_egl_surface = wl_egl_surface; + + wl_egl_buffer->status = RELEASED; + + wl_egl_buffer->dx = wl_egl_window->dx; + wl_egl_buffer->dy = wl_egl_window->dy; + wl_egl_buffer->width = tbm_surface_get_width(tbm_surface); + wl_egl_buffer->height = tbm_surface_get_height(tbm_surface); + + tpl_gmutex_init(&wl_egl_buffer->mutex); + tpl_gcond_init(&wl_egl_buffer->cond); + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + { + int i; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) + if (wl_egl_surface->buffers[i] == NULL) break; + + wl_egl_surface->buffer_cnt++; + wl_egl_surface->buffers[i] = wl_egl_buffer; + wl_egl_buffer->idx = i; + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + + TPL_INFO("[WL_EGL_BUFFER_CREATE]", + "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, wl_egl_buffer, tbm_surface, + wl_egl_buffer->bo_name); + } + + _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface); + + return wl_egl_buffer; +} + +static tbm_surface_h +__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, + 
int32_t *release_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->backend.data); + TPL_OBJECT_CHECK_RETURN(surface, NULL); + + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)surface->display->backend.data; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_bool_t is_activated = 0; + int bo_name = 0; + tbm_surface_h tbm_surface = NULL; + + TPL_OBJECT_UNLOCK(surface); + tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( + wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS); + TPL_OBJECT_LOCK(surface); + + /* After the can dequeue state, lock the wl_event_mutex to prevent other + * events from being processed in wayland_egl_thread + * during below dequeue procedure. */ + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); + + if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { + TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", + wl_egl_surface->tbm_queue, surface); + if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) { + TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)", + wl_egl_surface->tbm_queue, surface); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + return NULL; + } else { + tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + } + } + + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)", + wl_egl_surface->tbm_queue, surface); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + return NULL; + } + + /* wayland client can check their states (ACTIVATED or DEACTIVATED) with + * below function [wayland_tbm_client_queue_check_activate()]. + * This function has to be called before tbm_surface_queue_dequeue() + * in order to know what state the buffer will be dequeued next. 
+ * + * ACTIVATED state means non-composite mode. Client can get buffers which + can be displayed directly(without compositing). + * DEACTIVATED state means composite mode. Client's buffer will be displayed + by compositor(E20) with compositing. + */ + is_activated = wayland_tbm_client_queue_check_activate( + wl_egl_display->wl_tbm_client, + wl_egl_surface->tbm_queue); + + wl_egl_surface->is_activated = is_activated; + + surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); + surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); + wl_egl_surface->width = surface->width; + wl_egl_surface->height = surface->height; + + if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { + /* If surface->frontbuffer is already set in frontbuffer mode, + * it will return that frontbuffer if it is still activated, + * otherwise dequeue the new buffer after initializing + * surface->frontbuffer to NULL. */ + if (is_activated && !wl_egl_surface->reset) { + bo_name = _get_tbm_surface_bo_name(surface->frontbuffer); + + TPL_LOG_T("WL_EGL", + "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", + surface->frontbuffer, bo_name); + TRACE_ASYNC_BEGIN((int)surface->frontbuffer, + "[DEQ]~[ENQ] BO_NAME:%d", + bo_name); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + return surface->frontbuffer; + } else { + surface->frontbuffer = NULL; + wl_egl_surface->need_to_enqueue = TPL_TRUE; + } + } else { + surface->frontbuffer = NULL; + } + + tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue, + &tbm_surface); + if (!tbm_surface) { + TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d", + wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + return NULL; + } + + tbm_surface_internal_ref(tbm_surface); + + wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer."); 
+ + tpl_gmutex_lock(&wl_egl_buffer->mutex); + wl_egl_buffer->status = DEQUEUED; + + /* If wl_egl_buffer->release_fence_fd is -1, + * the tbm_surface can be used immediately. + * If not, user(EGL) have to wait until signaled. */ + if (release_fence) { + if (wl_egl_surface->surface_sync) { + *release_fence = wl_egl_buffer->release_fence_fd; + TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)", + wl_egl_surface, wl_egl_buffer, *release_fence); + } else { + *release_fence = -1; + } + } + + if (surface->is_frontbuffer_mode && is_activated) + surface->frontbuffer = tbm_surface; + + wl_egl_surface->reset = TPL_FALSE; + + TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name); + TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", + wl_egl_buffer->bo_name); + TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name, + release_fence ? *release_fence : -1); + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + + return tbm_surface; +} + +static tpl_result_t +__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Invalid buffer. 
tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + if (wl_egl_buffer) { + tpl_gmutex_lock(&wl_egl_buffer->mutex); + wl_egl_buffer->status = RELEASED; + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + } + + tbm_surface_internal_unref(tbm_surface); + + tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to release tbm_surface(%p) surface(%p)", + tbm_surface, surface); + return TPL_ERROR_INVALID_OPERATION; + } + + TPL_INFO("[CANCEL_BUFFER]", "wl_egl_surface(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, int32_t acquire_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(tbm_surface); + TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); + + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *) surface->backend.data; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + int bo_name = -1; + + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", + tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + return TPL_ERROR_INVALID_PARAMETER; + } + + bo_name = _get_tbm_surface_bo_name(tbm_surface); + TRACE_MARK("[ENQ] BO_NAME:%d", bo_name); - TPL_LOG_T("WL_EGL", - "[ENQ] wayland_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", - wayland_egl_surface, tbm_surface, bo_name, sync_fence); + TPL_LOG_T("WL_EGL", + "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_surface, tbm_surface, bo_name, acquire_fence); + + wl_egl_buffer = 
_get_wl_egl_buffer(tbm_surface); + tpl_gmutex_lock(&wl_egl_buffer->mutex); + + /* If there are received region information, save it to wl_egl_buffer */ + if (num_rects && rects) { + if (wl_egl_buffer->rects != NULL) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } + + wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects)); + wl_egl_buffer->num_rects = num_rects; + + if (!wl_egl_buffer->rects) { + TPL_ERR("Failed to allocate memory fo damage rects info."); + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + return TPL_ERROR_OUT_OF_MEMORY; + } + + memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects); + } + + if (!wl_egl_surface->need_to_enqueue || + !wl_egl_buffer->need_to_commit) { + TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", + ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + return TPL_ERROR_NONE; + } + + /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and + * commit if surface->frontbuffer that is already set and the tbm_surface + * client want to enqueue are the same. + */ + if (surface->is_frontbuffer_mode) { + /* The first buffer to be activated in frontbuffer mode must be + * committed. Subsequence frames do not need to be committed because + * the buffer is already displayed. 
+ */ + if (surface->frontbuffer == tbm_surface) + wl_egl_surface->need_to_enqueue = TPL_FALSE; + + if (acquire_fence != -1) { + close(acquire_fence); + acquire_fence = -1; + } + } + + if (wl_egl_buffer->acquire_fence_fd != -1) + close(wl_egl_buffer->acquire_fence_fd); + + wl_egl_buffer->acquire_fence_fd = acquire_fence; + wl_egl_buffer->status = ENQUEUED; + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + if (wl_egl_surface->presentation_sync.fd != -1) { + wl_egl_buffer->presentation_sync_fd = wl_egl_surface->presentation_sync.fd; + wl_egl_surface->presentation_sync.fd = -1; + } + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); + if (wl_egl_surface->commit_sync.fd != -1) { + wl_egl_buffer->commit_sync_fd = wl_egl_surface->commit_sync.fd; + wl_egl_surface->commit_sync.fd = -1; + TRACE_ASYNC_BEGIN(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + } + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + tbm_surface_internal_unref(tbm_surface); + TPL_ERR("Failed to enqueue tbm_surface(%p). 
wl_egl_surface(%p) tsq_err=%d", + tbm_surface, wl_egl_surface, tsq_err); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + return TPL_ERROR_INVALID_OPERATION; + } + + tbm_surface_internal_unref(tbm_surface); + + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + + return TPL_ERROR_NONE; +} + +static tpl_bool_t +__thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = + (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource); + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; + + wl_egl_surface->render_done_cnt++; + + TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)", + wl_egl_buffer->acquire_fence_fd); + + TPL_DEBUG("[RENDER DONE] wl_egl_buffer(%p) tbm_surface(%p)", + wl_egl_buffer, tbm_surface); + + tpl_gmutex_lock(&wl_egl_buffer->mutex); + tpl_gcond_signal(&wl_egl_buffer->cond); + wl_egl_buffer->status = WAITING_VBLANK; + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + + if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done) + _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer); + else + __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, + wl_egl_buffer); + + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + + return TPL_FALSE; +} + +static void +__thread_func_waiting_source_finalize(tpl_gsource *gsource) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = + (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource); + + TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)", + wl_egl_buffer, wl_egl_buffer->waiting_source, + wl_egl_buffer->acquire_fence_fd); + + close(wl_egl_buffer->acquire_fence_fd); + wl_egl_buffer->acquire_fence_fd = -1; + wl_egl_buffer->waiting_source = NULL; +} + +static tpl_gsource_functions 
buffer_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_waiting_source_dispatch, + .finalize = __thread_func_waiting_source_finalize, +}; + +static tpl_result_t +_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tbm_surface_h tbm_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tpl_bool_t ready_to_commit = TPL_FALSE; + + while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) { + tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue, + &tbm_surface); + if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to acquire from tbm_queue(%p)", + wl_egl_surface->tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } - /* If there are received region information, - * save it to buf_info in tbm_surface user_data using below API. */ - if (num_rects && rects) { - ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects); - if (ret != TPL_ERROR_NONE) { - TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)", - num_rects, rects); + tbm_surface_internal_ref(tbm_surface); + + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, + "wl_egl_buffer sould be not NULL"); + + tpl_gmutex_lock(&wl_egl_buffer->mutex); + + wl_egl_buffer->status = ACQUIRED; + + if (wl_egl_buffer->wl_buffer == NULL) { + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + wl_egl_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + wl_egl_display->wl_tbm_client, tbm_surface); + + if (!wl_egl_buffer->wl_buffer) { + TPL_WARN("Failed to create wl_buffer. 
wl_tbm_client(%p) tbm_surface(%p)", + wl_egl_display->wl_tbm_client, tbm_surface); + } } - } - if (!wayland_egl_surface->need_to_enqueue || - !twe_surface_check_commit_needed(wayland_egl_surface->twe_surface, - tbm_surface)) { - TPL_LOG_T("WL_EGL", - "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", - ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wayland_egl_surface); - return TPL_ERROR_NONE; + if (wl_egl_buffer->acquire_fence_fd != -1) { + if (wl_egl_surface->surface_sync) + ready_to_commit = TPL_TRUE; + else { + if (wl_egl_buffer->waiting_source) { + tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); + wl_egl_buffer->waiting_source = NULL; + } + + wl_egl_buffer->waiting_source = + tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer, + wl_egl_buffer->acquire_fence_fd, &buffer_funcs, + SOURCE_TYPE_DISPOSABLE); + wl_egl_buffer->status = WAITING_SIGNALED; + + TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)", + wl_egl_buffer->acquire_fence_fd); + + ready_to_commit = TPL_FALSE; + } + } + + if (ready_to_commit) { + if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done) + ready_to_commit = TPL_TRUE; + else { + wl_egl_buffer->status = WAITING_VBLANK; + __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, wl_egl_buffer); + ready_to_commit = TPL_FALSE; + } + } + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + if (ready_to_commit) + _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer); } - /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and - * commit if surface->frontbuffer that is already set and the tbm_surface - * client want to enqueue are the same. - */ - if (surface->is_frontbuffer_mode) { - /* The first buffer to be activated in frontbuffer mode must be - * committed. 
Subsequence frames do not need to be committed because - * the buffer is already displayed. - */ - if (surface->frontbuffer == tbm_surface) - wayland_egl_surface->need_to_enqueue = TPL_FALSE; + return TPL_ERROR_NONE; +} + +/* -- BEGIN -- tdm_client vblank callback function */ +static void +__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, + unsigned int sequence, unsigned int tv_sec, + unsigned int tv_usec, void *user_data) +{ + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + + TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK"); + TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface); + + if (error == TDM_ERROR_TIMEOUT) + TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)", + wl_egl_surface); + + wl_egl_surface->vblank_done = TPL_TRUE; + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front( + wl_egl_surface->vblank_waiting_buffers, + NULL); + if (wl_egl_buffer) + _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); +} +/* -- END -- tdm_client vblank callback function */ + +static void +__cb_buffer_fenced_release(void *data, + struct zwp_linux_buffer_release_v1 *release, int32_t fence) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer); + + tbm_surface = wl_egl_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + if (wl_egl_buffer->status == COMMITTED) { + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_queue_error_e tsq_err; + + tpl_gmutex_lock(&wl_egl_buffer->mutex); + + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + wl_egl_buffer->buffer_release = NULL; + + wl_egl_buffer->release_fence_fd = fence; + wl_egl_buffer->status = RELEASED; + + 
TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", + _get_tbm_surface_bo_name(tbm_surface), + fence); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); - if (sync_fence != -1) { - close(sync_fence); - sync_fence = -1; + TPL_LOG_T("WL_EGL", + "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface), + fence); + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + tbm_surface_internal_unref(tbm_surface); } + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); } +} + +static void +__cb_buffer_immediate_release(void *data, + struct zwp_linux_buffer_release_v1 *release) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer); + + tbm_surface = wl_egl_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + if (wl_egl_buffer->status == COMMITTED) { + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_queue_error_e tsq_err; + + tpl_gmutex_lock(&wl_egl_buffer->mutex); + + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + wl_egl_buffer->buffer_release = NULL; + + wl_egl_buffer->release_fence_fd = -1; + wl_egl_buffer->status = RELEASED; + + TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); - if (sync_fence != -1) { - ret = twe_surface_set_sync_fd(wayland_egl_surface->twe_surface, - tbm_surface, sync_fence); - if (ret != TPL_ERROR_NONE) { - TPL_WARN("Failed to set sync fd (%d). 
But it will continue.", - sync_fence); + TPL_LOG_T("WL_EGL", + "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + tbm_surface_internal_unref(tbm_surface); } + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); } +} - tsq_err = tbm_surface_queue_enqueue(wayland_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - tbm_surface_internal_unref(tbm_surface); - TPL_ERR("Failed to enqueue tbm_surface(%p). tpl_surface(%p) tsq_err=%d", - tbm_surface, surface, tsq_err); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wayland_egl_surface); - return TPL_ERROR_INVALID_OPERATION; - } +static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = { + __cb_buffer_fenced_release, + __cb_buffer_immediate_release, +}; - tbm_surface_internal_unref(tbm_surface); +static void +__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wayland_egl_surface); + TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer) - return TPL_ERROR_NONE; + tbm_surface = wl_egl_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; + + tpl_gmutex_lock(&wl_egl_buffer->mutex); + + if (wl_egl_buffer->status == COMMITTED) { + + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + tbm_surface); + if 
(tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + wl_egl_buffer->status = RELEASED; + + TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + } + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) + tbm_surface_internal_unref(tbm_surface); + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } } -static tpl_bool_t -__tpl_wl_egl_surface_validate(tpl_surface_t *surface) +static const struct wl_buffer_listener wl_buffer_release_listener = { + (void *)__cb_wl_buffer_release, +}; + +static void +__cb_presentation_feedback_sync_output(void *data, + struct wp_presentation_feedback *presentation_feedback, + struct wl_output *output) { - tpl_bool_t retval = TPL_TRUE; + TPL_IGNORE(data); + TPL_IGNORE(presentation_feedback); + TPL_IGNORE(output); + /* Nothing to do */ +} - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); +static void +__cb_presentation_feedback_presented(void *data, + struct wp_presentation_feedback *presentation_feedback, + uint32_t tv_sec_hi, + uint32_t tv_sec_lo, + uint32_t tv_nsec, + uint32_t refresh_nsec, + uint32_t seq_hi, + uint32_t seq_lo, + uint32_t flags) +{ + TPL_IGNORE(tv_sec_hi); + TPL_IGNORE(tv_sec_lo); + TPL_IGNORE(tv_nsec); + TPL_IGNORE(refresh_nsec); + TPL_IGNORE(seq_hi); + TPL_IGNORE(seq_lo); + TPL_IGNORE(flags); + + struct pst_feedback *pst_feedback = (struct pst_feedback *)data; + tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface; + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + + TPL_DEBUG("[FEEDBACK][PRESENTED] pst_feedback(%p) presentation_feedback(%p) bo(%d)", + pst_feedback, 
presentation_feedback, pst_feedback->bo_name); + + if (pst_feedback->pst_sync_fd != -1) { + int ret = _write_to_eventfd(pst_feedback->pst_sync_fd); + if (ret == -1) { + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + pst_feedback->pst_sync_fd); + } + + TRACE_ASYNC_END(pst_feedback->pst_sync_fd, + "[PRESENTATION_SYNC] bo(%d)", + pst_feedback->bo_name); - tpl_wayland_egl_surface_t *wayland_egl_surface = - (tpl_wayland_egl_surface_t *)surface->backend.data; + close(pst_feedback->pst_sync_fd); + pst_feedback->pst_sync_fd = -1; + } - retval = !(wayland_egl_surface->reset); + wp_presentation_feedback_destroy(presentation_feedback); - return retval; + pst_feedback->presentation_feedback = NULL; + pst_feedback->wl_egl_surface = NULL; + pst_feedback->bo_name = 0; + + __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback, + TPL_FIRST, NULL); + + free(pst_feedback); + + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); } -static tpl_result_t -__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface) +static void +__cb_presentation_feedback_discarded(void *data, + struct wp_presentation_feedback *presentation_feedback) { - tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + struct pst_feedback *pst_feedback = (struct pst_feedback *)data; + tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface; - wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; - if (!wayland_egl_surface) { - TPL_ERR("Invalid backend surface. 
surface(%p) wayland_egl_surface(%p)", - surface, wayland_egl_surface); - return TPL_ERROR_INVALID_PARAMETER; + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + + TPL_DEBUG("[FEEDBACK][DISCARDED] pst_feedback(%p) presentation_feedback(%p) bo(%d)", + pst_feedback, presentation_feedback, pst_feedback->bo_name); + + if (pst_feedback->pst_sync_fd != -1) { + int ret = _write_to_eventfd(pst_feedback->pst_sync_fd); + if (ret == -1) { + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + pst_feedback->pst_sync_fd); + } + + TRACE_ASYNC_END(pst_feedback->pst_sync_fd, + "[PRESENTATION_SYNC] bo(%d)", + pst_feedback->bo_name); + + close(pst_feedback->pst_sync_fd); + pst_feedback->pst_sync_fd = -1; } - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface); - return TPL_ERROR_INVALID_PARAMETER; + wp_presentation_feedback_destroy(presentation_feedback); + + pst_feedback->presentation_feedback = NULL; + pst_feedback->wl_egl_surface = NULL; + pst_feedback->bo_name = 0; + + __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback, + TPL_FIRST, NULL); + + free(pst_feedback); + + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); +} + +static const struct wp_presentation_feedback_listener feedback_listener = { + __cb_presentation_feedback_sync_output, /* sync_output feedback -*/ + __cb_presentation_feedback_presented, + __cb_presentation_feedback_discarded +}; + +static tpl_result_t +_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tdm_error tdm_err = TDM_ERROR_NONE; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + if (wl_egl_surface->vblank == NULL) { + wl_egl_surface->vblank = + _thread_create_tdm_client_vblank(wl_egl_display->tdm_client); + if (!wl_egl_surface->vblank) { + TPL_WARN("Failed to create vblank. 
wl_egl_surface(%p)", + wl_egl_surface); + return TPL_ERROR_OUT_OF_MEMORY; + } } - tbm_surface_internal_unref(tbm_surface); + tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank, + wl_egl_surface->post_interval, + __cb_tdm_client_vblank, + (void *)wl_egl_surface); - tsq_err = tbm_surface_queue_cancel_dequeue(wayland_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to release tbm_surface(%p) surface(%p)", - tbm_surface, surface); + if (tdm_err == TDM_ERROR_NONE) { + wl_egl_surface->vblank_done = TPL_FALSE; + TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK"); + } else { + TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err); return TPL_ERROR_INVALID_OPERATION; } - TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)", - surface, tbm_surface); - return TPL_ERROR_NONE; } -#define CAN_DEQUEUE_TIMEOUT_MS 10000 - -static tbm_surface_h -__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, - tbm_fd *sync_fence) +static void +_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, + tpl_wl_egl_buffer_t *wl_egl_buffer) { - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->display->backend.data); - TPL_OBJECT_CHECK_RETURN(surface, NULL); + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + struct wl_surface *wl_surface = wl_egl_surface->wl_surface; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + uint32_t version; + + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, + "wl_egl_buffer sould be not NULL"); + + if (wl_egl_buffer->wl_buffer == NULL) { + wl_egl_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + wl_egl_display->wl_tbm_client, + wl_egl_buffer->tbm_surface); + } + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL, + "[FATAL] Failed to create wl_buffer"); - tbm_surface_h tbm_surface = NULL; - 
tpl_wayland_egl_surface_t *wayland_egl_surface = - (tpl_wayland_egl_surface_t *)surface->backend.data; - tpl_wayland_egl_display_t *wayland_egl_display = - (tpl_wayland_egl_display_t *)surface->display->backend.data; - tbm_surface_queue_error_e tsq_err = 0; - int is_activated = 0; - int bo_name = 0; - tpl_result_t lock_ret = TPL_FALSE; + wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer, + &wl_buffer_release_listener, wl_egl_buffer); - TPL_OBJECT_UNLOCK(surface); - tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( - wayland_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS); - TPL_OBJECT_LOCK(surface); + version = wl_proxy_get_version((struct wl_proxy *)wl_surface); - /* After the can dequeue state, call twe_display_lock to prevent other - * events from being processed in wayland_egl_thread - * during below dequeue procedure. */ - lock_ret = twe_display_lock(wayland_egl_display->twe_display); + /* create presentation feedback and add listener */ + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) { - if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { - TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", - wayland_egl_surface->tbm_queue, surface); - if (twe_surface_queue_force_flush(wayland_egl_surface->twe_surface) - != TPL_ERROR_NONE) { - TPL_ERR("Failed to timeout reset. 
tbm_queue(%p) surface(%p)", - wayland_egl_surface->tbm_queue, surface); - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wayland_egl_display->twe_display); - return NULL; + struct pst_feedback *pst_feedback = NULL; + pst_feedback = (struct pst_feedback *) calloc(1, sizeof(struct pst_feedback)); + if (pst_feedback) { + pst_feedback->presentation_feedback = + wp_presentation_feedback(wl_egl_display->presentation, + wl_surface); + + pst_feedback->wl_egl_surface = wl_egl_surface; + pst_feedback->bo_name = wl_egl_buffer->bo_name; + + pst_feedback->pst_sync_fd = wl_egl_buffer->presentation_sync_fd; + wl_egl_buffer->presentation_sync_fd = -1; + + wp_presentation_feedback_add_listener(pst_feedback->presentation_feedback, + &feedback_listener, pst_feedback); + __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, pst_feedback); + TRACE_ASYNC_BEGIN(pst_feedback->pst_sync_fd, + "[PRESENTATION_SYNC] bo(%d)", + pst_feedback->bo_name); } else { - tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + TPL_ERR("Failed to create presentation feedback. wl_egl_buffer(%p)", + wl_egl_buffer); + _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; } } + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + + if (wl_egl_buffer->w_rotated == TPL_TRUE) { + wayland_tbm_client_set_buffer_transform( + wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer, + wl_egl_buffer->w_transform); + wl_egl_buffer->w_rotated = TPL_FALSE; + } - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to query can_dequeue. 
tbm_queue(%p) surface(%p)", - wayland_egl_surface->tbm_queue, surface); - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wayland_egl_display->twe_display); - return NULL; + if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) { + wl_egl_surface->latest_transform = wl_egl_buffer->transform; + wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform); } - /* wayland client can check their states (ACTIVATED or DEACTIVATED) with - * below function [wayland_tbm_client_queue_check_activate()]. - * This function has to be called before tbm_surface_queue_dequeue() - * in order to know what state the buffer will be dequeued next. - * - * ACTIVATED state means non-composite mode. Client can get buffers which - can be displayed directly(without compositing). - * DEACTIVATED state means composite mode. Client's buffer will be displayed - by compositor(E20) with compositing. - */ - is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface); - wayland_egl_surface->is_activated = is_activated; + if (wl_egl_window) { + wl_egl_window->attached_width = wl_egl_buffer->width; + wl_egl_window->attached_height = wl_egl_buffer->height; + } - surface->width = tbm_surface_queue_get_width(wayland_egl_surface->tbm_queue); - surface->height = tbm_surface_queue_get_height(wayland_egl_surface->tbm_queue); + wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer, + wl_egl_buffer->dx, wl_egl_buffer->dy); - if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { - /* If surface->frontbuffer is already set in frontbuffer mode, - * it will return that frontbuffer if it is still activated, - * otherwise dequeue the new buffer after initializing - * surface->frontbuffer to NULL. 
*/ - if (is_activated && !wayland_egl_surface->reset) { - TPL_LOG_T("WL_EGL", - "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", - surface->frontbuffer, - tbm_bo_export(tbm_surface_internal_get_bo( - surface->frontbuffer, 0))); - TRACE_ASYNC_BEGIN((int)surface->frontbuffer, - "[DEQ]~[ENQ] BO_NAME:%d", - tbm_bo_export(tbm_surface_internal_get_bo( - surface->frontbuffer, 0))); - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wayland_egl_display->twe_display); - return surface->frontbuffer; + if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) { + if (version < 4) { + wl_surface_damage(wl_surface, + wl_egl_buffer->dx, wl_egl_buffer->dy, + wl_egl_buffer->width, wl_egl_buffer->height); } else { - surface->frontbuffer = NULL; - wayland_egl_surface->need_to_enqueue = TPL_TRUE; + wl_surface_damage_buffer(wl_surface, + 0, 0, + wl_egl_buffer->width, wl_egl_buffer->height); } } else { - surface->frontbuffer = NULL; + int i; + for (i = 0; i < wl_egl_buffer->num_rects; i++) { + int inverted_y = + wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] + + wl_egl_buffer->rects[i * 4 + 3]); + if (version < 4) { + wl_surface_damage(wl_surface, + wl_egl_buffer->rects[i * 4 + 0], + inverted_y, + wl_egl_buffer->rects[i * 4 + 2], + wl_egl_buffer->rects[i * 4 + 3]); + } else { + wl_surface_damage_buffer(wl_surface, + wl_egl_buffer->rects[i * 4 + 0], + inverted_y, + wl_egl_buffer->rects[i * 4 + 2], + wl_egl_buffer->rects[i * 4 + 3]); + } + } } - tsq_err = tbm_surface_queue_dequeue(wayland_egl_surface->tbm_queue, - &tbm_surface); - if (!tbm_surface) { - TPL_ERR("Failed to dequeue from tbm_queue(%p) surface(%p)| tsq_err = %d", - wayland_egl_surface->tbm_queue, surface, tsq_err); - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wayland_egl_display->twe_display); - return NULL; - } + wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer, + wl_egl_buffer->serial); - tbm_surface_internal_ref(tbm_surface); + if 
(wl_egl_display->use_explicit_sync && + wl_egl_surface->surface_sync) { - /* If twe_surface_get_buffer_release_fence_fd return -1, - * the tbm_surface can be used immediately. - * If not, user(EGL) have to wait until signaled. */ - if (sync_fence) { - *sync_fence = twe_surface_get_buffer_release_fence_fd( - wayland_egl_surface->twe_surface, tbm_surface); + zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync, + wl_egl_buffer->acquire_fence_fd); + TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)", + wl_egl_surface, wl_egl_buffer->tbm_surface, wl_egl_buffer->acquire_fence_fd); + close(wl_egl_buffer->acquire_fence_fd); + wl_egl_buffer->acquire_fence_fd = -1; + + wl_egl_buffer->buffer_release = + zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync); + if (!wl_egl_buffer->buffer_release) { + TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface); + } else { + zwp_linux_buffer_release_v1_add_listener( + wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer); + TPL_DEBUG("add explicit_sync_release_listener."); + } } - bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); + wl_surface_commit(wl_surface); - if (surface->is_frontbuffer_mode && is_activated) - surface->frontbuffer = tbm_surface; + wl_display_flush(wl_egl_display->wl_display); - wayland_egl_surface->reset = TPL_FALSE; + TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + wl_egl_buffer->bo_name); - TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)", - tbm_surface, bo_name, sync_fence ? 
*sync_fence : -1); + wl_egl_buffer->need_to_commit = TPL_FALSE; + wl_egl_buffer->status = COMMITTED; - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wayland_egl_display->twe_display); + TPL_LOG_T("WL_EGL", + "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface, + wl_egl_buffer->bo_name); - return tbm_surface; -} + if (wl_egl_display->use_wait_vblank && + _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE) + TPL_ERR("Failed to set wait vblank."); -void -__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) -{ - tpl_wayland_egl_surface_t *wayland_egl_surface = - (tpl_wayland_egl_surface_t *)surface->backend.data; + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); - if (width) - *width = tbm_surface_queue_get_width(wayland_egl_surface->tbm_queue); - if (height) - *height = tbm_surface_queue_get_height(wayland_egl_surface->tbm_queue); -} + if (wl_egl_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (ret == -1) { + TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd); + } + TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)", + wl_egl_buffer->bo_name); + TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)", + wl_egl_surface, wl_egl_buffer->commit_sync_fd); -tpl_bool_t -__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy) + close(wl_egl_buffer->commit_sync_fd); + wl_egl_buffer->commit_sync_fd = -1; + } + + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); +} + +static int +_write_to_eventfd(int eventfd) { - if (!native_dpy) return TPL_FALSE; + uint64_t value = 1; + int ret; - if (twe_check_native_handle_is_wl_display(native_dpy)) - return TPL_TRUE; + if (eventfd == -1) { + TPL_ERR("Invalid fd(-1)"); + return -1; + } - return TPL_FALSE; + ret = write(eventfd, &value, sizeof(uint64_t)); + if (ret == -1) { + 
TPL_ERR("failed to write to fd(%d)", eventfd); + return ret; + } + + return ret; } void @@ -866,3 +3197,83 @@ __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend) __tpl_wl_egl_surface_get_size; } +static void +__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) +{ + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) { + wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL; + wl_egl_surface->buffer_cnt--; + + wl_egl_buffer->idx = -1; + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + + wl_display_flush(wl_egl_display->wl_display); + + if (wl_egl_buffer->wl_buffer) + wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer); + + if (wl_egl_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send commit_sync signal to fd(%d)", + wl_egl_buffer->commit_sync_fd); + close(wl_egl_buffer->commit_sync_fd); + wl_egl_buffer->commit_sync_fd = -1; + } + + if (wl_egl_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + wl_egl_buffer->presentation_sync_fd); + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; + } + + if (wl_egl_buffer->rects) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } + + wl_egl_buffer->tbm_surface = NULL; + wl_egl_buffer->bo_name = -1; + + free(wl_egl_buffer); +} + +static int +_get_tbm_surface_bo_name(tbm_surface_h 
tbm_surface) +{ + return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); +} + +static void +_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) +{ + int idx = 0; + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", + wl_egl_surface, wl_egl_surface->buffer_cnt); + for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { + tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; + if (wl_egl_buffer) { + TPL_INFO("[INFO]", + "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", + idx, wl_egl_buffer, wl_egl_buffer->tbm_surface, + wl_egl_buffer->bo_name, + status_to_string[wl_egl_buffer->status]); + } + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); +} diff --git a/src/tpl_wl_egl_thread_legacy.c b/src/tpl_wl_egl_thread_legacy.c new file mode 100644 index 0000000..bb6a55d --- /dev/null +++ b/src/tpl_wl_egl_thread_legacy.c @@ -0,0 +1,868 @@ +#define inline __inline__ + +#undef inline + +#include "tpl_internal.h" + +#include +#include +#include + +#include +#include +#include +#include + +#include "tpl_wayland_egl_thread.h" + +/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */ +#define CLIENT_QUEUE_SIZE 3 + +typedef struct _tpl_wayland_egl_display tpl_wayland_egl_display_t; +typedef struct _tpl_wayland_egl_surface tpl_wayland_egl_surface_t; + +struct _tpl_wayland_egl_display { + twe_thread *wl_egl_thread; + twe_display_h twe_display; +}; + +struct _tpl_wayland_egl_surface { + tpl_object_t base; + twe_surface_h twe_surface; + tbm_surface_queue_h tbm_queue; + tpl_bool_t is_activated; + tpl_bool_t reset; /* TRUE if queue reseted by external */ + tpl_bool_t need_to_enqueue; +}; + +static tpl_result_t +__tpl_wl_egl_display_init(tpl_display_t *display) +{ + tpl_wayland_egl_display_t *wayland_egl_display = NULL; + + TPL_ASSERT(display); + + /* Do not allow default display in wayland. 
*/ + if (!display->native_handle) { + TPL_ERR("Invalid native handle for display."); + return TPL_ERROR_INVALID_PARAMETER; + } + + wayland_egl_display = (tpl_wayland_egl_display_t *) calloc(1, + sizeof(tpl_wayland_egl_display_t)); + if (!wayland_egl_display) { + TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_display_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + display->backend.data = wayland_egl_display; + display->bufmgr_fd = -1; + + if (twe_check_native_handle_is_wl_display(display->native_handle)) { + wayland_egl_display->wl_egl_thread = twe_thread_create(); + if (!wayland_egl_display->wl_egl_thread) { + TPL_ERR("Failed to create twe_thread."); + goto free_display; + } + + wayland_egl_display->twe_display = + twe_display_add(wayland_egl_display->wl_egl_thread, + display->native_handle, + display->backend.type); + if (!wayland_egl_display->twe_display) { + TPL_ERR("Failed to add native_display(%p) to thread(%p)", + display->native_handle, + wayland_egl_display->wl_egl_thread); + goto free_display; + } + + } else { + TPL_ERR("Invalid native handle for display."); + goto free_display; + } + + TPL_LOG_T("WL_EGL", + "[INIT DISPLAY] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)", + wayland_egl_display, + wayland_egl_display->wl_egl_thread, + wayland_egl_display->twe_display); + + return TPL_ERROR_NONE; + +free_display: + if (wayland_egl_display->twe_display) + twe_display_del(wayland_egl_display->twe_display); + if (wayland_egl_display->wl_egl_thread) + twe_thread_destroy(wayland_egl_display->wl_egl_thread); + wayland_egl_display->wl_egl_thread = NULL; + wayland_egl_display->twe_display = NULL; + + free(wayland_egl_display); + display->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; +} + +static void +__tpl_wl_egl_display_fini(tpl_display_t *display) +{ + tpl_wayland_egl_display_t *wayland_egl_display; + + TPL_ASSERT(display); + + wayland_egl_display = (tpl_wayland_egl_display_t *)display->backend.data; + if (wayland_egl_display) { + 
+ TPL_LOG_T("WL_EGL", + "[FINI] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)", + wayland_egl_display, + wayland_egl_display->wl_egl_thread, + wayland_egl_display->twe_display); + + if (wayland_egl_display->twe_display) { + tpl_result_t ret = TPL_ERROR_NONE; + ret = twe_display_del(wayland_egl_display->twe_display); + if (ret != TPL_ERROR_NONE) + TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)", + wayland_egl_display->twe_display, + wayland_egl_display->wl_egl_thread); + wayland_egl_display->twe_display = NULL; + } + + if (wayland_egl_display->wl_egl_thread) { + twe_thread_destroy(wayland_egl_display->wl_egl_thread); + wayland_egl_display->wl_egl_thread = NULL; + } + + free(wayland_egl_display); + } + + display->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_egl_display_query_config(tpl_display_t *display, + tpl_surface_type_t surface_type, + int red_size, int green_size, + int blue_size, int alpha_size, + int color_depth, int *native_visual_id, + tpl_bool_t *is_slow) +{ + TPL_ASSERT(display); + + if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 && + green_size == 8 && blue_size == 8 && + (color_depth == 32 || color_depth == 24)) { + + if (alpha_size == 8) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + if (alpha_size == 0) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + } + + return TPL_ERROR_INVALID_PARAMETER; +} + +static tpl_result_t +__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id, + int alpha_size) +{ + TPL_IGNORE(display); + TPL_IGNORE(visual_id); + TPL_IGNORE(alpha_size); + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_egl_display_get_window_info(tpl_display_t *display, + tpl_handle_t window, int *width, + int *height, tbm_format *format, + int depth, int a_size) +{ + tpl_result_t ret = TPL_ERROR_NONE; + 
+ TPL_ASSERT(display); + TPL_ASSERT(window); + + if ((ret = twe_get_native_window_info(window, width, height, format, a_size)) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to get size info of native_window(%p)", window); + } + + return ret; +} + +static tpl_result_t +__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display, + tpl_handle_t pixmap, int *width, + int *height, tbm_format *format) +{ + tbm_surface_h tbm_surface = NULL; + + tbm_surface = twe_get_native_buffer_from_pixmap(pixmap); + if (!tbm_surface) { + TPL_ERR("Failed to get tbm_surface_h from native pixmap."); + return TPL_ERROR_INVALID_OPERATION; + } + + if (width) *width = tbm_surface_get_width(tbm_surface); + if (height) *height = tbm_surface_get_height(tbm_surface); + if (format) *format = tbm_surface_get_format(tbm_surface); + + return TPL_ERROR_NONE; +} + +static tbm_surface_h +__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) +{ + tbm_surface_h tbm_surface = NULL; + + TPL_ASSERT(pixmap); + + tbm_surface = twe_get_native_buffer_from_pixmap(pixmap); + if (!tbm_surface) { + TPL_ERR("Failed to get tbm_surface_h from wayland_tbm."); + return NULL; + } + + return tbm_surface; +} + +static void +__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, + void *data) +{ + tpl_surface_t *surface = NULL; + tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + tpl_bool_t is_activated = TPL_FALSE; + int width, height; + + surface = (tpl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(surface); + + wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface); + + /* When the queue is resized, change the reset flag to TPL_TRUE to reflect + * the changed window size at the next frame. 
*/ + width = tbm_surface_queue_get_width(surface_queue); + height = tbm_surface_queue_get_height(surface_queue); + if (surface->width != width || surface->height != height) { + TPL_LOG_T("WL_EGL", + "[QUEUE_RESIZE_CB] wayland_egl_surface(%p) tbm_queue(%p) (%dx%d)", + wayland_egl_surface, surface_queue, width, height); + } + + /* When queue_reset_callback is called, if is_activated is different from + * its previous state change the reset flag to TPL_TRUE to get a new buffer + * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ + is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface); + if (wayland_egl_surface->is_activated != is_activated) { + if (is_activated) { + TPL_LOG_T("WL_EGL", + "[ACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)", + wayland_egl_surface, surface_queue); + } else { + TPL_LOG_T("WL_EGL", + "[DEACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)", + wayland_egl_surface, surface_queue); + } + } + + wayland_egl_surface->reset = TPL_TRUE; + + if (surface->reset_cb) + surface->reset_cb(surface->reset_data); +} + +void __cb_window_rotate_callback(void *data) +{ + tpl_surface_t *surface = (tpl_surface_t *)data; + tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + int rotation; + + if (!surface) { + TPL_ERR("Inavlid parameter. surface is NULL."); + return; + } + + wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; + if (!wayland_egl_surface) { + TPL_ERR("Invalid parameter. 
surface->backend.data is NULL"); + return; + } + + rotation = twe_surface_get_rotation(wayland_egl_surface->twe_surface); + + surface->rotation = rotation; +} + +static tpl_result_t +__tpl_wl_egl_surface_init(tpl_surface_t *surface) +{ + tpl_wayland_egl_display_t *wayland_egl_display = NULL; + tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + tbm_surface_queue_h tbm_queue = NULL; + twe_surface_h twe_surface = NULL; + tpl_result_t ret = TPL_ERROR_NONE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); + TPL_ASSERT(surface->native_handle); + + wayland_egl_display = + (tpl_wayland_egl_display_t *)surface->display->backend.data; + if (!wayland_egl_display) { + TPL_ERR("Invalid parameter. wayland_egl_display(%p)", + wayland_egl_display); + return TPL_ERROR_INVALID_PARAMETER; + } + + wayland_egl_surface = (tpl_wayland_egl_surface_t *) calloc(1, + sizeof(tpl_wayland_egl_surface_t)); + if (!wayland_egl_surface) { + TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_surface_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + surface->backend.data = (void *)wayland_egl_surface; + + if (__tpl_object_init(&wayland_egl_surface->base, + TPL_OBJECT_SURFACE, + NULL) != TPL_ERROR_NONE) { + TPL_ERR("Failed to initialize backend surface's base object!"); + goto object_init_fail; + } + + twe_surface = twe_surface_add(wayland_egl_display->wl_egl_thread, + wayland_egl_display->twe_display, + surface->native_handle, + surface->format, surface->num_buffers); + if (!twe_surface) { + TPL_ERR("Failed to add native_window(%p) to thread(%p)", + surface->native_handle, wayland_egl_display->wl_egl_thread); + goto create_twe_surface_fail; + } + + tbm_queue = twe_surface_get_tbm_queue(twe_surface); + if (!tbm_queue) { + TPL_ERR("Failed to get tbm_queue from twe_surface(%p)", twe_surface); + goto queue_create_fail; + } + + /* Set reset_callback to tbm_queue */ + if (tbm_surface_queue_add_reset_cb(tbm_queue, + 
__cb_tbm_surface_queue_reset_callback, + (void *)surface)) { + TPL_ERR("TBM surface queue add reset cb failed!"); + goto add_reset_cb_fail; + } + + wayland_egl_surface->reset = TPL_FALSE; + wayland_egl_surface->twe_surface = twe_surface; + wayland_egl_surface->tbm_queue = tbm_queue; + wayland_egl_surface->is_activated = TPL_FALSE; + wayland_egl_surface->need_to_enqueue = TPL_TRUE; + + surface->width = tbm_surface_queue_get_width(tbm_queue); + surface->height = tbm_surface_queue_get_height(tbm_queue); + surface->rotation = twe_surface_get_rotation(twe_surface); + + ret = twe_surface_set_rotate_callback(twe_surface, (void *)surface, + (tpl_surface_cb_func_t)__cb_window_rotate_callback); + if (ret != TPL_ERROR_NONE) { + TPL_WARN("Failed to register rotate callback."); + } + + TPL_LOG_T("WL_EGL", + "[INIT1/2]tpl_surface(%p) tpl_wayland_egl_surface(%p) twe_surface(%p)", + surface, wayland_egl_surface, twe_surface); + TPL_LOG_T("WL_EGL", + "[INIT2/2]size(%dx%d)rot(%d)|tbm_queue(%p)|native_window(%p)", + surface->width, surface->height, surface->rotation, + tbm_queue, surface->native_handle); + + return TPL_ERROR_NONE; + +add_reset_cb_fail: +queue_create_fail: + twe_surface_del(twe_surface); +create_twe_surface_fail: +object_init_fail: + free(wayland_egl_surface); + surface->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; +} + +static void +__tpl_wl_egl_surface_fini(tpl_surface_t *surface) +{ + tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + tpl_wayland_egl_display_t *wayland_egl_display = NULL; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + + wayland_egl_surface = (tpl_wayland_egl_surface_t *) surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface); + + TPL_OBJECT_LOCK(wayland_egl_surface); + + wayland_egl_display = (tpl_wayland_egl_display_t *) + surface->display->backend.data; + + if (wayland_egl_display == NULL) { + TPL_ERR("check failed: wayland_egl_display == NULL"); + TPL_OBJECT_UNLOCK(wayland_egl_surface); + return; + 
} + + if (surface->type == TPL_SURFACE_TYPE_WINDOW) { + TPL_LOG_T("WL_EGL", + "[FINI] wayland_egl_surface(%p) native_window(%p) twe_surface(%p)", + wayland_egl_surface, surface->native_handle, + wayland_egl_surface->twe_surface); + + if (twe_surface_del(wayland_egl_surface->twe_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", + wayland_egl_surface->twe_surface, + wayland_egl_display->wl_egl_thread); + } + + wayland_egl_surface->twe_surface = NULL; + wayland_egl_surface->tbm_queue = NULL; + } + + TPL_OBJECT_UNLOCK(wayland_egl_surface); + __tpl_object_fini(&wayland_egl_surface->base); + free(wayland_egl_surface); + surface->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface, + tpl_bool_t set) +{ + tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + + if (!surface) { + TPL_ERR("Invalid parameter. tpl_surface(%p)", surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; + if (!wayland_egl_surface) { + TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)", + surface, wayland_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!wayland_egl_surface->twe_surface) { + TPL_ERR("Invalid parameter. wayland_egl_surface(%p) twe_surface(%p)", + wayland_egl_surface, wayland_egl_surface->twe_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + twe_surface_set_rotation_capablity(wayland_egl_surface->twe_surface, + set); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface, + int post_interval) +{ + tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + + if (!surface) { + TPL_ERR("Invalid parameter. tpl_surface(%p)", surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; + if (!wayland_egl_surface) { + TPL_ERR("Invalid parameter. 
surface(%p) wayland_egl_surface(%p)", + surface, wayland_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!wayland_egl_surface->twe_surface) { + TPL_ERR("Invalid parameter. wayland_egl_surface(%p) twe_surface(%p)", + wayland_egl_surface, wayland_egl_surface->twe_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + twe_surface_set_post_interval(wayland_egl_surface->twe_surface, + post_interval); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, tbm_fd sync_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(tbm_surface); + TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); + + tpl_wayland_egl_surface_t *wayland_egl_surface = + (tpl_wayland_egl_surface_t *) surface->backend.data; + tbm_surface_queue_error_e tsq_err; + tpl_result_t ret = TPL_ERROR_NONE; + int bo_name = 0; + + TPL_OBJECT_LOCK(wayland_egl_surface); + + bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); + + if (!wayland_egl_surface) { + TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)", + surface, wayland_egl_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wayland_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", + tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wayland_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + TRACE_MARK("[ENQ] BO_NAME:%d", bo_name); + + TPL_LOG_T("WL_EGL", + "[ENQ] wayland_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", + wayland_egl_surface, tbm_surface, bo_name, sync_fence); + + /* If there are received region information, + * save it to buf_info in tbm_surface user_data using below API. 
*/ + if (num_rects && rects) { + ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects); + if (ret != TPL_ERROR_NONE) { + TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)", + num_rects, rects); + } + } + + if (!wayland_egl_surface->need_to_enqueue || + !twe_surface_check_commit_needed(wayland_egl_surface->twe_surface, + tbm_surface)) { + TPL_LOG_T("WL_EGL", + "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", + ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wayland_egl_surface); + return TPL_ERROR_NONE; + } + + /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and + * commit if surface->frontbuffer that is already set and the tbm_surface + * client want to enqueue are the same. + */ + if (surface->is_frontbuffer_mode) { + /* The first buffer to be activated in frontbuffer mode must be + * committed. Subsequence frames do not need to be committed because + * the buffer is already displayed. + */ + if (surface->frontbuffer == tbm_surface) + wayland_egl_surface->need_to_enqueue = TPL_FALSE; + + if (sync_fence != -1) { + close(sync_fence); + sync_fence = -1; + } + } + + if (sync_fence != -1) { + ret = twe_surface_set_sync_fd(wayland_egl_surface->twe_surface, + tbm_surface, sync_fence); + if (ret != TPL_ERROR_NONE) { + TPL_WARN("Failed to set sync fd (%d). But it will continue.", + sync_fence); + } + } + + tsq_err = tbm_surface_queue_enqueue(wayland_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + tbm_surface_internal_unref(tbm_surface); + TPL_ERR("Failed to enqueue tbm_surface(%p). 
tpl_surface(%p) tsq_err=%d", + tbm_surface, surface, tsq_err); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wayland_egl_surface); + return TPL_ERROR_INVALID_OPERATION; + } + + tbm_surface_internal_unref(tbm_surface); + + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wayland_egl_surface); + + return TPL_ERROR_NONE; +} + +static tpl_bool_t +__tpl_wl_egl_surface_validate(tpl_surface_t *surface) +{ + tpl_bool_t retval = TPL_TRUE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wayland_egl_surface_t *wayland_egl_surface = + (tpl_wayland_egl_surface_t *)surface->backend.data; + + retval = !(wayland_egl_surface->reset); + + return retval; +} + +static tpl_result_t +__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface) +{ + tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; + if (!wayland_egl_surface) { + TPL_ERR("Invalid backend surface. surface(%p) wayland_egl_surface(%p)", + surface, wayland_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Invalid buffer. 
tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + tbm_surface_internal_unref(tbm_surface); + + tsq_err = tbm_surface_queue_cancel_dequeue(wayland_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to release tbm_surface(%p) surface(%p)", + tbm_surface, surface); + return TPL_ERROR_INVALID_OPERATION; + } + + TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)", + surface, tbm_surface); + + return TPL_ERROR_NONE; +} + +#define CAN_DEQUEUE_TIMEOUT_MS 10000 + +static tbm_surface_h +__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, + tbm_fd *sync_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->backend.data); + TPL_OBJECT_CHECK_RETURN(surface, NULL); + + tbm_surface_h tbm_surface = NULL; + tpl_wayland_egl_surface_t *wayland_egl_surface = + (tpl_wayland_egl_surface_t *)surface->backend.data; + tpl_wayland_egl_display_t *wayland_egl_display = + (tpl_wayland_egl_display_t *)surface->display->backend.data; + tbm_surface_queue_error_e tsq_err = 0; + int is_activated = 0; + int bo_name = 0; + tpl_result_t lock_ret = TPL_FALSE; + + TPL_OBJECT_UNLOCK(surface); + tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( + wayland_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS); + TPL_OBJECT_LOCK(surface); + + /* After the can dequeue state, call twe_display_lock to prevent other + * events from being processed in wayland_egl_thread + * during below dequeue procedure. */ + lock_ret = twe_display_lock(wayland_egl_display->twe_display); + + if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { + TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", + wayland_egl_surface->tbm_queue, surface); + if (twe_surface_queue_force_flush(wayland_egl_surface->twe_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to timeout reset. 
tbm_queue(%p) surface(%p)", + wayland_egl_surface->tbm_queue, surface); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_egl_display->twe_display); + return NULL; + } else { + tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + } + } + + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)", + wayland_egl_surface->tbm_queue, surface); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_egl_display->twe_display); + return NULL; + } + + /* wayland client can check their states (ACTIVATED or DEACTIVATED) with + * below function [wayland_tbm_client_queue_check_activate()]. + * This function has to be called before tbm_surface_queue_dequeue() + * in order to know what state the buffer will be dequeued next. + * + * ACTIVATED state means non-composite mode. Client can get buffers which + can be displayed directly(without compositing). + * DEACTIVATED state means composite mode. Client's buffer will be displayed + by compositor(E20) with compositing. + */ + is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface); + wayland_egl_surface->is_activated = is_activated; + + surface->width = tbm_surface_queue_get_width(wayland_egl_surface->tbm_queue); + surface->height = tbm_surface_queue_get_height(wayland_egl_surface->tbm_queue); + + if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { + /* If surface->frontbuffer is already set in frontbuffer mode, + * it will return that frontbuffer if it is still activated, + * otherwise dequeue the new buffer after initializing + * surface->frontbuffer to NULL. 
*/ + if (is_activated && !wayland_egl_surface->reset) { + TPL_LOG_T("WL_EGL", + "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", + surface->frontbuffer, + tbm_bo_export(tbm_surface_internal_get_bo( + surface->frontbuffer, 0))); + TRACE_ASYNC_BEGIN((int)surface->frontbuffer, + "[DEQ]~[ENQ] BO_NAME:%d", + tbm_bo_export(tbm_surface_internal_get_bo( + surface->frontbuffer, 0))); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_egl_display->twe_display); + return surface->frontbuffer; + } else { + surface->frontbuffer = NULL; + wayland_egl_surface->need_to_enqueue = TPL_TRUE; + } + } else { + surface->frontbuffer = NULL; + } + + tsq_err = tbm_surface_queue_dequeue(wayland_egl_surface->tbm_queue, + &tbm_surface); + if (!tbm_surface) { + TPL_ERR("Failed to dequeue from tbm_queue(%p) surface(%p)| tsq_err = %d", + wayland_egl_surface->tbm_queue, surface, tsq_err); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_egl_display->twe_display); + return NULL; + } + + tbm_surface_internal_ref(tbm_surface); + + /* If twe_surface_get_buffer_release_fence_fd return -1, + * the tbm_surface can be used immediately. + * If not, user(EGL) have to wait until signaled. */ + if (sync_fence) { + *sync_fence = twe_surface_get_buffer_release_fence_fd( + wayland_egl_surface->twe_surface, tbm_surface); + } + + bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); + + if (surface->is_frontbuffer_mode && is_activated) + surface->frontbuffer = tbm_surface; + + wayland_egl_surface->reset = TPL_FALSE; + + TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name); + TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)", + tbm_surface, bo_name, sync_fence ? 
*sync_fence : -1); + + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_egl_display->twe_display); + + return tbm_surface; +} + +void +__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) +{ + tpl_wayland_egl_surface_t *wayland_egl_surface = + (tpl_wayland_egl_surface_t *)surface->backend.data; + + if (width) + *width = tbm_surface_queue_get_width(wayland_egl_surface->tbm_queue); + if (height) + *height = tbm_surface_queue_get_height(wayland_egl_surface->tbm_queue); +} + + +tpl_bool_t +__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy) +{ + if (!native_dpy) return TPL_FALSE; + + if (twe_check_native_handle_is_wl_display(native_dpy)) + return TPL_TRUE; + + return TPL_FALSE; +} + +void +__tpl_display_init_backend_wl_egl_thread_legacy(tpl_display_backend_t *backend) +{ + TPL_ASSERT(backend); + + backend->type = TPL_BACKEND_WAYLAND_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_egl_display_init; + backend->fini = __tpl_wl_egl_display_fini; + backend->query_config = __tpl_wl_egl_display_query_config; + backend->filter_config = __tpl_wl_egl_display_filter_config; + backend->get_window_info = __tpl_wl_egl_display_get_window_info; + backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info; + backend->get_buffer_from_native_pixmap = + __tpl_wl_egl_display_get_buffer_from_native_pixmap; +} + +void +__tpl_surface_init_backend_wl_egl_thread_legacy(tpl_surface_backend_t *backend) +{ + TPL_ASSERT(backend); + + backend->type = TPL_BACKEND_WAYLAND_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_egl_surface_init; + backend->fini = __tpl_wl_egl_surface_fini; + backend->validate = __tpl_wl_egl_surface_validate; + backend->cancel_dequeued_buffer = + __tpl_wl_egl_surface_cancel_dequeued_buffer; + backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer; + backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer; + backend->set_rotation_capability = + 
__tpl_wl_egl_surface_set_rotation_capability; + backend->set_post_interval = + __tpl_wl_egl_surface_set_post_interval; + backend->get_size = + __tpl_wl_egl_surface_get_size; +} + -- 2.7.4 From a1b0fdb55b9dc48b7d57af30ceb6acfedcf4b0f9 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 10 Mar 2021 15:49:47 +0900 Subject: [PATCH 07/16] Fix some wrong indentations Change-Id: I936059980b41c6abf7fa9be58d9f41032c7a373f Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 52 ++++++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 1cd44b8..2cbb72d 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -43,7 +43,7 @@ struct _tpl_wl_egl_display { tpl_gthread *thread; tpl_gmutex wl_event_mutex; - struct wl_display *wl_display; + struct wl_display *wl_display; struct wl_event_queue *ev_queue; struct wayland_tbm_client *wl_tbm_client; int last_error; /* errno of the last wl_display error*/ @@ -359,11 +359,12 @@ __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; if (!strcmp(interface, "tizen_surface_shm")) { - wl_egl_display->tss = wl_registry_bind(wl_registry, - name, - &tizen_surface_shm_interface, - ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ? - version : IMPL_TIZEN_SURFACE_SHM_VERSION)); + wl_egl_display->tss = + wl_registry_bind(wl_registry, + name, + &tizen_surface_shm_interface, + ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ? 
+ version : IMPL_TIZEN_SURFACE_SHM_VERSION)); } else if (!strcmp(interface, wp_presentation_interface.name)) { wl_egl_display->presentation = wl_registry_bind(wl_registry, @@ -783,7 +784,7 @@ __tpl_wl_egl_display_init(tpl_display_t *display) wl_egl_display->explicit_sync = NULL; wl_egl_display->wl_tbm_client = NULL; - wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled + wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled { char *env = tpl_getenv("TPL_WAIT_VBLANK"); if (env && !atoi(env)) { @@ -1014,6 +1015,7 @@ tpl_bool_t __tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy) { struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE); /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value @@ -1033,7 +1035,7 @@ __tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy) static void __cb_destroy_callback(void *private) { - struct tizen_private *tizen_private = (struct tizen_private *)private; + struct tizen_private *tizen_private = (struct tizen_private *)private; tpl_wl_egl_surface_t *wl_egl_surface = NULL; if (!tizen_private) { @@ -1074,7 +1076,7 @@ __cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private) TPL_ASSERT(private); TPL_ASSERT(wl_egl_window); - struct tizen_private *tizen_private = (struct tizen_private *)private; + struct tizen_private *tizen_private = (struct tizen_private *)private; tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; int cur_w, cur_h, req_w, req_h, format; @@ -1111,7 +1113,7 @@ __cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private) TPL_ASSERT(private); TPL_ASSERT(wl_egl_window); - struct tizen_private *tizen_private = (struct tizen_private *)private; + struct tizen_private *tizen_private = (struct tizen_private *)private; tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; int rotation = 
tizen_private->rotation; @@ -1137,8 +1139,8 @@ __cb_get_rotation_capability(struct wl_egl_window *wl_egl_window, TPL_ASSERT(private); TPL_ASSERT(wl_egl_window); - int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE; - struct tizen_private *tizen_private = (struct tizen_private *)private; + int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE; + struct tizen_private *tizen_private = (struct tizen_private *)private; tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; if (!wl_egl_surface) { @@ -1163,7 +1165,7 @@ __cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window, TPL_ASSERT(private); TPL_ASSERT(wl_egl_window); - struct tizen_private *tizen_private = (struct tizen_private *)private; + struct tizen_private *tizen_private = (struct tizen_private *)private; tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; if (!wl_egl_surface) { @@ -1184,7 +1186,7 @@ __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) int commit_sync_fd = -1; - struct tizen_private *tizen_private = (struct tizen_private *)private; + struct tizen_private *tizen_private = (struct tizen_private *)private; tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; if (!wl_egl_surface) { @@ -1206,7 +1208,8 @@ __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC); if (wl_egl_surface->commit_sync.fd == -1) { - TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)", wl_egl_surface); + TPL_ERR("Failed to create commit_sync_fd. 
wl_egl_surface(%p)", + wl_egl_surface); tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); return -1; } @@ -1231,7 +1234,7 @@ __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *priv int presentation_sync_fd = -1; - struct tizen_private *tizen_private = (struct tizen_private *)private; + struct tizen_private *tizen_private = (struct tizen_private *)private; tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; if (!wl_egl_surface) { @@ -1252,7 +1255,8 @@ __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *priv wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC); if (wl_egl_surface->presentation_sync.fd == -1) { - TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)", wl_egl_surface); + TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)", + wl_egl_surface); tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); return -1; } @@ -1290,7 +1294,7 @@ static void __cb_tss_flusher_free_flush_callback(void *data, struct tizen_surface_shm_flusher *tss_flusher) { tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", wl_egl_surface, wl_egl_surface->tbm_queue); @@ -1722,7 +1726,7 @@ _thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface, int num_buffers) { tbm_surface_queue_h tbm_queue = NULL; - tbm_bufmgr bufmgr = NULL; + tbm_bufmgr bufmgr = NULL; unsigned int capability; struct wl_surface *wl_surface = wl_egl_surface->wl_surface; @@ -2763,8 +2767,8 @@ __cb_buffer_immediate_release(void *data, } static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = { - __cb_buffer_fenced_release, - __cb_buffer_immediate_release, + __cb_buffer_fenced_release, + __cb_buffer_immediate_release, }; static void @@ -2816,8 
+2820,8 @@ static const struct wl_buffer_listener wl_buffer_release_listener = { static void __cb_presentation_feedback_sync_output(void *data, - struct wp_presentation_feedback *presentation_feedback, - struct wl_output *output) + struct wp_presentation_feedback *presentation_feedback, + struct wl_output *output) { TPL_IGNORE(data); TPL_IGNORE(presentation_feedback); @@ -3260,7 +3264,7 @@ _get_tbm_surface_bo_name(tbm_surface_h tbm_surface) static void _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) { - int idx = 0; + int idx = 0; tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", -- 2.7.4 From 23f6eabb62c190e89d4a677106a5cc8bf2436d2d Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 11 Mar 2021 11:11:28 +0900 Subject: [PATCH 08/16] Clear all buffers before destroying surf_source. Change-Id: Ifc52b83af3eb193915090369f3e9985048a702fb Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 155 ++++++++++++++++++++++++++---------------------- 1 file changed, 85 insertions(+), 70 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 2cbb72d..da85a82 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1433,76 +1433,6 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - - { - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - int idx = 0; - tpl_bool_t need_to_release = TPL_FALSE; - tpl_bool_t need_to_cancel = TPL_FALSE; - - while (wl_egl_surface->buffer_cnt) { - tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); - wl_egl_buffer = wl_egl_surface->buffers[idx]; - if (wl_egl_buffer) { - TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)", - idx, wl_egl_buffer, - wl_egl_buffer->tbm_surface, - status_to_string[wl_egl_buffer->status]); - - wl_egl_surface->buffers[idx] = NULL; - wl_egl_surface->buffer_cnt--; 
- } else { - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); - idx++; - continue; - } - tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); - - tpl_gmutex_lock(&wl_egl_buffer->mutex); - - need_to_release = (wl_egl_buffer->status == ACQUIRED || - wl_egl_buffer->status == WAITING_SIGNALED || - wl_egl_buffer->status == WAITING_VBLANK || - wl_egl_buffer->status == COMMITTED); - - need_to_cancel = wl_egl_buffer->status == DEQUEUED; - - if (wl_egl_buffer->status == WAITING_SIGNALED) { - tpl_result_t wait_result = TPL_ERROR_NONE; - wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, - &wl_egl_buffer->mutex, - 16); - if (wait_result == TPL_ERROR_TIME_OUT) - TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", - wl_egl_buffer); - } - - if (need_to_release) { - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, - wl_egl_buffer->tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", - wl_egl_buffer->tbm_surface, tsq_err); - } - - if (need_to_cancel) { - tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, - wl_egl_buffer->tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)", - wl_egl_buffer->tbm_surface, tsq_err); - } - - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - - if (need_to_release || need_to_cancel) - tbm_surface_internal_unref(wl_egl_buffer->tbm_surface); - - idx++; - } - } - if (wl_egl_surface->surface_sync) { TPL_INFO("[SURFACE_SYNC_DESTROY]", "wl_egl_surface(%p) surface_sync(%p)", @@ -1901,6 +1831,89 @@ _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) } static void +_tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tpl_bool_t need_to_release = TPL_FALSE; + tpl_bool_t 
need_to_cancel = TPL_FALSE; + int idx = 0; + + while (wl_egl_surface->buffer_cnt) { + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + wl_egl_buffer = wl_egl_surface->buffers[idx]; + + if (wl_egl_buffer) { + TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)", + idx, wl_egl_buffer, + wl_egl_buffer->tbm_surface, + status_to_string[wl_egl_buffer->status]); + wl_egl_surface->buffers[idx] = NULL; + wl_egl_surface->buffer_cnt--; + } else { + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + idx++; + continue; + } + + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + + tpl_gmutex_lock(&wl_egl_buffer->mutex); + + /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */ + /* It has been acquired but has not yet been released, so this + * buffer must be released. */ + need_to_release = (wl_egl_buffer->status == ACQUIRED || + wl_egl_buffer->status == WAITING_SIGNALED || + wl_egl_buffer->status == WAITING_VBLANK || + wl_egl_buffer->status == COMMITTED); + /* After dequeue, it has not been enqueued yet + * so cancel_dequeue must be performed. */ + need_to_cancel = wl_egl_buffer->status == DEQUEUED; + + if (wl_egl_buffer->status == WAITING_SIGNALED) { + tpl_result_t wait_result = TPL_ERROR_NONE; + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, + &wl_egl_buffer->mutex, + 16); /* 16ms */ + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); + if (wait_result == TPL_ERROR_TIME_OUT) + TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", + wl_egl_buffer); + } + + if (need_to_release) { + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + wl_egl_buffer->tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. 
tbm_surface(%p) tsq_err(%d)", + wl_egl_buffer->tbm_surface, tsq_err); + } + + if (need_to_cancel) { + tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, + wl_egl_buffer->tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)", + wl_egl_buffer->tbm_surface, tsq_err); + } + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + if (need_to_release || need_to_cancel) + tbm_surface_internal_unref(wl_egl_buffer->tbm_surface); + + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + + idx++; + } +} + +static void __tpl_wl_egl_surface_fini(tpl_surface_t *surface) { tpl_wl_egl_surface_t *wl_egl_surface = NULL; @@ -1922,6 +1935,8 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) wl_egl_surface, wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue); + _tpl_wl_egl_surface_buffer_clear(wl_egl_surface); + if (wl_egl_surface->surf_source) tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); wl_egl_surface->surf_source = NULL; -- 2.7.4 From 97d8e884ca01403a2ec2ea83737362e895a14ba9 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 11 Mar 2021 15:30:21 +0900 Subject: [PATCH 09/16] Fix some order of printing logs. 
Change-Id: Ie0830c48474305fc61f12f571bd5911109e17cd3 Signed-off-by: Joonbum Ko --- src/tpl_surface.c | 3 ++- src/tpl_wl_egl_thread.c | 29 ++++++++++++++++++++--------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/tpl_surface.c b/src/tpl_surface.c index b41534b..e05009e 100755 --- a/src/tpl_surface.c +++ b/src/tpl_surface.c @@ -13,9 +13,10 @@ static void __tpl_surface_free(void *data) { TPL_ASSERT(data); - TPL_LOG_F("tpl_surface_t(%p)", data); __tpl_surface_fini((tpl_surface_t *) data); + + TPL_LOG_F("tpl_surface_t(%p) free", data); free(data); } diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index da85a82..fdc70d6 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1846,10 +1846,6 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) wl_egl_buffer = wl_egl_surface->buffers[idx]; if (wl_egl_buffer) { - TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)", - idx, wl_egl_buffer, - wl_egl_buffer->tbm_surface, - status_to_string[wl_egl_buffer->status]); wl_egl_surface->buffers[idx] = NULL; wl_egl_surface->buffer_cnt--; } else { @@ -1863,6 +1859,11 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) tpl_gmutex_lock(&wl_egl_buffer->mutex); + TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)", + idx, wl_egl_buffer, + wl_egl_buffer->tbm_surface, + status_to_string[wl_egl_buffer->status]); + /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */ /* It has been acquired but has not yet been released, so this * buffer must be released. 
*/ @@ -2422,11 +2423,8 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, TRACE_MARK("[ENQ] BO_NAME:%d", bo_name); - TPL_LOG_T("WL_EGL", - "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", - wl_egl_surface, tbm_surface, bo_name, acquire_fence); - wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + tpl_gmutex_lock(&wl_egl_buffer->mutex); /* If there are received region information, save it to wl_egl_buffer */ @@ -2480,7 +2478,6 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, close(wl_egl_buffer->acquire_fence_fd); wl_egl_buffer->acquire_fence_fd = acquire_fence; - wl_egl_buffer->status = ENQUEUED; tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); if (wl_egl_surface->presentation_sync.fd != -1) { @@ -2498,6 +2495,11 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, } tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + wl_egl_buffer->status = ENQUEUED; + TPL_LOG_T("WL_EGL", + "[ENQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_buffer, tbm_surface, bo_name, acquire_fence); + tpl_gmutex_unlock(&wl_egl_buffer->mutex); tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, @@ -2602,6 +2604,10 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) wl_egl_buffer->status = ACQUIRED; + TPL_LOG_T("WL_EGL", "[ACQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + if (wl_egl_buffer->wl_buffer == NULL) { tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; wl_egl_buffer->wl_buffer = @@ -3240,6 +3246,11 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, (void *)wl_egl_buffer->wl_buffer); + if (wl_egl_buffer->waiting_source) { + tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); + wl_egl_buffer->waiting_source = NULL; + } + if (wl_egl_buffer->commit_sync_fd != -1) { int ret = 
_write_to_eventfd(wl_egl_buffer->commit_sync_fd); if (ret == -1) -- 2.7.4 From 36aa64fc02c00064fcd84e32cb263c0b23f50d6d Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 11 Mar 2021 16:03:14 +0900 Subject: [PATCH 10/16] Changed to wait until ENQUEUE becomes FENCE_SIGNALED. - It is not safe to force close before being SIGNALED status because it is a fence fd delivered from driver. - Therefore, it is desirable to destroy all buffers when SIGNALED status is guaranteed. Change-Id: Ifd2632b6aeb3d519031e79cb03118b306fcd49a6 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index fdc70d6..17cc5cf 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1875,7 +1875,8 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) * so cancel_dequeue must be performed. */ need_to_cancel = wl_egl_buffer->status == DEQUEUED; - if (wl_egl_buffer->status == WAITING_SIGNALED) { + if (wl_egl_buffer->status >= ENQUEUED && + wl_egl_buffer->status < WAITING_VBLANK) { tpl_result_t wait_result = TPL_ERROR_NONE; tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, -- 2.7.4 From 69bf06cf122dd70915ec673f16bdb467c4a6d3ba Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 11 Mar 2021 16:09:32 +0900 Subject: [PATCH 11/16] Make shorten the name of backend function. - __tpl_wl_egl_surface_cancel_dequeued_buffer() is too long name. - it will be changed to below. 
__tpl_wl_egl_surface_cancel_buffer() Change-Id: I05d6520097b965e316bbf8b9b9b2c1e78c62ecbb Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 17cc5cf..e5d875e 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2357,8 +2357,8 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, } static tpl_result_t -__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface) +__tpl_wl_egl_surface_cancel_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface) { TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); @@ -3212,7 +3212,7 @@ __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend) backend->fini = __tpl_wl_egl_surface_fini; backend->validate = __tpl_wl_egl_surface_validate; backend->cancel_dequeued_buffer = - __tpl_wl_egl_surface_cancel_dequeued_buffer; + __tpl_wl_egl_surface_cancel_buffer; backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer; backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer; backend->set_rotation_capability = -- 2.7.4 From 41ca2a8219d134cb3c41cd6471fe4305cfb95d79 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 11 Mar 2021 17:56:37 +0900 Subject: [PATCH 12/16] Destroy buffer_release when wl_egl_buffer destroy. 
Change-Id: Iad683fc89b9d9a5e23a948f564a64ad572715140 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index e5d875e..68cdbef 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -3243,9 +3243,16 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) wl_display_flush(wl_egl_display->wl_display); - if (wl_egl_buffer->wl_buffer) + if (wl_egl_buffer->wl_buffer) { wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, (void *)wl_egl_buffer->wl_buffer); + wl_egl_buffer->wl_buffer = NULL; + } + + if (wl_egl_buffer->buffer_release) { + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + wl_egl_buffer->buffer_release = NULL; + } if (wl_egl_buffer->waiting_source) { tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); -- 2.7.4 From 90ce1840e07d9a89fd2e5704849157e4416db6c9 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 12 Mar 2021 10:42:33 +0900 Subject: [PATCH 13/16] Fix buffer_clear logic for explicit fence sync. Change-Id: Idb76fa9179605c03b29c8dd36d9276f121d7753d Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 57 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 39 insertions(+), 18 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 68cdbef..cc609a0 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -1875,17 +1875,28 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) * so cancel_dequeue must be performed. 
*/ need_to_cancel = wl_egl_buffer->status == DEQUEUED; - if (wl_egl_buffer->status >= ENQUEUED && - wl_egl_buffer->status < WAITING_VBLANK) { + if (wl_egl_buffer->status >= ENQUEUED) { + tpl_bool_t need_to_wait = TPL_FALSE; tpl_result_t wait_result = TPL_ERROR_NONE; - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, - &wl_egl_buffer->mutex, - 16); /* 16ms */ - tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); - if (wait_result == TPL_ERROR_TIME_OUT) - TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)", - wl_egl_buffer); + + if (!wl_egl_display->use_explicit_sync && + wl_egl_buffer->status < WAITING_VBLANK) + need_to_wait = TPL_TRUE; + + if (wl_egl_display->use_explicit_sync && + wl_egl_buffer->status < COMMITTED) + need_to_wait = TPL_TRUE; + + if (need_to_wait) { + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond, + &wl_egl_buffer->mutex, + 16); /* 16ms */ + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); + if (wait_result == TPL_ERROR_TIME_OUT) + TPL_WARN("timeout occured waiting signaled. 
wl_egl_buffer(%p)", + wl_egl_buffer); + } } if (need_to_release) { @@ -1904,6 +1915,8 @@ _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface) wl_egl_buffer->tbm_surface, tsq_err); } + wl_egl_buffer->status = RELEASED; + tpl_gmutex_unlock(&wl_egl_buffer->mutex); if (need_to_release || need_to_cancel) @@ -2702,12 +2715,12 @@ __cb_buffer_fenced_release(void *data, tbm_surface = wl_egl_buffer->tbm_surface; if (tbm_surface_internal_is_valid(tbm_surface)) { + + tpl_gmutex_lock(&wl_egl_buffer->mutex); if (wl_egl_buffer->status == COMMITTED) { tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; tbm_surface_queue_error_e tsq_err; - tpl_gmutex_lock(&wl_egl_buffer->mutex); - zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); wl_egl_buffer->buffer_release = NULL; @@ -2726,8 +2739,6 @@ __cb_buffer_fenced_release(void *data, _get_tbm_surface_bo_name(tbm_surface), fence); - tpl_gmutex_unlock(&wl_egl_buffer->mutex); - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -2735,6 +2746,9 @@ __cb_buffer_fenced_release(void *data, tbm_surface_internal_unref(tbm_surface); } + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + } else { TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); } @@ -2752,12 +2766,12 @@ __cb_buffer_immediate_release(void *data, tbm_surface = wl_egl_buffer->tbm_surface; if (tbm_surface_internal_is_valid(tbm_surface)) { + + tpl_gmutex_lock(&wl_egl_buffer->mutex); if (wl_egl_buffer->status == COMMITTED) { tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; tbm_surface_queue_error_e tsq_err; - tpl_gmutex_lock(&wl_egl_buffer->mutex); - zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); wl_egl_buffer->buffer_release = NULL; @@ -2774,8 +2788,6 @@ __cb_buffer_immediate_release(void *data, wl_egl_buffer->wl_buffer, tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); - 
tpl_gmutex_unlock(&wl_egl_buffer->mutex); - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -2783,6 +2795,9 @@ __cb_buffer_immediate_release(void *data, tbm_surface_internal_unref(tbm_surface); } + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + } else { TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); } @@ -3130,9 +3145,15 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", wl_egl_buffer->bo_name); + tpl_gmutex_lock(&wl_egl_buffer->mutex); + wl_egl_buffer->need_to_commit = TPL_FALSE; wl_egl_buffer->status = COMMITTED; + tpl_gcond_signal(&wl_egl_buffer->cond); + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + TPL_LOG_T("WL_EGL", "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)", wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface, -- 2.7.4 From a2d736370add8cb09aac892e023a21530e99e61b Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 12 Mar 2021 11:17:25 +0900 Subject: [PATCH 14/16] Fix a problem with overwriting fence fd with -1. 
Change-Id: Iab7191b1ddd50a822b768529f577b1612f80e259 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index cc609a0..8618311 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -2132,14 +2132,7 @@ _wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer, wl_egl_buffer->draw_done = TPL_FALSE; wl_egl_buffer->need_to_commit = TPL_TRUE; - - wl_egl_buffer->acquire_fence_fd = -1; - wl_egl_buffer->release_fence_fd = -1; - wl_egl_buffer->commit_sync_fd = -1; - wl_egl_buffer->presentation_sync_fd = -1; - wl_egl_buffer->buffer_release = NULL; - wl_egl_buffer->transform = tizen_private->transform; if (wl_egl_buffer->w_transform != tizen_private->window_transform) { @@ -2194,6 +2187,11 @@ _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface, wl_egl_buffer->status = RELEASED; + wl_egl_buffer->acquire_fence_fd = -1; + wl_egl_buffer->commit_sync_fd = -1; + wl_egl_buffer->presentation_sync_fd = -1; + wl_egl_buffer->release_fence_fd = -1; + wl_egl_buffer->dx = wl_egl_window->dx; wl_egl_buffer->dy = wl_egl_window->dy; wl_egl_buffer->width = tbm_surface_get_width(tbm_surface); -- 2.7.4 From 9887da5c823e1b7d8e31cff39793e4ebccaf83b1 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 12 Mar 2021 13:22:16 +0900 Subject: [PATCH 15/16] Enable explicit_fence_sync feature by default.
Change-Id: I44a63dc2df55d54372a8ddfe2392f618489ca1da Signed-off-by: Joonbum Ko --- src/tpl_wl_egl_thread.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c index 8618311..bd4ba82 100755 --- a/src/tpl_wl_egl_thread.c +++ b/src/tpl_wl_egl_thread.c @@ -372,14 +372,14 @@ __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, TPL_DEBUG("bind wp_presentation_interface"); } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) { char *env = tpl_getenv("TPL_EFS"); - if (env && atoi(env)) { + if (env && !atoi(env)) { + wl_egl_display->use_explicit_sync = TPL_FALSE; + } else { wl_egl_display->explicit_sync = wl_registry_bind(wl_registry, name, &zwp_linux_explicit_synchronization_v1_interface, 1); wl_egl_display->use_explicit_sync = TPL_TRUE; TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface"); - } else { - wl_egl_display->use_explicit_sync = TPL_FALSE; } } } -- 2.7.4 From f2c1739f2064c11d9a69bec58f51263d3bb0893d Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Fri, 12 Mar 2021 15:52:53 +0900 Subject: [PATCH 16/16] Package version up to 1.8.1 Change-Id: I2560ac5a621f19981742bb46965f155913e27b99 Signed-off-by: Joonbum Ko --- packaging/libtpl-egl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index a0e2e65..6511342 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -4,7 +4,7 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 %define TPL_VERSION_MINOR 8 -%define TPL_VERSION_PATCH 0 +%define TPL_VERSION_PATCH 1 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4