From e8b886ab8f19035250e4ee6d13f91a9334b35d85 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 13 Oct 2020 16:12:09 +0900 Subject: [PATCH 01/16] Separated unused files to 'unused' Change-Id: I198e142f772360a9137972e38145c201eae9f3b7 Signed-off-by: Joonbum Ko --- src/Makefile.am | 1 - src/{ => unused}/tpl_gbm.c | 0 src/{ => unused}/tpl_wayland_vk_wsi.c | 0 src/{ => unused}/tpl_x11_common.c | 0 src/{ => unused}/tpl_x11_dri2.c | 0 src/{ => unused}/tpl_x11_dri3.c | 0 src/{ => unused}/tpl_x11_internal.h | 0 7 files changed, 1 deletion(-) rename src/{ => unused}/tpl_gbm.c (100%) rename src/{ => unused}/tpl_wayland_vk_wsi.c (100%) rename src/{ => unused}/tpl_x11_common.c (100%) rename src/{ => unused}/tpl_x11_dri2.c (100%) rename src/{ => unused}/tpl_x11_dri3.c (100%) rename src/{ => unused}/tpl_x11_internal.h (100%) diff --git a/src/Makefile.am b/src/Makefile.am index d7823b7..491d40b 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -27,7 +27,6 @@ if WITH_WAYLAND libtpl_egl_la_SOURCES += tpl_wayland_egl.c \ tpl_wl_egl_thread.c \ tpl_wayland_egl_thread.c \ - tpl_wayland_vk_wsi.c \ tpl_wl_vk_thread.c \ wayland-vulkan/wayland-vulkan-protocol.c endif diff --git a/src/tpl_gbm.c b/src/unused/tpl_gbm.c similarity index 100% rename from src/tpl_gbm.c rename to src/unused/tpl_gbm.c diff --git a/src/tpl_wayland_vk_wsi.c b/src/unused/tpl_wayland_vk_wsi.c similarity index 100% rename from src/tpl_wayland_vk_wsi.c rename to src/unused/tpl_wayland_vk_wsi.c diff --git a/src/tpl_x11_common.c b/src/unused/tpl_x11_common.c similarity index 100% rename from src/tpl_x11_common.c rename to src/unused/tpl_x11_common.c diff --git a/src/tpl_x11_dri2.c b/src/unused/tpl_x11_dri2.c similarity index 100% rename from src/tpl_x11_dri2.c rename to src/unused/tpl_x11_dri2.c diff --git a/src/tpl_x11_dri3.c b/src/unused/tpl_x11_dri3.c similarity index 100% rename from src/tpl_x11_dri3.c rename to src/unused/tpl_x11_dri3.c diff --git a/src/tpl_x11_internal.h b/src/unused/tpl_x11_internal.h 
similarity index 100% rename from src/tpl_x11_internal.h rename to src/unused/tpl_x11_internal.h -- 2.7.4 From a21459ec98f1fc4f1d019f015e3189661243c838 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Thu, 22 Oct 2020 16:55:12 +0900 Subject: [PATCH 02/16] Implemented initial tpl_utils_gthread. - Thread-related functions included in tpl_wayland_egl_thread have been separated with util. Change-Id: Ia75d1410e20241d8994e0bf55f7e6bc50016278c Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 334 +++++++++++ src/tpl_utils_gthread.h | 176 ++++++ src/tpl_wl_egl.c | 1436 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1946 insertions(+) create mode 100644 src/tpl_utils_gthread.c create mode 100644 src/tpl_utils_gthread.h create mode 100644 src/tpl_wl_egl.c diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c new file mode 100644 index 0000000..181bf31 --- /dev/null +++ b/src/tpl_utils_gthread.c @@ -0,0 +1,334 @@ +#include "tpl_utils_gthread.h" + +struct _tpl_gthread { + GThread *thread; + GMainLoop *loop; + + tpl_gsource *destroy_sig_source; + + GMutex thread_mutex; + GCond thread_cond; + + tpl_gthread_func init_func; + tpl_gthread_func deinit_func; + void *func_data; +}; + +struct _tpl_gsource { + GSource gsource; + gpointer tag; + + tpl_gthread *thread; + + int fd; + tpl_bool_t is_eventfd; + tpl_gsource_functions *gsource_funcs; + + tpl_bool_t is_disposable; + + void *data; +}; + + +static gpointer +_tpl_gthread_init(gpointer data) +{ + tpl_gthread *thread = data; + + g_mutex_lock(&thread->thread_mutex); + + if (thread->init_func) + thread->init_func(thread->func_data); + + g_cond_signal(&thread->thread_cond); + g_mutex_unlock(&thread->thread_mutex); + + g_main_loop_run(thread->twe_loop); + + return thread; +} + +static void +_tpl_gthread_fini(gpointer data) +{ + tpl_gthread *thread = data; + + g_mutex_lock(&thread->thread_mutex); + + if (thread->deinit_func) + thread->deinit_func(thread->func_data); + + g_cond_signal(&thread->thread_cond); 
+ g_mutex_unlock(&thread->thread_mutex); +} + +static tpl_gsource_functions thread_destroy_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = _tpl_gthread_fini, + .finalize = NULL, +}; + +tpl_gthread * +tpl_gthread_create(const char *thread_name, + tpl_gthread_func init_func, void *func_data) +{ + GMainContext *context = NULL; + GMainLoop *loop = NULL; + tpl_gthread *new_thread = NULL; + + context = g_main_context_new(); + if (!context) { + TPL_ERR("Failed to create GMainContext"); + return NULL; + } + + loop = g_main_loop_new(context, FALSE); + if (!loop) { + TPL_ERR("Failed to create GMainLoop"); + g_main_context_unref(context); + return NULL; + } + + g_main_context_unref(context); + + new_thread = calloc(1, sizeof(tpl_gthread)); + if (!new_thread) { + TPL_ERR("Failed to allocate tpl_gthread"); + g_main_context_unref(context); + g_main_loop_unref(loop); + } + + g_mutex_init(&new_thread->thread_mutex); + g_cond_init(&new_thread->thread_cond); + + + g_mutex_lock(&new_thread->thread_mutex); + new_thread->destroy_sig_source = + tpl_gsource_create(new_thread, new_thread, -1, + &thread_destroy_funcs, TPL_TRUE); + + new_thread->loop = loop; + new_thread->init_func = init_func; + new_thread->func_data = func_data; + new_thread->thread = g_thread_new(thread_name, + _tpl_gthread_init, new_thread); + g_cond_wait(&new_thread->thread_cond, + &new_thread->thread_mutex); + g_mutex_unlock(&new_thread->thread_mutex); + + return new_thread; +} + +void +tpl_gthread_destroy(tpl_gthread *thread, tpl_gthread_func deinit_func) +{ + g_mutex_lock(&thread->thread_mutex); + thread->deinit_func = deinit_func; + tpl_gsource_send_event(thread->destroy_sig_source, 1); + g_cond_wait(&thread->thread_cond, &thread->thread_mutex); + + g_main_loop_quit(thread->loop); + g_thread_join(thread->thread); + g_main_loop_unref(thread->loop); + + g_mutex_unlock(&thread->thread_mutex); + g_mutex_clear(&thread->thread_mutex); + g_cond_clear(&thread->thread_cond); + + thread->func = NULL; + + 
free(thread); +} + +static gboolean +_thread_source_prepare(GSource *source, gint *time) +{ + tpl_gsource *gsource = (tpl_gsource *)source; + tpl_bool_t ret = TPL_FALSE; + + if (gsource->gsource_funcs->prepare) + ret = gsource->gsource_funcs->prepare(gsource); + + *time = -1; + + return ret; +} + +static gboolean +_thread_source_check(GSource *source) +{ + tpl_gsource *gsource = (tpl_gsource *)source; + tpl_bool_t ret = TPL_FALSE; + + if (gsource->gsource_funcs->check) + ret = gsource->gsource_funcs->check(gsource); + + return ret; +} + +static gboolean +_thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) +{ + tpl_gsource *gsource = (tpl_gsource *)source; + tpl_bool_t ret = TPL_GSOURCE_CONTINUE; + GIOCondition cond = g_source_query_unix_fd(source, gsource->tag); + + TPL_IGNORE(cb); + TPL_IGNORE(data); + + if (cond & G_IO_IN) { + if (gsource->gsource_funcs->dispatch) + ret = gsource->gsource_funcs->dispatch(gsource); + } else { + /* When some io errors occur, it is not considered as a critical error. + * There may be problems with the screen, but it does not affect the operation. */ + TPL_WARN("Invalid GIOCondition occured. 
tpl_gsource(%p) fd(%d) cond(%d)", + gsource, gsource->fd, cond); + } + + if (gsource->is_disposable) + ret = TPL_GSOURCE_REMOVE; + + return ret; +} + +static void +_thread_source_finalize(GSource *source) +{ + tpl_gsource *gsource = (tpl_gsource *)source; + + if (gsource->gsource_funcs->finalize) + gsource->gsource_funcs->finalize(gsource); + + if (gsource->is_eventfd) + close(gsource->fd); + + gsource->fd = -1; + gsource->thread = NULL; + gsource->gsource_funcs = NULL; + gsource->data = NULL; +} + +static GSourceFuncs _thread_source_funcs = { + .prepare = _thread_source_prepare, + .check = _thread_source_check, + .dispatch = _thread_source_dispatch, + .finalize = _thread_source_finalize, +}; + +tpl_gsource * +tpl_gsource_create(tpl_gthread *thread, void *data, int fd, + tpl_gsource_functions *funcs, tpl_bool_t is_disposable) +{ + tpl_gsource *new_gsource = NULL; + + new_gsource = (tpl_gsource *)g_source_new(&_thread_source_funcs, + sizeof(tpl_gsource)); + if (!new_gsource) { + TPL_ERR("Failed to create new tpl_gsource"); + return NULL; + } + + if (fd < 0) { + new_gsource->fd = eventfd(0, EFD_CLOEXEC); + if (new_gsource->fd < 0) { + TPL_ERR("Failed to create eventfd. 
errno(%d)", errno); + g_source_unref(&new_gsource->gsource); + return NULL; + } + + new_gsource->is_eventfd = TPL_TRUE; + } else { + new_gsource->fd = fd; + } + + new_gsource->thread = thread; + new_gsource->gsource_funcs = funcs; + new_gsource->data = data; + new_gsource->is_disposable = is_disposable; + + new_gsource->tag = g_source_add_unix_fd(&new_gsource->gsource, + new_gsource->fd, + G_IO_IN | G_IO_ERR); + g_source_attach(&new_gsource->gsource, + g_main_loop_get_context(thread->loop)); + + return new_gsource; +} + +void +tpl_gsource_destroy(tpl_gsource *source) +{ + g_source_remove_unix_fd(&source->gsource, source->tag); + g_source_destroy(&source->gsource); + g_source_unref(&source->gsource); +} + +void +tpl_gsource_send_event(tpl_gsource *source, uint64_t message) +{ + uint64_t value = message; + int ret; + + ret = write(del_source->event_fd, &value, sizeof(uint64_t)); + if (ret == -1) { + TPL_ERR("failed to send devent. tpl_gsource(%p)", + source); + } +} + +void * +tpl_gsource_get_data(tpl_gsource *source) +{ + if (source && source->data) + return source->data; +} + +void +tpl_gmutex_init(tpl_gmutex *gmutex) +{ + g_mutex_init(gmutex); +} + +void +tpl_gmutex_clear(tpl_gmutex *gmutex) +{ + g_mutex_clear(gmutex); +} + +void +tpl_gmutex_lock(tpl_gmutex *gmutex) +{ + g_mutex_lock(gmutex); +} + +void +tpl_gmutex_unlock(tpl_gmutex *gmutex) +{ + g_mutex_unlock(gmutex); +} + +void +tpl_gcond_init(tpl_gcond *gcond) +{ + g_cond_init(gcond); +} + +void +tpl_gcond_clear(tpl_gcond *gcond) +{ + g_cond_clear(gcond); +} + +void +tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex) +{ + g_cond_wait(gcond, gmutex); +} + +void +tpl_gcond_signal(tpl_gcond *gcond) +{ + g_cond_signal(gcond); +} \ No newline at end of file diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h new file mode 100644 index 0000000..1886609 --- /dev/null +++ b/src/tpl_utils_gthread.h @@ -0,0 +1,176 @@ + +#include +#include +#include + +#include + +#include "tpl_utils.h" + +typedef struct 
_tpl_gthread tpl_gthread; +typedef struct _tpl_gsource tpl_gsource; +typedef struct _tpl_gsource_functions tpl_gsource_functions; + +typedef TPL_TRUE TPL_GSOURCE_CONTINUE; +typedef TPL_FALSE TPL_GSOURCE_REMOVE; + +typedef void (*tpl_gthread_func) (void *user_data); + +typedef GMutex tpl_gmutex; +typedef GCond tpl_gcond; + +struct _tpl_gsource_functions { + tpl_bool_t (*prepare) (tpl_gsource *source); + tpl_bool_t (*check) (tpl_gsource *source); + tpl_bool_t (*dispatch) (tpl_gsource *source); + void (*finalize) (tpl_gsource *source); +}; + +/** + * Create a new tpl_gthread + * + * This creates a new g_thread and guarantees up to g_main_loop_run. + * + * @param thread_name The name of new thread can be useful for discriminating threads in a debugger. + * @param init_func Function pointer to be called initially in the created thread. + * @param data Parameter passed when func is called. + * @return Pointer to newly created tpl_gthread. + * + * The returned tpl_gthread* must call tpl_gthread_destroy when terminating the thread. + */ +tpl_gthread * +tpl_gthread_create(const char *thread_name, + tpl_gthread_init_func init_func, void *func_data); + +/** + * Stop thread and Destroy tpl_gthread + * + * After waiting for the ongoing operation in the thread to complete, + * all resources created in tpl_gthread_create are freed. + * + * @param thread Pointer to tpl_gthread created with tpl_gthread_create(). + * @param deinit_func Function Pointer to be called in thread destroying. + * + * @see tpl_gthread_create() + */ +void +tpl_gthread_destroy(tpl_gthread *thread, tpl_gthread_func deinit_func); + +/** + * Create a new tpl_gsource + * + * This creates a new tpl_gsource to be attached the thread loop. + * + * @param thread Pointer to tpl_gthread to attach new tpl_gsource. + * @param data Pointer to some handle used by its user. + * @param fd fd to poll. If the value is more than 0, the passed value will be polled. + * If it is -1, eventfd is created in this function. 
+ * @param funcs Pointer to tpl_gsource_functions. + * This structure corresponds to GSourceFuncs, and dispatch and finalize are required. + * @param is_disposable If it is intended to be used for single use, TRUE should be passed, + * and FALSE should be passed to keep it. + * In the case of disposable, it is not necessary to call tpl_gsource_destroy. + * @return Pointer to newly created tpl_gsource. + * + * All created tpl_gsource resources will be freed in the thread. + * @see tpl_gsource_destroy + */ +tpl_gsource * +tpl_gsource_create(tpl_gthread *thread, void *data, int fd, + tpl_gsource_functions *funcs, tpl_bool_t is_disposable); + +/** + * Detach the passed tpl_gsource from thread and destroy it. + * + * @param source Pointer to tpl_gsource to destroy. + */ +void +tpl_gsource_destroy(tpl_gsource *source); + +/** + * Send an event to dispatch the gsource attached to the thread. + * + * @param source Pointer to tpl_gsource to send event. + * @param message Value to be read in thread.. + */ +void +tpl_gsource_send_event(tpl_gsource *source, uint64_t message); + +/** + * Get user data from passed tpl_gsource + * + * @param source Pointer to tpl_gsource to get its user data. + * @return Pointer to user data passed to tpl_gsource_create(). + */ +void * +tpl_gsource_get_data(tpl_gsource *source); + +/** + * wrapping g_mutex_init() + * + * @param gmutex Pointer to tpl_gmutex. + */ +void +tpl_gmutex_init(tpl_gmutex *gmutex); + +/** + * wrapping g_mutex_clear() + * + * @param gmutex Pointer to tpl_gmutex. + */ +void +tpl_gmutex_clear(tpl_gmutex *gmutex); + +/** + * wrapping g_mutex_lock() + * + * @param gmutex Pointer to tpl_gmutex. + */ +void +tpl_gmutex_lock(tpl_gmutex *gmutex); + +/** + * wrapping g_mutex_unlock() + * + * @param gmutex Pointer to tpl_gmutex. + */ +void +tpl_gmutex_unlock(tpl_gmutex *gmutex); + +/** + * wrapping g_cond_init() + * + * @param gmutex Pointer to tpl_gcond. 
+ */ +void +tpl_gcond_init(tpl_gcond *gcond); + +/** + * wrapping g_cond_clear() + * + * @param gmutex Pointer to tpl_gcond. + */ +void +tpl_gcond_clear(tpl_gcond *gcond); + +/** + * wrapping g_cond_wait() + * + * @param gmutex Pointer to tpl_gcond. + */ +void +tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex); + +/** + * wrapping g_cond_signal() + * + * @param gmutex Pointer to tpl_gcond. + */ +void +tpl_gcond_signal(tpl_gcond *gcond); + + + + + + diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c new file mode 100644 index 0000000..7b41f7e --- /dev/null +++ b/src/tpl_wl_egl.c @@ -0,0 +1,1436 @@ + +#include "tpl_internal.h" + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "wayland-egl-tizen/wayland-egl-tizen.h" +#include "wayland-egl-tizen/wayland-egl-tizen-priv.h" + +#include +#include +#include + +#include "tpl_utils_gthread.h" + +static int buffer_info_key; +#define KEY_BUFFER_INFO (unsigned long)(&buffer_info_key) + +/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. 
*/ +#define CLIENT_QUEUE_SIZE 3 + +typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t; +typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t; + +struct _tpl_wl_egl_display { + tpl_gsource *disp_source; + tpl_gthread *thread; + tpl_gmutex wl_event_mutex; + + struct wl_display *wl_display; + struct wl_event_queue *ev_queue; + struct wayland_tbm_client *wl_tbm_client; + int last_error; /* errno of the last wl_display error*/ + + tdm_client *tdm_client; + tpl_gsource *tdm_source; + + tpl_bool_t use_wait_vblank; + tpl_bool_t use_explicit_sync; + tpl_bool_t prepared; + + struct tizen_surface_shm *tss; /* used for surface buffer_flush */ + struct wp_presentation *presentation; + struct zwp_linux_explicit_synchronization_v1 *explicit_sync; +}; + +struct _tpl_wl_egl_surface { + tpl_gsource *surf_source; + + tbm_surface_queue_h tbm_queue; + + struct wl_surface *surf; + struct wl_egl_window *wl_egl_window; + struct zwp_linux_surface_synchronization_v1 *surface_sync; + struct tizen_surface_shm_flusher *tss_flusher; + + /* surface information */ + int latest_transform; + int rotation; + int format; + int render_done_cnt; + unsigned int serial; + + + tpl_wl_egl_display_t *wl_egl_display; + + /* the lists for buffer tracing */ + tpl_list_t *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */ + tpl_list_t *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */ + tpl_list_t *fence_waiting_sources; /* Trace fence_wait_source from ENQUEUE to fence signaled */ + tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */ + tpl_list_t *render_done_fences; /* for attaching to twe_thread with fences passed by enqueue */ + + tdm_client_vblank *vblank; + + tbm_fd commit_sync_timeline; + int commit_sync_timestamp; + unsigned int commit_sync_fence_number; + + tbm_fd presentation_sync_timeline; + int presentation_sync_timestamp; + int presentation_sync_ts_backup; + int presentation_sync_req_cnt; + + tpl_gmutex pst_mutex; + tpl_gmutex 
surf_mutex; + tpl_gmutex free_queue_mutex; + tpl_gcond free_queue_cond; + + /* for waiting draw done */ + tpl_bool_t use_sync_fence; + + /* to use zwp_linux_surface_synchronization */ + tpl_bool_t use_surface_sync; + + tpl_bool_t is_activated; + tpl_bool_t reset; /* TRUE if queue reseted by external */ + tpl_bool_t need_to_enqueue; + tpl_bool_t rotation_capability; + tpl_bool_t vblank_done; + tpl_bool_t is_destroying; + tpl_bool_t set_serial_is_used; /* Will be deprecated */ + + int post_interval; +}; + +struct _tpl_wl_egl_bufer { + tbm_surface_h tbm_surface; + + struct wl_proxy *wl_buffer; + int dx, dy; + int width, height; + + tpl_wl_egl_surface_t *wl_egl_surface; + + /* for wayland_tbm_client_set_buffer_transform */ + int w_transform; + tpl_bool_t w_rotated; + + /* for wl_surface_set_buffer_transform */ + int transform; + + /* for damage region */ + int num_rects; + int *rects; + + unsigned int commit_sync_ts_backup; + + /* for wayland_tbm_client_set_buffer_serial */ + unsigned int serial; + + /* for checking need_to_commit (frontbuffer mode) */ + tpl_bool_t need_to_commit; + + /* for checking need to release */ + tpl_bool_t need_to_release; + + /* for checking draw done */ + tpl_bool_t draw_done; + + + /* to get release event via zwp_linux_buffer_release_v1 */ + struct zwp_linux_buffer_release_v1 *buffer_release; + + /* each buffers own its release_fence_fd, until it passes ownership + * to it to EGL */ + int release_fence_fd; + + /* each buffers own its acquire_fence_fd. 
until it passes ownership + * to it to SERVER */ + int acquire_fence_fd; +}; + +struct sync_info { + tbm_surface_h tbm_surface; + int sync_fd; +}; + +struct _twe_fence_wait_source { + tpl_gsource *fence_source; + tbm_fd fence_fd; + tbm_surface_h tbm_surface; + tpl_wl_egl_surface_t *wl_egl_surface; +}; + +tpl_bool_t +_check_native_handle_is_wl_display(tpl_handle_t display) +{ + struct wl_interface *wl_egl_native_dpy = *(void **) display; + + if (!wl_egl_native_dpy) { + TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy); + return TPL_FALSE; + } + + /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value + is a memory address pointing the structure of wl_display_interface. */ + if (wl_egl_native_dpy == &wl_display_interface) + return TPL_TRUE; + + if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name, + strlen(wl_display_interface.name)) == 0) { + return TPL_TRUE; + } + + return TPL_FALSE; +} + +static tpl_bool_t +__thread_func_tdm_dispatch(tpl_gsource *gsource) +{ + tpl_wl_egl_display_t *wl_egl_display = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; + + wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + if (!wl_egl_display) { + TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource); + TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); + return TPL_GSOURCE_REMOVE; + } + + tdm_err = tdm_client_handle_events(wl_egl_display->tdm_client); + + /* If an error occurs in tdm_client_handle_events, it cannot be recovered. + * When tdm_source is no longer available due to an unexpected situation, + * twe_thread must remove it from the thread and destroy it. + * In that case, tdm_vblank can no longer be used for surfaces and displays + * that used this tdm_source. */ + if (tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Error occured in tdm_client_handle_events. 
tdm_err(%d)", + tdm_err); + TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); + + tpl_gsource_destroy(gsource); + + wl_egl_display->tdm_source = NULL; + + return G_SOURCE_REMOVE; + } + + return G_SOURCE_CONTINUE; +} + +static void +__thread_func_tdm_finalize(tpl_gsource *gsource) +{ + tpl_wl_egl_display_t *wl_egl_display = NULL; + twe_tdm_source *tdm_source = (twe_tdm_source *)source; + + wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + + TPL_LOG_T(BACKEND, "tdm_destroy| tdm_source(%p) tdm_client(%p)", + gsource, wl_egl_display->tdm_client); + + if (wl_egl_display->tdm_client) { + tdm_client_destroy(wl_egl_display->tdm_client); + wl_egl_display->tdm_client = NULL; + } +} + +static tpl_gsource_functions tdm_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_tdm_dispatch, + .finalize = __thread_func_tdm_finalize, +}; + +tpl_result_t +_thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display) +{ + tpl_gsource *tdm_source = NULL; + tdm_client *client = NULL; + int tdm_display_fd = -1; + tdm_error tdm_err = TDM_ERROR_NONE; + + if (!wl_egl_display->thread) { + TPL_ERR("thread should be created before init tdm_client."); + return TPL_ERROR_INVALID_OPERATION; + } + + client = tdm_client_create(&tdm_err); + if (!client || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err); + return TPL_ERROR_INVALID_OPERATION; + } + + tdm_err = tdm_client_get_fd(client, &tdm_fd); + if (tdm_fd < 0 || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err); + tdm_client_destroy(client); + return TPL_ERROR_INVALID_OPERATION; + } + + tdm_source = tpl_gsource_create(wl_egl_display->thread, + (void *)wl_egl_display, + &tdm_funcs, TPL_FALSE); + if (!tdm_source) { + TPL_ERR("Failed to create tdm_gsource\n"); + tdm_client_destroy(client); + return TPL_ERROR_INVALID_OPERATION; + } + + wl_egl_display->tdm_client = client; + wl_egl_display->tdm_source = 
tdm_source; + + TPL_LOG_T(BACKEND, "TPL_WAIT_VBLANK:DEFAULT_ENABLED"); + TPL_LOG_T(BACKEND, "wl_egl_display(%p) tdm_source(%p) tdm_client(%p)", + wl_egl_display, tdm_source, client); + + return TPL_ERROR_NONE; +} + +#define IMPL_TIZEN_SURFACE_SHM_VERSION 2 + +void +__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, + uint32_t name, const char *interface, + uint32_t version) +{ + tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; + + if (!strcmp(interface, "tizen_surface_shm")) { + wl_egl_display->tss = wl_registry_bind(wl_registry, + name, + &tizen_surface_shm_interface, + ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ? + version : IMPL_TIZEN_SURFACE_SHM_VERSION)); + } else if (!strcmp(interface, wp_presentation_interface.name)) { + wl_egl_display->presentation = + wl_registry_bind(wl_registry, + name, &wp_presentation_interface, 1); + TPL_DEBUG("bind wp_presentation_interface"); + } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) { + char *env = tpl_getenv("TPL_EFS"); + if (env && atoi(env)) { + wl_egl_display->explicit_sync = + wl_registry_bind(wl_registry, name, + &zwp_linux_explicit_synchronization_v1_interface, 1); + wl_egl_display->use_explicit_sync = TPL_TRUE; + TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface"); + } else { + wl_egl_display->use_explicit_sync = TPL_FALSE; + } + } +} + +void +__cb_wl_resistry_global_remove_callback(void *data, + struct wl_registry *wl_registry, + uint32_t name) +{ +} + +static const struct wl_registry_listener registry_listener = { + __cb_wl_resistry_global_callback, + __cb_wl_resistry_global_remove_callback +}; + +static void +_wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display, + const char *func_name) +{ + int dpy_err; + char buf[1024]; + strerror_r(errno, buf, sizeof(buf)); + + if (wl_egl_display->last_error == errno) + return; + + TPL_ERR("falied to %s. 
error:%d(%s)", func_name, errno, buf); + + dpy_err = wl_display_get_error(wl_egl_display->wl_display); + if (dpy_err == EPROTO) { + const struct wl_interface *err_interface; + uint32_t err_proxy_id, err_code; + err_code = wl_display_get_protocol_error(wl_egl_display->wl_display, + &err_interface, + &err_proxy_id); + TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d", + err_interface->name, err_code, err_proxy_id); + } + + wl_egl_display->last_error = errno; +} + +tpl_result_t +_thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display) +{ + struct wl_registry *registry = NULL; + struct wl_event_queue *queue = NULL; + struct wl_display *display_wrapper = NULL; + int ret; + tpl_result_t result = TPL_ERROR_NONE; + + queue = wl_display_create_queue(wl_egl_display->wl_display); + if (!queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_egl_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display); + if (!display_wrapper) { + TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)", + wl_egl_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue); + + registry = wl_display_get_registry(display_wrapper); + if (!registry) { + TPL_ERR("Failed to create wl_registry"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_wrapper_destroy(display_wrapper); + display_wrapper = NULL; + + if (wl_registry_add_listener(registry, ®istry_listener, + wl_egl_display)) { + TPL_ERR("Failed to wl_registry_add_listener"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue); + if (ret == -1) { + _twe_display_print_err(wl_egl_display, "roundtrip_queue"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + /* set tizen_surface_shm's queue as client's private queue */ + 
if (wl_egl_display->tss) { + wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss, + wl_egl_display->ev_queue); + TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss); + } + + if (wl_egl_display->presentation) { + wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation, + wl_egl_display->ev_queue); + TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.", + wl_egl_display->presentation); + } + + if (wl_egl_display->explicit_sync) { + wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync, + wl_egl_display->ev_queue); + TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", + wl_egl_display->explicit_sync); + } + +fini: + if (display_wrapper) + wl_proxy_wrapper_destroy(display_wrapper); + if (registry) + wl_registry_destroy(registry); + if (queue) + wl_event_queue_destroy(queue); + + return result; +} + +static void* +_thread_init(void *data) +{ + tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; + + if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) { + TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)", + wl_egl_display, wl_egl_display->wl_display); + } + + if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) { + TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED"); + } + + return wl_egl_display; +} + +static gboolean +__thread_func_disp_prepare(tpl_gsource *gsource) +{ + tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)gsource->data; + + /* If this wl_egl_display is already prepared, + * do nothing in this function. */ + if (wl_egl_display->prepared) + return FALSE; + + /* If there is a last_error, there is no need to poll, + * so skip directly to dispatch. 
+ * prepare -> dispatch */ + if (wl_egl_display->last_error) + return TRUE; + + while (wl_display_prepare_read_queue(wl_egl_display->wl_display, + wl_egl_display->ev_queue) != 0) { + if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, + wl_egl_display->ev_queue) == -1) { + _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); + } + } + + wl_egl_display->prepared = TPL_TRUE; + + wl_display_flush(wl_egl_display->wl_display); + + return FALSE; +} + +static gboolean +_twe_thread_wl_disp_check(GSource *source) +{ + twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source; + gboolean ret = FALSE; + + if (!disp_source->prepared) + return ret; + + /* If prepared, but last_error is set, + * cancel_read is executed and FALSE is returned. + * That can lead to G_SOURCE_REMOVE by calling disp_prepare again + * and skipping disp_check from prepare to disp_dispatch. + * check -> prepare -> dispatch -> G_SOURCE_REMOVE */ + if (disp_source->prepared && disp_source->last_error) { + wl_display_cancel_read(disp_source->disp); + return ret; + } + + if (disp_source->gfd.revents & G_IO_IN) { + if (wl_display_read_events(disp_source->disp) == -1) + _wl_display_print_err(disp_source, "read_event."); + ret = TRUE; + } else { + wl_display_cancel_read(disp_source->disp); + ret = FALSE; + } + + disp_source->prepared = TPL_FALSE; + + return ret; +} + +static gboolean +_twe_thread_wl_disp_dispatch(GSource *source, GSourceFunc cb, gpointer data) +{ + twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source; + + /* If there is last_error, G_SOURCE_REMOVE should be returned + * to remove the gsource from the main loop. 
+ * This is because disp_source is not valid since last_error was set.*/ + if (disp_source->last_error) { + return G_SOURCE_REMOVE; + } + + g_mutex_lock(&disp_source->wl_event_mutex); + if (disp_source->gfd.revents & G_IO_IN) { + if (wl_display_dispatch_queue_pending(disp_source->disp, + disp_source->ev_queue) == -1) { + _wl_display_print_err(disp_source, "dispatch_queue_pending"); + } + } + + wl_display_flush(disp_source->disp); + g_mutex_unlock(&disp_source->wl_event_mutex); + + return G_SOURCE_CONTINUE; +} + +static void +_twe_thread_wl_disp_finalize(GSource *source) +{ + TPL_LOG_T(BACKEND, "finalize| disp_source(%p)", source); + + return; +} + +static tpl_gsource_functions disp_funcs = { + .prepare = __thread_func_disp_prepare, + .check = __thread_func_disp_check, + .dispatch = __thread_func_disp_dispatch, + .finalize = __thread_func_disp_finalize, +}; + +static tpl_result_t +__tpl_wl_egl_display_init(tpl_display_t *display) +{ + tpl_wl_egl_display_t *wl_egl_display = NULL; + + TPL_ASSERT(display); + + /* Do not allow default display in wayland. 
*/ + if (!display->native_handle) { + TPL_ERR("Invalid native handle for display."); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!_check_native_handle_is_wl_display(display->native_handle)) { + TPL_ERR("native_handle(%p) is not wl_display", display->native_handle); + return TPL_ERROR_INVALID_PARAMETER; + } + + wl_egl_display = (tpl_wl_egl_display_t *) calloc(1, + sizeof(tpl_wl_egl_display_t)); + if (!wl_egl_display) { + TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + display->backend.data = wl_egl_display; + display->bufmgr_fd = -1; + + wl_egl_display->wl_display = (struct wl_display *)display->native_handle; + wl_egl_display->last_error = 0; + wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled + wl_egl_display->prepared = TPL_FALSE; + + /* Wayland Interfaces */ + wl_egl_display->tss = NULL; + wl_egl_display->presentation = NULL; + wl_egl_display->explicit_sync = NULL; + + wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled + env = tpl_getenv("TPL_WAIT_VBLANK"); + if (env && !atoi(env)) { + wl_egl_display->use_wait_vblank = TPL_FALSE; + } + + /* Create gthread */ + wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", + _thread_init, (void *)wl_egl_display); + if (!wl_egl_display->thread) { + TPL_ERR("Failed to create wl_egl_thread"); + goto free_display; + } + + wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread, + (void *)wl_egl_display, + wl_display_get_fd(wl_egl_display->wl_display), + & + ) + if (!wl_egl_display->twe_display) { + TPL_ERR("Failed to add native_display(%p) to thread(%p)", + display->native_handle, + wl_egl_display->wl_egl_thread); + goto free_display; + } + + TPL_LOG_T("WL_EGL", + "[INIT DISPLAY] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_egl_display, + wl_egl_display->thread, + wl_egl_display->wl_display); + + TPL_LOG_T("WL_EGL", + "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%S) USE_EXPLICIT_SYNC(%s)", + 
wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE", + wl_egl_display->tss ? "TRUE" : "FALSE", + wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE"); + + return TPL_ERROR_NONE; + +free_display: + if (wl_egl_display->thread) + tpl_gthread_destroy(wl_egl_display->thread); + + wl_egl_display->thread = NULL; + free(wl_egl_display); + + display->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; +} + +static void +__tpl_wl_egl_display_fini(tpl_display_t *display) +{ + tpl_wl_egl_display_t *wl_egl_display; + + TPL_ASSERT(display); + + wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data; + if (wl_egl_display) { + + TPL_LOG_T("WL_EGL", + "[FINI] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_egl_display, + wl_egl_display->thread, + wl_egl_display->wl_display); + + if (wl_egl_display->twe_display) { + tpl_result_t ret = TPL_ERROR_NONE; + ret = twe_display_del(wl_egl_display->twe_display); + if (ret != TPL_ERROR_NONE) + TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)", + wl_egl_display->twe_display, + wl_egl_display->wl_egl_thread); + wl_egl_display->twe_display = NULL; + } + + if (wl_egl_display->wl_egl_thread) { + twe_thread_destroy(wl_egl_display->wl_egl_thread); + wl_egl_display->wl_egl_thread = NULL; + } + + free(wl_egl_display); + } + + display->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_egl_display_query_config(tpl_display_t *display, + tpl_surface_type_t surface_type, + int red_size, int green_size, + int blue_size, int alpha_size, + int color_depth, int *native_visual_id, + tpl_bool_t *is_slow) +{ + TPL_ASSERT(display); + + if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 && + green_size == 8 && blue_size == 8 && + (color_depth == 32 || color_depth == 24)) { + + if (alpha_size == 8) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + if (alpha_size == 0) { + if (native_visual_id) *native_visual_id = 
TBM_FORMAT_XRGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + } + + return TPL_ERROR_INVALID_PARAMETER; +} + +static tpl_result_t +__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id, + int alpha_size) +{ + TPL_IGNORE(display); + TPL_IGNORE(visual_id); + TPL_IGNORE(alpha_size); + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_egl_display_get_window_info(tpl_display_t *display, + tpl_handle_t window, int *width, + int *height, tbm_format *format, + int depth, int a_size) +{ + tpl_result_t ret = TPL_ERROR_NONE; + + TPL_ASSERT(display); + TPL_ASSERT(window); + + if ((ret = twe_get_native_window_info(window, width, height, format, a_size)) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to get size info of native_window(%p)", window); + } + + return ret; +} + +static tpl_result_t +__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display, + tpl_handle_t pixmap, int *width, + int *height, tbm_format *format) +{ + tbm_surface_h tbm_surface = NULL; + + tbm_surface = twe_get_native_buffer_from_pixmap(pixmap); + if (!tbm_surface) { + TPL_ERR("Failed to get tbm_surface_h from native pixmap."); + return TPL_ERROR_INVALID_OPERATION; + } + + if (width) *width = tbm_surface_get_width(tbm_surface); + if (height) *height = tbm_surface_get_height(tbm_surface); + if (format) *format = tbm_surface_get_format(tbm_surface); + + return TPL_ERROR_NONE; +} + +static tbm_surface_h +__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) +{ + tbm_surface_h tbm_surface = NULL; + + TPL_ASSERT(pixmap); + + tbm_surface = twe_get_native_buffer_from_pixmap(pixmap); + if (!tbm_surface) { + TPL_ERR("Failed to get tbm_surface_h from wayland_tbm."); + return NULL; + } + + return tbm_surface; +} + +static void +__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, + void *data) +{ + tpl_surface_t *surface = NULL; + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tpl_bool_t is_activated = TPL_FALSE; + int width, 
height; + + surface = (tpl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(surface); + + wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + /* When the queue is resized, change the reset flag to TPL_TRUE to reflect + * the changed window size at the next frame. */ + width = tbm_surface_queue_get_width(surface_queue); + height = tbm_surface_queue_get_height(surface_queue); + if (surface->width != width || surface->height != height) { + TPL_LOG_T("WL_EGL", + "[QUEUE_RESIZE_CB] wl_egl_surface(%p) tbm_queue(%p) (%dx%d)", + wl_egl_surface, surface_queue, width, height); + } + + /* When queue_reset_callback is called, if is_activated is different from + * its previous state change the reset flag to TPL_TRUE to get a new buffer + * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ + is_activated = twe_surface_check_activated(wl_egl_surface->twe_surface); + if (wl_egl_surface->is_activated != is_activated) { + if (is_activated) { + TPL_LOG_T("WL_EGL", + "[ACTIVATED_CB] wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, surface_queue); + } else { + TPL_LOG_T("WL_EGL", + "[DEACTIVATED_CB] wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, surface_queue); + } + } + + wl_egl_surface->reset = TPL_TRUE; + + if (surface->reset_cb) + surface->reset_cb(surface->reset_data); +} + +void __cb_window_rotate_callback(void *data) +{ + tpl_surface_t *surface = (tpl_surface_t *)data; + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + int rotation; + + if (!surface) { + TPL_ERR("Inavlid parameter. surface is NULL."); + return; + } + + wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. 
surface->backend.data is NULL"); + return; + } + + rotation = twe_surface_get_rotation(wl_egl_surface->twe_surface); + + surface->rotation = rotation; +} + +static tpl_result_t +__tpl_wl_egl_surface_init(tpl_surface_t *surface) +{ + tpl_wl_egl_display_t *wl_egl_display = NULL; + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tbm_surface_queue_h tbm_queue = NULL; + twe_surface_h twe_surface = NULL; + tpl_result_t ret = TPL_ERROR_NONE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); + TPL_ASSERT(surface->native_handle); + + wl_egl_display = + (tpl_wl_egl_display_t *)surface->display->backend.data; + if (!wl_egl_display) { + TPL_ERR("Invalid parameter. wl_egl_display(%p)", + wl_egl_display); + return TPL_ERROR_INVALID_PARAMETER; + } + + wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1, + sizeof(tpl_wl_egl_surface_t)); + if (!wl_egl_surface) { + TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + surface->backend.data = (void *)wl_egl_surface; + + if (__tpl_object_init(&wl_egl_surface->base, + TPL_OBJECT_SURFACE, + NULL) != TPL_ERROR_NONE) { + TPL_ERR("Failed to initialize backend surface's base object!"); + goto object_init_fail; + } + + twe_surface = twe_surface_add(wl_egl_display->wl_egl_thread, + wl_egl_display->twe_display, + surface->native_handle, + surface->format, surface->num_buffers); + if (!twe_surface) { + TPL_ERR("Failed to add native_window(%p) to thread(%p)", + surface->native_handle, wl_egl_display->wl_egl_thread); + goto create_twe_surface_fail; + } + + tbm_queue = twe_surface_get_tbm_queue(twe_surface); + if (!tbm_queue) { + TPL_ERR("Failed to get tbm_queue from twe_surface(%p)", twe_surface); + goto queue_create_fail; + } + + /* Set reset_callback to tbm_queue */ + if (tbm_surface_queue_add_reset_cb(tbm_queue, + __cb_tbm_surface_queue_reset_callback, + (void *)surface)) { + TPL_ERR("TBM surface queue add reset cb failed!"); + 
goto add_reset_cb_fail; + } + + wl_egl_surface->reset = TPL_FALSE; + wl_egl_surface->twe_surface = twe_surface; + wl_egl_surface->tbm_queue = tbm_queue; + wl_egl_surface->is_activated = TPL_FALSE; + wl_egl_surface->need_to_enqueue = TPL_TRUE; + + surface->width = tbm_surface_queue_get_width(tbm_queue); + surface->height = tbm_surface_queue_get_height(tbm_queue); + surface->rotation = twe_surface_get_rotation(twe_surface); + + ret = twe_surface_set_rotate_callback(twe_surface, (void *)surface, + (tpl_surface_cb_func_t)__cb_window_rotate_callback); + if (ret != TPL_ERROR_NONE) { + TPL_WARN("Failed to register rotate callback."); + } + + TPL_LOG_T("WL_EGL", + "[INIT1/2]tpl_surface(%p) tpl_wl_egl_surface(%p) twe_surface(%p)", + surface, wl_egl_surface, twe_surface); + TPL_LOG_T("WL_EGL", + "[INIT2/2]size(%dx%d)rot(%d)|tbm_queue(%p)|native_window(%p)", + surface->width, surface->height, surface->rotation, + tbm_queue, surface->native_handle); + + return TPL_ERROR_NONE; + +add_reset_cb_fail: +queue_create_fail: + twe_surface_del(twe_surface); +create_twe_surface_fail: +object_init_fail: + free(wl_egl_surface); + surface->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; +} + +static void +__tpl_wl_egl_surface_fini(tpl_surface_t *surface) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = NULL; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + + wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + TPL_OBJECT_LOCK(wl_egl_surface); + + wl_egl_display = (tpl_wl_egl_display_t *) + surface->display->backend.data; + + if (wl_egl_display == NULL) { + TPL_ERR("check failed: wl_egl_display == NULL"); + TPL_OBJECT_UNLOCK(wl_egl_surface); + return; + } + + if (surface->type == TPL_SURFACE_TYPE_WINDOW) { + TPL_LOG_T("WL_EGL", + "[FINI] wl_egl_surface(%p) native_window(%p) twe_surface(%p)", + wl_egl_surface, surface->native_handle, + wl_egl_surface->twe_surface); + + 
if (twe_surface_del(wl_egl_surface->twe_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", + wl_egl_surface->twe_surface, + wl_egl_display->wl_egl_thread); + } + + wl_egl_surface->twe_surface = NULL; + wl_egl_surface->tbm_queue = NULL; + } + + TPL_OBJECT_UNLOCK(wl_egl_surface); + __tpl_object_fini(&wl_egl_surface->base); + free(wl_egl_surface); + surface->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface, + tpl_bool_t set) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + if (!surface) { + TPL_ERR("Invalid parameter. tpl_surface(%p)", surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)", + surface, wl_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!wl_egl_surface->twe_surface) { + TPL_ERR("Invalid parameter. wl_egl_surface(%p) twe_surface(%p)", + wl_egl_surface, wl_egl_surface->twe_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + twe_surface_set_rotation_capablity(wl_egl_surface->twe_surface, + set); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface, + int post_interval) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + if (!surface) { + TPL_ERR("Invalid parameter. tpl_surface(%p)", surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)", + surface, wl_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!wl_egl_surface->twe_surface) { + TPL_ERR("Invalid parameter. 
wl_egl_surface(%p) twe_surface(%p)", + wl_egl_surface, wl_egl_surface->twe_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + twe_surface_set_post_interval(wl_egl_surface->twe_surface, + post_interval); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, tbm_fd sync_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(tbm_surface); + TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); + + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *) surface->backend.data; + tbm_surface_queue_error_e tsq_err; + tpl_result_t ret = TPL_ERROR_NONE; + int bo_name = 0; + + TPL_OBJECT_LOCK(wl_egl_surface); + + bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); + + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)", + surface, wl_egl_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wl_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", + tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wl_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + TRACE_MARK("[ENQ] BO_NAME:%d", bo_name); + + TPL_LOG_T("WL_EGL", + "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_surface, tbm_surface, bo_name, sync_fence); + + /* If there are received region information, + * save it to buf_info in tbm_surface user_data using below API. */ + if (num_rects && rects) { + ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects); + if (ret != TPL_ERROR_NONE) { + TPL_WARN("Failed to set damage region. 
num_rects(%d) rects(%p)", + num_rects, rects); + } + } + + if (!wl_egl_surface->need_to_enqueue || + !twe_surface_check_commit_needed(wl_egl_surface->twe_surface, + tbm_surface)) { + TPL_LOG_T("WL_EGL", + "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", + ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wl_egl_surface); + return TPL_ERROR_NONE; + } + + /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and + * commit if surface->frontbuffer that is already set and the tbm_surface + * client want to enqueue are the same. + */ + if (surface->is_frontbuffer_mode) { + /* The first buffer to be activated in frontbuffer mode must be + * committed. Subsequence frames do not need to be committed because + * the buffer is already displayed. + */ + if (surface->frontbuffer == tbm_surface) + wl_egl_surface->need_to_enqueue = TPL_FALSE; + + if (sync_fence != -1) { + close(sync_fence); + sync_fence = -1; + } + } + + if (sync_fence != -1) { + ret = twe_surface_set_sync_fd(wl_egl_surface->twe_surface, + tbm_surface, sync_fence); + if (ret != TPL_ERROR_NONE) { + TPL_WARN("Failed to set sync fd (%d). But it will continue.", + sync_fence); + } + } + + tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + tbm_surface_internal_unref(tbm_surface); + TPL_ERR("Failed to enqueue tbm_surface(%p). 
tpl_surface(%p) tsq_err=%d", + tbm_surface, surface, tsq_err); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wl_egl_surface); + return TPL_ERROR_INVALID_OPERATION; + } + + tbm_surface_internal_unref(tbm_surface); + + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_OBJECT_UNLOCK(wl_egl_surface); + + return TPL_ERROR_NONE; +} + +static tpl_bool_t +__tpl_wl_egl_surface_validate(tpl_surface_t *surface) +{ + tpl_bool_t retval = TPL_TRUE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + + retval = !(wl_egl_surface->reset); + + return retval; +} + +static tpl_result_t +__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + if (!wl_egl_surface) { + TPL_ERR("Invalid backend surface. surface(%p) wl_egl_surface(%p)", + surface, wl_egl_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Invalid buffer. 
tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + tbm_surface_internal_unref(tbm_surface); + + tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to release tbm_surface(%p) surface(%p)", + tbm_surface, surface); + return TPL_ERROR_INVALID_OPERATION; + } + + TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)", + surface, tbm_surface); + + return TPL_ERROR_NONE; +} + +#define CAN_DEQUEUE_TIMEOUT_MS 10000 + +static tbm_surface_h +__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, + tbm_fd *sync_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->backend.data); + TPL_OBJECT_CHECK_RETURN(surface, NULL); + + tbm_surface_h tbm_surface = NULL; + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)surface->display->backend.data; + tbm_surface_queue_error_e tsq_err = 0; + int is_activated = 0; + int bo_name = 0; + tpl_result_t lock_ret = TPL_FALSE; + + TPL_OBJECT_UNLOCK(surface); + tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( + wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS); + TPL_OBJECT_LOCK(surface); + + /* After the can dequeue state, call twe_display_lock to prevent other + * events from being processed in wayland_egl_thread + * during below dequeue procedure. */ + lock_ret = twe_display_lock(wl_egl_display->twe_display); + + if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { + TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", + wl_egl_surface->tbm_queue, surface); + if (twe_surface_queue_force_flush(wl_egl_surface->twe_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to timeout reset. 
tbm_queue(%p) surface(%p)", + wl_egl_surface->tbm_queue, surface); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wl_egl_display->twe_display); + return NULL; + } else { + tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + } + } + + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)", + wl_egl_surface->tbm_queue, surface); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wl_egl_display->twe_display); + return NULL; + } + + /* wayland client can check their states (ACTIVATED or DEACTIVATED) with + * below function [wayland_tbm_client_queue_check_activate()]. + * This function has to be called before tbm_surface_queue_dequeue() + * in order to know what state the buffer will be dequeued next. + * + * ACTIVATED state means non-composite mode. Client can get buffers which + can be displayed directly(without compositing). + * DEACTIVATED state means composite mode. Client's buffer will be displayed + by compositor(E20) with compositing. + */ + is_activated = twe_surface_check_activated(wl_egl_surface->twe_surface); + wl_egl_surface->is_activated = is_activated; + + surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); + surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); + + if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { + /* If surface->frontbuffer is already set in frontbuffer mode, + * it will return that frontbuffer if it is still activated, + * otherwise dequeue the new buffer after initializing + * surface->frontbuffer to NULL. 
*/ + if (is_activated && !wl_egl_surface->reset) { + TPL_LOG_T("WL_EGL", + "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", + surface->frontbuffer, + tbm_bo_export(tbm_surface_internal_get_bo( + surface->frontbuffer, 0))); + TRACE_ASYNC_BEGIN((int)surface->frontbuffer, + "[DEQ]~[ENQ] BO_NAME:%d", + tbm_bo_export(tbm_surface_internal_get_bo( + surface->frontbuffer, 0))); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wl_egl_display->twe_display); + return surface->frontbuffer; + } else { + surface->frontbuffer = NULL; + wl_egl_surface->need_to_enqueue = TPL_TRUE; + } + } else { + surface->frontbuffer = NULL; + } + + tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue, + &tbm_surface); + if (!tbm_surface) { + TPL_ERR("Failed to dequeue from tbm_queue(%p) surface(%p)| tsq_err = %d", + wl_egl_surface->tbm_queue, surface, tsq_err); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wl_egl_display->twe_display); + return NULL; + } + + tbm_surface_internal_ref(tbm_surface); + + /* If twe_surface_get_buffer_release_fence_fd return -1, + * the tbm_surface can be used immediately. + * If not, user(EGL) have to wait until signaled. */ + if (sync_fence) { + *sync_fence = twe_surface_get_buffer_release_fence_fd( + wl_egl_surface->twe_surface, tbm_surface); + } + + bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); + + if (surface->is_frontbuffer_mode && is_activated) + surface->frontbuffer = tbm_surface; + + wl_egl_surface->reset = TPL_FALSE; + + TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name); + TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)", + tbm_surface, bo_name, sync_fence ? 
*sync_fence : -1); + + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wl_egl_display->twe_display); + + return tbm_surface; +} + +void +__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) +{ + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + + if (width) + *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); + if (height) + *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); +} + + +tpl_bool_t +__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy) +{ + if (!native_dpy) return TPL_FALSE; + + if (twe_check_native_handle_is_wl_display(native_dpy)) + return TPL_TRUE; + + return TPL_FALSE; +} + +void +__tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend) +{ + TPL_ASSERT(backend); + + backend->type = TPL_BACKEND_WAYLAND_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_egl_display_init; + backend->fini = __tpl_wl_egl_display_fini; + backend->query_config = __tpl_wl_egl_display_query_config; + backend->filter_config = __tpl_wl_egl_display_filter_config; + backend->get_window_info = __tpl_wl_egl_display_get_window_info; + backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info; + backend->get_buffer_from_native_pixmap = + __tpl_wl_egl_display_get_buffer_from_native_pixmap; +} + +void +__tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend) +{ + TPL_ASSERT(backend); + + backend->type = TPL_BACKEND_WAYLAND_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_egl_surface_init; + backend->fini = __tpl_wl_egl_surface_fini; + backend->validate = __tpl_wl_egl_surface_validate; + backend->cancel_dequeued_buffer = + __tpl_wl_egl_surface_cancel_dequeued_buffer; + backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer; + backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer; + backend->set_rotation_capability = + __tpl_wl_egl_surface_set_rotation_capability; + backend->set_post_interval = + 
__tpl_wl_egl_surface_set_post_interval; + backend->get_size = + __tpl_wl_egl_surface_get_size; +} + -- 2.7.4 From 9bcd751a968af21b5b068a2a63cac25359081e71 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 9 Nov 2020 14:19:52 +0900 Subject: [PATCH 03/16] Added an API to check io condtion. Change-Id: I493b57bd93096fe433cd6db521563cacd8d47a3c Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 17 +++++++++++++++++ src/tpl_utils_gthread.h | 9 +++++++++ 2 files changed, 26 insertions(+) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 181bf31..66e7b23 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -285,6 +285,23 @@ tpl_gsource_get_data(tpl_gsource *source) return source->data; } +tpl_bool_t +tpl_gsource_check_io_condition(tpl_gsource *source) +{ + GIOCondition cond; + + if (!source) { + TPL_ERR("Invalid parameter tpl_gsource is null"); + return TPL_FALSE; + } + + cond = g_source_query_unix_fd(source->gsource, source->tag); + if (cond & G_IO_IN) + return TPL_TRUE; + + return TPL_FALSE; +} + void tpl_gmutex_init(tpl_gmutex *gmutex) { diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 1886609..084754c 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -106,6 +106,15 @@ void * tpl_gsource_get_data(tpl_gsource *source); /** + * Check the GIOCondition of fd that tpl_gsource has + * + * @param source Pointer to tpl_gsource to check io condition. + * @return TPL_TRUE if GIOCondition is G_IO_IN, TPL_FALSE otherwise (G_IO_ERR). + */ +tpl_bool_t +tpl_gsource_check_io_condition(tpl_gsource *source); + +/** * wrapping g_mutex_init() * * @param gmutex Pointer to tpl_gmutex. 
-- 2.7.4 From cbef264849be56d2d3386dcb1c09dd6eeeb7725b Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 9 Nov 2020 14:20:05 +0900 Subject: [PATCH 04/16] Implement display backend at tpl_wl_egl.c Change-Id: I1a25d1d09eb7c3baf308e085d8dd690588f487f2 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl.c | 477 ++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 295 insertions(+), 182 deletions(-) diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 7b41f7e..99285b8 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -45,6 +45,9 @@ struct _tpl_wl_egl_display { struct wayland_tbm_client *wl_tbm_client; int last_error; /* errno of the last wl_display error*/ + tpl_bool_t wl_initialized; + tpl_bool_t tdm_initialized; + tdm_client *tdm_client; tpl_gsource *tdm_source; @@ -242,13 +245,15 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - TPL_LOG_T(BACKEND, "tdm_destroy| tdm_source(%p) tdm_client(%p)", + TPL_LOG_T("WL_EGL", "tdm_destroy| tdm_source(%p) tdm_client(%p)", gsource, wl_egl_display->tdm_client); if (wl_egl_display->tdm_client) { tdm_client_destroy(wl_egl_display->tdm_client); wl_egl_display->tdm_client = NULL; } + + wl_egl_display->tdm_initialized = TPL_FALSE; } static tpl_gsource_functions tdm_funcs = { @@ -262,7 +267,7 @@ tpl_result_t _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display) { tpl_gsource *tdm_source = NULL; - tdm_client *client = NULL; + tdm_client *tdm_client = NULL; int tdm_display_fd = -1; tdm_error tdm_err = TDM_ERROR_NONE; @@ -271,33 +276,35 @@ _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display) return TPL_ERROR_INVALID_OPERATION; } - client = tdm_client_create(&tdm_err); - if (!client || tdm_err != TDM_ERROR_NONE) { + tdm_client = tdm_client_create(&tdm_err); + if (!tdm_client || tdm_err != TDM_ERROR_NONE) { TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err); return TPL_ERROR_INVALID_OPERATION; } - tdm_err = 
tdm_client_get_fd(client, &tdm_fd); - if (tdm_fd < 0 || tdm_err != TDM_ERROR_NONE) { + tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd); + if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) { TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err); - tdm_client_destroy(client); + tdm_client_destroy(tdm_client); return TPL_ERROR_INVALID_OPERATION; } tdm_source = tpl_gsource_create(wl_egl_display->thread, - (void *)wl_egl_display, + (void *)wl_egl_display, tdm_display_fd, &tdm_funcs, TPL_FALSE); if (!tdm_source) { TPL_ERR("Failed to create tdm_gsource\n"); - tdm_client_destroy(client); + tdm_client_destroy(tdm_client); return TPL_ERROR_INVALID_OPERATION; } - wl_egl_display->tdm_client = client; + wl_egl_display->tdm_client = tdm_client; wl_egl_display->tdm_source = tdm_source; - TPL_LOG_T(BACKEND, "TPL_WAIT_VBLANK:DEFAULT_ENABLED"); - TPL_LOG_T(BACKEND, "wl_egl_display(%p) tdm_source(%p) tdm_client(%p)", + wl_egl_display->tdm_initialized = TPL_TRUE; + + TPL_LOG_T("WL_EGL", "TPL_WAIT_VBLANK:DEFAULT_ENABLED"); + TPL_LOG_T("WL_EGL", "wl_egl_display(%p) tdm_source(%p) tdm_client(%p)", wl_egl_display, tdm_source, client); return TPL_ERROR_NONE; @@ -376,121 +383,22 @@ _wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display, wl_egl_display->last_error = errno; } -tpl_result_t -_thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display) -{ - struct wl_registry *registry = NULL; - struct wl_event_queue *queue = NULL; - struct wl_display *display_wrapper = NULL; - int ret; - tpl_result_t result = TPL_ERROR_NONE; - - queue = wl_display_create_queue(wl_egl_display->wl_display); - if (!queue) { - TPL_ERR("Failed to create wl_queue wl_display(%p)", - wl_egl_display->wl_display); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display); - if (!display_wrapper) { - TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)", - wl_egl_display->wl_display); - result = 
TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue); - - registry = wl_display_get_registry(display_wrapper); - if (!registry) { - TPL_ERR("Failed to create wl_registry"); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - wl_proxy_wrapper_destroy(display_wrapper); - display_wrapper = NULL; - - if (wl_registry_add_listener(registry, ®istry_listener, - wl_egl_display)) { - TPL_ERR("Failed to wl_registry_add_listener"); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue); - if (ret == -1) { - _twe_display_print_err(wl_egl_display, "roundtrip_queue"); - result = TPL_ERROR_INVALID_OPERATION; - goto fini; - } - - /* set tizen_surface_shm's queue as client's private queue */ - if (wl_egl_display->tss) { - wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss, - wl_egl_display->ev_queue); - TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss); - } - - if (wl_egl_display->presentation) { - wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation, - wl_egl_display->ev_queue); - TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.", - wl_egl_display->presentation); - } - - if (wl_egl_display->explicit_sync) { - wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync, - wl_egl_display->ev_queue); - TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", - wl_egl_display->explicit_sync); - } - -fini: - if (display_wrapper) - wl_proxy_wrapper_destroy(display_wrapper); - if (registry) - wl_registry_destroy(registry); - if (queue) - wl_event_queue_destroy(queue); - - return result; -} - -static void* -_thread_init(void *data) -{ - tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; - - if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) { - TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)", - wl_egl_display, 
wl_egl_display->wl_display); - } - - if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) { - TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED"); - } - - return wl_egl_display; -} - -static gboolean +static tpl_bool_t __thread_func_disp_prepare(tpl_gsource *gsource) { - tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)gsource->data; + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); /* If this wl_egl_display is already prepared, * do nothing in this function. */ if (wl_egl_display->prepared) - return FALSE; + return TPL_FALSE; /* If there is a last_error, there is no need to poll, * so skip directly to dispatch. * prepare -> dispatch */ if (wl_egl_display->last_error) - return TRUE; + return TPL_TRUE; while (wl_display_prepare_read_queue(wl_egl_display->wl_display, wl_egl_display->ev_queue) != 0) { @@ -504,16 +412,17 @@ __thread_func_disp_prepare(tpl_gsource *gsource) wl_display_flush(wl_egl_display->wl_display); - return FALSE; + return TPL_FALSE; } -static gboolean -_twe_thread_wl_disp_check(GSource *source) +static tpl_bool_t +__thread_func_disp_check(tpl_gsource *gsource) { - twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source; - gboolean ret = FALSE; + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + tpl_bool_t ret = TPL_FALSE; - if (!disp_source->prepared) + if (!wl_egl_display->prepared) return ret; /* If prepared, but last_error is set, @@ -521,55 +430,63 @@ _twe_thread_wl_disp_check(GSource *source) * That can lead to G_SOURCE_REMOVE by calling disp_prepare again * and skipping disp_check from prepare to disp_dispatch. 
* check -> prepare -> dispatch -> G_SOURCE_REMOVE */ - if (disp_source->prepared && disp_source->last_error) { - wl_display_cancel_read(disp_source->disp); + if (wl_egl_display->prepared && wl_egl_display->last_error) { + wl_display_cancel_read(wl_egl_display->wl_display); return ret; } - if (disp_source->gfd.revents & G_IO_IN) { - if (wl_display_read_events(disp_source->disp) == -1) - _wl_display_print_err(disp_source, "read_event."); - ret = TRUE; + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_read_events(wl_egl_display->wl_display) == -1) + _wl_display_print_err(wl_egl_display, "read_event"); + ret = TPL_TRUE; } else { - wl_display_cancel_read(disp_source->disp); - ret = FALSE; + wl_display_cancel_read(wl_egl_display->wl_display); + ret = TPL_FALSE; } - disp_source->prepared = TPL_FALSE; + wl_egl_display->prepared = TPL_FALSE; return ret; } -static gboolean -_twe_thread_wl_disp_dispatch(GSource *source, GSourceFunc cb, gpointer data) +static tpl_bool_t +__thread_func_disp_dispatch(tpl_gsource *gsource) { - twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source; + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - /* If there is last_error, G_SOURCE_REMOVE should be returned + /* If there is last_error, SOURCE_REMOVE should be returned * to remove the gsource from the main loop. 
- * This is because disp_source is not valid since last_error was set.*/ - if (disp_source->last_error) { - return G_SOURCE_REMOVE; + * This is because wl_egl_display is not valid since last_error was set.*/ + if (wl_egl_display->last_error) { + return TPL_GSOURCE_REMOVE; } - g_mutex_lock(&disp_source->wl_event_mutex); - if (disp_source->gfd.revents & G_IO_IN) { - if (wl_display_dispatch_queue_pending(disp_source->disp, - disp_source->ev_queue) == -1) { - _wl_display_print_err(disp_source, "dispatch_queue_pending"); + g_mutex_lock(&wl_egl_display->wl_event_mutex); + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, + wl_egl_display->ev_queue) == -1) { + _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); } } - wl_display_flush(disp_source->disp); - g_mutex_unlock(&disp_source->wl_event_mutex); + wl_display_flush(wl_egl_display->wl_display); + g_mutex_unlock(&wl_egl_display->wl_event_mutex); - return G_SOURCE_CONTINUE; + return TPL_GSOURCE_CONTINUE; } static void -_twe_thread_wl_disp_finalize(GSource *source) +__thread_func_disp_finalize(tpl_gsource *source) { - TPL_LOG_T(BACKEND, "finalize| disp_source(%p)", source); + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + + if (wl_egl_display->wl_initialized) + _thread_wl_display_fini(wl_egl_display); + + TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)", + wl_egl_display, source); return; } @@ -581,6 +498,34 @@ static tpl_gsource_functions disp_funcs = { .finalize = __thread_func_disp_finalize, }; +static void* +_thread_init(void *data) +{ + tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; + + if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) { + TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)", + wl_egl_display, wl_egl_display->wl_display); + } + + if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) { + TPL_WARN("Failed 
to initialize tdm-client. TPL_WAIT_VLANK:DISABLED"); + } + + return wl_egl_display; +} + +static void +_thread_fini(void *data) +{ + tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; + + if (wl_egl_display->tdm_initialized) + tpl_gsource_destroy(wl_egl_display->tdm_source); + if (wl_egl_display->wl_initialized) + _thread_wl_display_fini(wl_egl_display); +} + static tpl_result_t __tpl_wl_egl_display_init(tpl_display_t *display) { @@ -599,6 +544,12 @@ __tpl_wl_egl_display_init(tpl_display_t *display) return TPL_ERROR_INVALID_PARAMETER; } + ev_queue = wl_display_create_queue(display->native_handle); + if (!ev_queue) { + TPL_ERR("Failed to create wl_event_queue."); + return TPL_ERROR_OUT_OF_MEMORY; + } + wl_egl_display = (tpl_wl_egl_display_t *) calloc(1, sizeof(tpl_wl_egl_display_t)); if (!wl_egl_display) { @@ -609,6 +560,10 @@ __tpl_wl_egl_display_init(tpl_display_t *display) display->backend.data = wl_egl_display; display->bufmgr_fd = -1; + wl_egl_display->tdm_initialized = TPL_FALSE; + wl_egl_display->wl_initialized = TPL_FALSE; + + wl_egl_display->ev_queue = ev_queue; wl_egl_display->wl_display = (struct wl_display *)display->native_handle; wl_egl_display->last_error = 0; wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled @@ -625,6 +580,8 @@ __tpl_wl_egl_display_init(tpl_display_t *display) wl_egl_display->use_wait_vblank = TPL_FALSE; } + tpl_gmutex_init(&wl_egl_display->wl_event_mutex); + /* Create gthread */ wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", _thread_init, (void *)wl_egl_display); @@ -636,12 +593,11 @@ __tpl_wl_egl_display_init(tpl_display_t *display) wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_display, wl_display_get_fd(wl_egl_display->wl_display), - & - ) - if (!wl_egl_display->twe_display) { + &disp_funcs, TPL_FALSE); + if (!wl_egl_display->disp_source) { TPL_ERR("Failed to add native_display(%p) to thread(%p)", display->native_handle, - 
wl_egl_display->wl_egl_thread); + wl_egl_display->thread); goto free_display; } @@ -661,7 +617,7 @@ __tpl_wl_egl_display_init(tpl_display_t *display) free_display: if (wl_egl_display->thread) - tpl_gthread_destroy(wl_egl_display->thread); + tpl_gthread_destroy(wl_egl_display->thread, _thread_fini); wl_egl_display->thread = NULL; free(wl_egl_display); @@ -679,28 +635,24 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data; if (wl_egl_display) { - TPL_LOG_T("WL_EGL", "[FINI] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", wl_egl_display, wl_egl_display->thread, wl_egl_display->wl_display); - if (wl_egl_display->twe_display) { - tpl_result_t ret = TPL_ERROR_NONE; - ret = twe_display_del(wl_egl_display->twe_display); - if (ret != TPL_ERROR_NONE) - TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)", - wl_egl_display->twe_display, - wl_egl_display->wl_egl_thread); - wl_egl_display->twe_display = NULL; + if (wl_egl_display->gsource) { + tpl_gsource_destroy(wl_egl_display->gsource); + wl_egl_display->gsource = NULL; } - if (wl_egl_display->wl_egl_thread) { - twe_thread_destroy(wl_egl_display->wl_egl_thread); + if (wl_egl_display->thread) { + tpl_gthread_destroy(wl_egl_display->thread, NULL); wl_egl_display->wl_egl_thread = NULL; } + tpl_gmutex_clear(&wl_egl_display->wl_event_mutex); + free(wl_egl_display); } @@ -708,12 +660,146 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) } static tpl_result_t +_thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display) +{ + struct wl_registry *registry = NULL; + struct wl_event_queue *queue = NULL; + struct wl_display *display_wrapper = NULL; + int ret; + tpl_result_t result = TPL_ERROR_NONE; + + queue = wl_display_create_queue(wl_egl_display->wl_display); + if (!queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_egl_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_egl_display->ev_queue = 
wl_display_create_queue(wl_egl_display->wl_display); + if (wl_egl_display->ev_queue) { + TPL_ERR("Failed to create wl_queue wl_display(%p)", + wl_egl_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display); + if (!display_wrapper) { + TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)", + wl_egl_display->wl_display); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue); + + registry = wl_display_get_registry(display_wrapper); + if (!registry) { + TPL_ERR("Failed to create wl_registry"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + wl_proxy_wrapper_destroy(display_wrapper); + display_wrapper = NULL; + + if (wl_registry_add_listener(registry, ®istry_listener, + wl_egl_display)) { + TPL_ERR("Failed to wl_registry_add_listener"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue); + if (ret == -1) { + _twe_display_print_err(wl_egl_display, "roundtrip_queue"); + result = TPL_ERROR_INVALID_OPERATION; + goto fini; + } + + /* set tizen_surface_shm's queue as client's private queue */ + if (wl_egl_display->tss) { + wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss, + wl_egl_display->ev_queue); + TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss); + } + + if (wl_egl_display->presentation) { + wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation, + wl_egl_display->ev_queue); + TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.", + wl_egl_display->presentation); + } + + if (wl_egl_display->explicit_sync) { + wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync, + wl_egl_display->ev_queue); + TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.", + wl_egl_display->explicit_sync); + } + + wl_egl_display->wl_initialized = TPL_TRUE; + +fini: + if 
(display_wrapper) + wl_proxy_wrapper_destroy(display_wrapper); + if (registry) + wl_registry_destroy(registry); + if (queue) + wl_event_queue_destroy(queue); + + return result; +} + +static void +_thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display) +{ + /* If wl_egl_display is in prepared state, cancel it */ + if (wl_egl_display->prepared) { + wl_display_cancel_read(wl_egl_display->wl_display); + wl_egl_display->prepared = TPL_FALSE; + } + + if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, + wl_egl_display->ev_queue) == -1) { + _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); + } + + if (wl_egl_display->tss) { + TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) fini.", wl_egl_display->tss); + tizen_surface_shm_destroy(wl_egl_display->tss); + wl_egl_display->tss = NULL; + } + + if (wl_egl_display->presentation) { + TPL_LOG_T("WL_EGL", "wp_presentation(%p) fini.", wl_egl_display->presentation); + wp_presentation_destroy(wl_egl_display->presentation); + wl_egl_display->presentation = NULL; + } + + if (wl_egl_display->explicit_sync) { + TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) fini.", + wl_egl_display->explicit_sync); + zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync); + wl_egl_display->explicit_sync = NULL; + } + + wl_event_queue_destroy(wl_egl_display->ev_queue); + + wl_egl_display->wl_initialized = TPL_FALSE; + + TPL_LOG_T("WL_EGL", "[FINI] wl_display(%p)", + wl_egl_display->wl_display); +} + +static tpl_result_t __tpl_wl_egl_display_query_config(tpl_display_t *display, - tpl_surface_type_t surface_type, - int red_size, int green_size, - int blue_size, int alpha_size, - int color_depth, int *native_visual_id, - tpl_bool_t *is_slow) + tpl_surface_type_t surface_type, + int red_size, int green_size, + int blue_size, int alpha_size, + int color_depth, int *native_visual_id, + tpl_bool_t *is_slow) { TPL_ASSERT(display); @@ -738,7 +824,7 @@ __tpl_wl_egl_display_query_config(tpl_display_t 
*display, static tpl_result_t __tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id, - int alpha_size) + int alpha_size) { TPL_IGNORE(display); TPL_IGNORE(visual_id); @@ -748,18 +834,34 @@ __tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id, static tpl_result_t __tpl_wl_egl_display_get_window_info(tpl_display_t *display, - tpl_handle_t window, int *width, - int *height, tbm_format *format, - int depth, int a_size) + tpl_handle_t window, int *width, + int *height, tbm_format *format, + int depth, int a_size) { tpl_result_t ret = TPL_ERROR_NONE; + struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window; TPL_ASSERT(display); TPL_ASSERT(window); - if ((ret = twe_get_native_window_info(window, width, height, format, a_size)) - != TPL_ERROR_NONE) { - TPL_ERR("Failed to get size info of native_window(%p)", window); + if (!wl_egl_window) { + TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window); + return TPL_ERROR_INVALID_PARAMETER; + } + + if (width) *width = wl_egl_window->width; + if (height) *height = wl_egl_window->height; + if (format) { + struct tizen_private *tizen_private = _get_tizen_private(wl_egl_window); + if (tizen_private && tizen_private->data) { + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + *format = wl_egl_surface->format; + } else { + if (a_size == 8) + *format = TBM_FORMAT_ARGB8888; + else + *format = TBM_FORMAT_XRGB8888; + } } return ret; @@ -767,15 +869,21 @@ __tpl_wl_egl_display_get_window_info(tpl_display_t *display, static tpl_result_t __tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display, - tpl_handle_t pixmap, int *width, - int *height, tbm_format *format) + tpl_handle_t pixmap, int *width, + int *height, tbm_format *format) { tbm_surface_h tbm_surface = NULL; - tbm_surface = twe_get_native_buffer_from_pixmap(pixmap); + if (!pixmap) { + TPL_ERR("Invalid parameter. 
tpl_handle_t(%p)", pixmap); + return TPL_ERROR_INVALID_PARAMETER; + } + + tbm_surface = wayland_tbm_server_get_surface(NULL, + (struct wl_resource *)pixmap); if (!tbm_surface) { - TPL_ERR("Failed to get tbm_surface_h from native pixmap."); - return TPL_ERROR_INVALID_OPERATION; + TPL_ERR("Failed to get tbm_surface from wayland_tbm."); + return TPL_ERROR_INVALID_PARAMETER; } if (width) *width = tbm_surface_get_width(tbm_surface); @@ -792,7 +900,8 @@ __tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) TPL_ASSERT(pixmap); - tbm_surface = twe_get_native_buffer_from_pixmap(pixmap); + tbm_surface = wayland_tbm_server_get_surface(NULL, + (struct wl_resource *)pixmap); if (!tbm_surface) { TPL_ERR("Failed to get tbm_surface_h from wayland_tbm."); return NULL; @@ -801,6 +910,10 @@ __tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) return tbm_surface; } + + + + static void __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, void *data) -- 2.7.4 From 3d6e4e5b2a12b6b95a4870c45275deaec1b10038 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 28 Dec 2020 16:00:26 +0900 Subject: [PATCH 05/16] Add new enum type tpl_gsource_type_t. - tpl_gsource_type_t can be classified into 4 enums below SOURCE_TYPE_UNKNOWN : not specified. it will be classified to NORMAL SOURCE_TYPE_NORMAL : normal source SOURCE_TYPE_DISPOSABLE : disposable source SOURCE_TYPE_FINALIZER : disposable source to finalize normal source. 
Change-Id: I8a570929642e98ea89d20f6c63f21f7d96574c27 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 9 +++++---- src/tpl_utils_gthread.h | 14 ++++++++++---- src/tpl_wl_egl.c | 4 ++-- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 66e7b23..6d84137 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -24,7 +24,7 @@ struct _tpl_gsource { tpl_bool_t is_eventfd; tpl_gsource_functions *gsource_funcs; - tpl_bool_t is_disposable; + tpl_gsource_type_t type; void *data; }; @@ -187,7 +187,7 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) gsource, gsource->fd, cond); } - if (gsource->is_disposable) + if (gsource->type == SOURCE_TYPE_DISPOSABLE) ret = TPL_GSOURCE_REMOVE; return ret; @@ -219,7 +219,7 @@ static GSourceFuncs _thread_source_funcs = { tpl_gsource * tpl_gsource_create(tpl_gthread *thread, void *data, int fd, - tpl_gsource_functions *funcs, tpl_bool_t is_disposable) + tpl_gsource_functions *funcs, tpl_gsource_type_t type) { tpl_gsource *new_gsource = NULL; @@ -241,12 +241,13 @@ tpl_gsource_create(tpl_gthread *thread, void *data, int fd, new_gsource->is_eventfd = TPL_TRUE; } else { new_gsource->fd = fd; + new_gsource->is_eventfd = TPL_FALSE; } new_gsource->thread = thread; new_gsource->gsource_funcs = funcs; new_gsource->data = data; - new_gsource->is_disposable = is_disposable; + new_gsource->type = type; new_gsource->tag = g_source_add_unix_fd(&new_gsource->gsource, new_gsource->fd, diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 084754c..4da3828 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -19,6 +19,14 @@ typedef void (*tpl_gthread_func) (void *user_data); typedef GMutex tpl_gmutex; typedef GCond tpl_gcond; +typedef enum { + SOURCE_TYPE_UNKNOWN = -1, /* not specified. 
it will be classified to NORMAL */ + SOURCE_TYPE_NORMAL, /* normal source */ + SOURCE_TYPE_DISPOSABLE, /* disposable source */ + SOURCE_TYPE_FINALIZER, /* disposable source to finalize normal source. */ + SOURCE_TYPE_MAX +} tpl_gsource_type_t; + struct _tpl_gsource_functions { tpl_bool_t (*prepare) (tpl_gsource *source); tpl_bool_t (*check) (tpl_gsource *source); @@ -67,9 +75,7 @@ tpl_gthread_destroy(tpl_gthread *thread, tpl_gthread_func deinit_func); * If it is -1, eventfd is created in this function. * @param funcs Pointer to tpl_gsource_functions. * This structure corresponds to GSourceFuncs, and dispatch and finalize are required. - * @param is_disposable If it is intended to be used for single use, TRUE should be passed, - * and FALSE should be passed to keep it. - * In the case of disposable, it is not necessary to call tpl_gsource_destroy. + * @param type Type of source to be created. @see tpl_gsource_type_t * @return Pointer to newly created tpl_gsource. * * All created tpl_gsource resources will be freed in the thread. @@ -77,7 +83,7 @@ tpl_gthread_destroy(tpl_gthread *thread, tpl_gthread_func deinit_func); */ tpl_gsource * tpl_gsource_create(tpl_gthread *thread, void *data, int fd, - tpl_gsource_functions *funcs, tpl_bool_t is_disposable); + tpl_gsource_functions *funcs, tpl_gsource_type_t type); /** * Detach the passed tpl_gsource from thread and destroy it. 
diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 99285b8..4cfb306 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -291,7 +291,7 @@ _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display) tdm_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_display, tdm_display_fd, - &tdm_funcs, TPL_FALSE); + &tdm_funcs, SOURCE_TYPE_NORMAL); if (!tdm_source) { TPL_ERR("Failed to create tdm_gsource\n"); tdm_client_destroy(tdm_client); @@ -593,7 +593,7 @@ __tpl_wl_egl_display_init(tpl_display_t *display) wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_display, wl_display_get_fd(wl_egl_display->wl_display), - &disp_funcs, TPL_FALSE); + &disp_funcs, SOURCE_TYPE_NORMAL); if (!wl_egl_display->disp_source) { TPL_ERR("Failed to add native_display(%p) to thread(%p)", display->native_handle, -- 2.7.4 From d8f9743174f8059825bc6904bc7f47d1a8ed5640 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 28 Dec 2020 21:10:00 +0900 Subject: [PATCH 06/16] Add finalizer source to tpl_gsource - The finalizer source can only be had when the type of tpl_gsource is SOURCE_TYPE_NORMAL. - The finalizer allows a source of NORMAL type to be safely finalized inside a thread. 
Change-Id: I143b9fe52bc38b65f7649115b3a757b572ce1cd0 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 75 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 65 insertions(+), 10 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 6d84137..5678755 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -25,6 +25,7 @@ struct _tpl_gsource { tpl_gsource_functions *gsource_funcs; tpl_gsource_type_t type; + tpl_gsource *finalizer; void *data; }; @@ -139,6 +140,7 @@ tpl_gthread_destroy(tpl_gthread *thread, tpl_gthread_func deinit_func) thread->func = NULL; free(thread); + thread = NULL; } static gboolean @@ -147,7 +149,10 @@ _thread_source_prepare(GSource *source, gint *time) tpl_gsource *gsource = (tpl_gsource *)source; tpl_bool_t ret = TPL_FALSE; - if (gsource->gsource_funcs->prepare) + if (gsource->type != SOURCE_TYPE_NORMAL) + return ret; + + if (gsource->gsource_funcs && gsource->gsource_funcs->prepare) ret = gsource->gsource_funcs->prepare(gsource); *time = -1; @@ -161,7 +166,10 @@ _thread_source_check(GSource *source) tpl_gsource *gsource = (tpl_gsource *)source; tpl_bool_t ret = TPL_FALSE; - if (gsource->gsource_funcs->check) + if (gsource->type != SOURCE_TYPE_NORMAL) + return ret; + + if (gsource->gsource_funcs && gsource->gsource_funcs->check) ret = gsource->gsource_funcs->check(gsource); return ret; @@ -175,11 +183,24 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) GIOCondition cond = g_source_query_unix_fd(source, gsource->tag); TPL_IGNORE(cb); - TPL_IGNORE(data); if (cond & G_IO_IN) { - if (gsource->gsource_funcs->dispatch) + if (gsource->gsource_funcs && gsource->gsource_funcs->dispatch) ret = gsource->gsource_funcs->dispatch(gsource); + + if (gsource->type == SOURCE_TYPE_FINALIZER) { + tpl_gsource *del_source = (tpl_gsource *)data; + if (!g_source_is_destroyed(&del_source->gsource)) { + g_mutex_lock(&del_source->thread->thread_mutex); + + 
g_source_remove_unix_fd(&del_source->gsource, del_source->tag); + g_source_destroy(&del_source->gsource); + g_source_unref(&del_source->gsource); + + g_cond_signal(&del_source->thread->thread_cond); + g_mutex_unlock(&del_source->thread_mutex); + } + } } else { /* When some io errors occur, it is not considered as a critical error. * There may be problems with the screen, but it does not affect the operation. */ @@ -187,8 +208,13 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) gsource, gsource->fd, cond); } - if (gsource->type == SOURCE_TYPE_DISPOSABLE) + if (gsource->type == SOURCE_TYPE_DISPOSABLE || + gsource->type == SOURCE_TYPE_FINALIZER) { + g_source_remove_unix_fd(&gsource->gsource, gsource->tag); + g_source_destroy(&gsource->gsource); + g_source_unref(&gsource->gsource); ret = TPL_GSOURCE_REMOVE; + } return ret; } @@ -198,7 +224,7 @@ _thread_source_finalize(GSource *source) { tpl_gsource *gsource = (tpl_gsource *)source; - if (gsource->gsource_funcs->finalize) + if (gsource->gsource_funcs && gsource->gsource_funcs->finalize) gsource->gsource_funcs->finalize(gsource); if (gsource->is_eventfd) @@ -208,6 +234,7 @@ _thread_source_finalize(GSource *source) gsource->thread = NULL; gsource->gsource_funcs = NULL; gsource->data = NULL; + gsource->finalizer = NULL; } static GSourceFuncs _thread_source_funcs = { @@ -249,6 +276,13 @@ tpl_gsource_create(tpl_gthread *thread, void *data, int fd, new_gsource->data = data; new_gsource->type = type; + if (new_gsource->type == SOURCE_TYPE_NORMAL) { + tpl_gsource *finalizer = tpl_gsource_create(thread, new_gsource, -1, + NULL, SOURCE_TYPE_FINALIZER); + new_gsource->finalizer = finalizer; + } else + new_gsource->finalizer = NULL; + new_gsource->tag = g_source_add_unix_fd(&new_gsource->gsource, new_gsource->fd, G_IO_IN | G_IO_ERR); @@ -261,9 +295,30 @@ tpl_gsource_create(tpl_gthread *thread, void *data, int fd, void tpl_gsource_destroy(tpl_gsource *source) { - g_source_remove_unix_fd(&source->gsource, 
source->tag); - g_source_destroy(&source->gsource); - g_source_unref(&source->gsource); + if (g_source_is_destroyed(&source->gsource)) { + TPL_WARN("gsource(%p) already has been destroyed.", + source); + return; + } + + if (source->type == SOURCE_TYPE_NORMAL && + source->finalizer != NULL) { + g_mutex_lock(&source->thread->thread_mutex); + + tpl_gsource_send_event(source->finalizer, 1); + + g_cond_wait(&source->thread->thread_cond, &source->thread->thread_mutex); + g_mutex_unlock(&source->thread->thread_mutex); + } + + + if (!g_source_is_destroyed(&source->gsource) && + !(source->type == SOURCE_TYPE_DISPOSABLE || + source->type == SOURCE_TYPE_FINALIZER)) { + g_source_remove_unix_fd(&source->gsource, source->tag); + g_source_destroy(&source->gsource); + g_source_unref(&source->gsource); + } } void @@ -272,7 +327,7 @@ tpl_gsource_send_event(tpl_gsource *source, uint64_t message) uint64_t value = message; int ret; - ret = write(del_source->event_fd, &value, sizeof(uint64_t)); + ret = write(source->event_fd, &value, sizeof(uint64_t)); if (ret == -1) { TPL_ERR("failed to send devent. tpl_gsource(%p)", source); -- 2.7.4 From 8b70dc9962c7a6107e71e3bd43e69b83859680a3 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 5 Jan 2021 12:48:17 +0900 Subject: [PATCH 07/16] Added missing gsource_destroy for tdm source. 
Change-Id: I4ee2813d3f7d19c2ee0467d79931eb54ff259386 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 4cfb306..76947c6 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -616,8 +616,10 @@ __tpl_wl_egl_display_init(tpl_display_t *display) return TPL_ERROR_NONE; free_display: - if (wl_egl_display->thread) + if (wl_egl_display->thread) { + tpl_gsource_destroy(wl_egl_display->tdm_source); tpl_gthread_destroy(wl_egl_display->thread, _thread_fini); + } wl_egl_display->thread = NULL; free(wl_egl_display); @@ -646,6 +648,11 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) wl_egl_display->gsource = NULL; } + if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) { + tpl_gsource_destroy(wl_egl_display->tdm_source); + wl_egl_display->tdm_source = NULL; + } + if (wl_egl_display->thread) { tpl_gthread_destroy(wl_egl_display->thread, NULL); wl_egl_display->wl_egl_thread = NULL; -- 2.7.4 From 1c98c9ca0d023e6c5d0172b5dcf5750e466ffb8f Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Tue, 5 Jan 2021 13:04:29 +0900 Subject: [PATCH 08/16] Added argument to function tpl_gsource_destroy - In an exceptional situation, there are cases where tpl_gsource_destroy should be called inside gthread. - In this case, if a source whose type is SOURCE_TYPE_NORMAL tries to destroy through finalizer, deadlock may occur. Therefore, when destroying in the same thread, destroy_in_thread must be set to FALSE. 
Change-Id: I53a7b376f6cc12fbab12ec6d7bf6cc8e812eaa87 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 29 +++++++++++++++++------------ src/tpl_utils_gthread.h | 5 ++++- src/tpl_wl_egl.c | 10 +++++----- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 5678755..f178def 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -293,7 +293,7 @@ tpl_gsource_create(tpl_gthread *thread, void *data, int fd, } void -tpl_gsource_destroy(tpl_gsource *source) +tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread) { if (g_source_is_destroyed(&source->gsource)) { TPL_WARN("gsource(%p) already has been destroyed.", @@ -301,20 +301,25 @@ tpl_gsource_destroy(tpl_gsource *source) return; } - if (source->type == SOURCE_TYPE_NORMAL && - source->finalizer != NULL) { - g_mutex_lock(&source->thread->thread_mutex); + if (destroy_in_thread) { + if (source->type == SOURCE_TYPE_NORMAL) { + g_mutex_lock(&source->thread->thread_mutex); - tpl_gsource_send_event(source->finalizer, 1); - - g_cond_wait(&source->thread->thread_cond, &source->thread->thread_mutex); - g_mutex_unlock(&source->thread->thread_mutex); - } + tpl_gsource_send_event(source->finalizer, 1); + g_cond_wait(&source->thread->thread_cond, &source->thread->thread_mutex); + g_mutex_unlock(&source->thread->thread_mutex); + } + } else { + if (source->type == SOURCE_TYPE_NORMAL && + source->finalizer) { + tpl_gsource *finalize = source->finalizer; + g_source_remove_unix_fd(&finalize->gsource, finalize->tag); + g_source_destroy(&finalize->gsource); + g_source_unref(&finalize->gsource); + source->finalizer = NULL; + } - if (!g_source_is_destroyed(&source->gsource) && - !(source->type == SOURCE_TYPE_DISPOSABLE || - source->type == SOURCE_TYPE_FINALIZER)) { g_source_remove_unix_fd(&source->gsource, source->tag); g_source_destroy(&source->gsource); g_source_unref(&source->gsource); diff --git a/src/tpl_utils_gthread.h 
b/src/tpl_utils_gthread.h index 4da3828..06d32e6 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -89,9 +89,12 @@ tpl_gsource_create(tpl_gthread *thread, void *data, int fd, * Detach the passed tpl_gsource from thread and destroy it. * * @param source Pointer to tpl_gsource to destroy. + * @param destroy_in_thread TRUE if destroy in thread through eventfd, FALSE otherwise. + * It is valid only when SOURCE_TYPE is NORMAL. + * @see tpl_gsource_type_t tpl_gsource_create */ void -tpl_gsource_destroy(tpl_gsource *source); +tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread); /** * Send an event to dispatch the gsource attached to the thread. diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 76947c6..c0e1416 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -227,7 +227,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource) tdm_err); TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); - tpl_gsource_destroy(gsource); + tpl_gsource_destroy(gsource, TPL_FALSE); wl_egl_display->tdm_source = NULL; @@ -521,7 +521,7 @@ _thread_fini(void *data) tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; if (wl_egl_display->tdm_initialized) - tpl_gsource_destroy(wl_egl_display->tdm_source); + tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_FALSE); if (wl_egl_display->wl_initialized) _thread_wl_display_fini(wl_egl_display); } @@ -617,7 +617,7 @@ __tpl_wl_egl_display_init(tpl_display_t *display) free_display: if (wl_egl_display->thread) { - tpl_gsource_destroy(wl_egl_display->tdm_source); + tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); tpl_gthread_destroy(wl_egl_display->thread, _thread_fini); } @@ -644,12 +644,12 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) wl_egl_display->wl_display); if (wl_egl_display->gsource) { - tpl_gsource_destroy(wl_egl_display->gsource); + tpl_gsource_destroy(wl_egl_display->gsource, TPL_TRUE); wl_egl_display->gsource = NULL; } if (wl_egl_display->tdm_source 
&& wl_egl_display->tdm_initialized) { - tpl_gsource_destroy(wl_egl_display->tdm_source); + tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); wl_egl_display->tdm_source = NULL; } -- 2.7.4 From c24f0e8a45fd703c1761df7dee52fbd015398d17 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 6 Jan 2021 15:26:35 +0900 Subject: [PATCH 09/16] Rename tpl_gsource_send_event to tpl_gsource_send_message. Change-Id: Ib52508bd46ed588a9ce7893c9dae2d6f61681bfa Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 14 ++++++++++---- src/tpl_utils_gthread.h | 6 +++--- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index f178def..6329ce9 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -126,7 +126,7 @@ tpl_gthread_destroy(tpl_gthread *thread, tpl_gthread_func deinit_func) { g_mutex_lock(&thread->thread_mutex); thread->deinit_func = deinit_func; - tpl_gsource_send_event(thread->destroy_sig_source, 1); + tpl_gsource_send_message(thread->destroy_sig_source, 1); g_cond_wait(&thread->thread_cond, &thread->thread_mutex); g_main_loop_quit(thread->loop); @@ -305,7 +305,7 @@ tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread) if (source->type == SOURCE_TYPE_NORMAL) { g_mutex_lock(&source->thread->thread_mutex); - tpl_gsource_send_event(source->finalizer, 1); + tpl_gsource_send_message(source->finalizer, 1); g_cond_wait(&source->thread->thread_cond, &source->thread->thread_mutex); g_mutex_unlock(&source->thread->thread_mutex); @@ -327,12 +327,18 @@ tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread) } void -tpl_gsource_send_event(tpl_gsource *source, uint64_t message) +tpl_gsource_send_message(tpl_gsource *source, uint64_t message) { uint64_t value = message; int ret; - ret = write(source->event_fd, &value, sizeof(uint64_t)); + if (!source->is_eventfd) { + TPL_ERR("source is not using eventfd. 
source(%p) fd(%d)", + source, source->fd); + return; + } + + ret = write(source->fd, &value, sizeof(uint64_t)); if (ret == -1) { TPL_ERR("failed to send devent. tpl_gsource(%p)", source); diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 06d32e6..d24ce8e 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -97,13 +97,13 @@ void tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread); /** - * Send an event to dispatch the gsource attached to the thread. + * Send an message to dispatch the gsource attached to the thread. * - * @param source Pointer to tpl_gsource to send event. + * @param source Pointer to tpl_gsource to send message. * @param message Value to be read in thread.. */ void -tpl_gsource_send_event(tpl_gsource *source, uint64_t message); +tpl_gsource_send_message(tpl_gsource *source, uint64_t message); /** * Get user data from passed tpl_gsource -- 2.7.4 From ff64b35eaa30128d0c8eb80ce9fc8bf220ef8f0a Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 6 Jan 2021 15:46:31 +0900 Subject: [PATCH 10/16] Added message argunment to thread dispatch callback. 
Change-Id: I37c39032839c0e598006ed29ae31ea5ca1b66207 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 13 ++++++++++++- src/tpl_utils_gthread.h | 2 +- src/tpl_wl_egl.c | 4 +++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 6329ce9..8bfcb01 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -185,8 +185,19 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) TPL_IGNORE(cb); if (cond & G_IO_IN) { + ssize_t s; + uint64_t message = 0; + + if (gsource->is_eventfd) { + s = read(gsource->fd, &message, sizeof(uint64_t)); + if (s != sizeof(uint64_t)) { + TPL_ERR("Failed to read from event_fd(%d)", + gsource->fd); + } + } + if (gsource->gsource_funcs && gsource->gsource_funcs->dispatch) - ret = gsource->gsource_funcs->dispatch(gsource); + ret = gsource->gsource_funcs->dispatch(gsource, message); if (gsource->type == SOURCE_TYPE_FINALIZER) { tpl_gsource *del_source = (tpl_gsource *)data; diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index d24ce8e..66ad277 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -30,7 +30,7 @@ typedef enum { struct _tpl_gsource_functions { tpl_bool_t (*prepare) (tpl_gsource *source); tpl_bool_t (*check) (tpl_gsource *source); - tpl_bool_t (*dispatch) (tpl_gsource *source); + tpl_bool_t (*dispatch) (tpl_gsource *source, uint64_t message); void (*finalize) (tpl_gsource *source); }; diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index c0e1416..3079519 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -203,11 +203,13 @@ _check_native_handle_is_wl_display(tpl_handle_t display) } static tpl_bool_t -__thread_func_tdm_dispatch(tpl_gsource *gsource) +__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) { tpl_wl_egl_display_t *wl_egl_display = NULL; tdm_error tdm_err = TDM_ERROR_NONE; + TPL_IGNORE(message); + wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); if 
(!wl_egl_display) { TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource); -- 2.7.4 From 21a016dd3305edc56c8d219f5151e584822ffc51 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 6 Jan 2021 13:37:15 +0900 Subject: [PATCH 11/16] Implement backend surface of tpl_wl_egl_thread. Change-Id: I391559ab9ebf6f926a6242795b7b5871b7ecf34f Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.h | 2 +- src/tpl_wl_egl.c | 1399 ++++++++++++++++++++++++++++++++++++----------- 2 files changed, 1069 insertions(+), 332 deletions(-) diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index 66ad277..a30b86f 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -97,7 +97,7 @@ void tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread); /** - * Send an message to dispatch the gsource attached to the thread. + * Send a message to dispatch the gsource attached to the thread. * * @param source Pointer to tpl_gsource to send message. * @param message Value to be read in thread.. 
diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 3079519..ba3fb80 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -56,8 +57,8 @@ struct _tpl_wl_egl_display { tpl_bool_t prepared; struct tizen_surface_shm *tss; /* used for surface buffer_flush */ - struct wp_presentation *presentation; - struct zwp_linux_explicit_synchronization_v1 *explicit_sync; + struct wp_presentation *presentation; /* for presentation feedback */ + struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */ }; struct _tpl_wl_egl_surface { @@ -65,59 +66,59 @@ struct _tpl_wl_egl_surface { tbm_surface_queue_h tbm_queue; - struct wl_surface *surf; struct wl_egl_window *wl_egl_window; - struct zwp_linux_surface_synchronization_v1 *surface_sync; - struct tizen_surface_shm_flusher *tss_flusher; + struct wl_surface *wl_surface; + struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ + struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */ + + tdm_client_vblank *vblank; /* surface information */ - int latest_transform; - int rotation; - int format; int render_done_cnt; unsigned int serial; + int width; + int height; + int format; + int latest_transform; + int rotation; + int post_interval; tpl_wl_egl_display_t *wl_egl_display; + tpl_surface_t *tpl_surface; /* the lists for buffer tracing */ tpl_list_t *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */ tpl_list_t *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */ - tpl_list_t *fence_waiting_sources; /* Trace fence_wait_source from ENQUEUE to fence signaled */ + tpl_list_t *fence_waiting_bufferss; /* Trace buffers from ENQUEUE to fence signaled */ tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */ tpl_list_t *render_done_fences; /* for attaching to twe_thread with fences passed by enqueue */ + tpl_list_t 
*presentation_feedbacks; /* for tracing presentation feedbacks */ - tdm_client_vblank *vblank; - - tbm_fd commit_sync_timeline; - int commit_sync_timestamp; - unsigned int commit_sync_fence_number; + struct { + tpl_gmutex mutex; + int fd; + } commit_sync; - tbm_fd presentation_sync_timeline; - int presentation_sync_timestamp; - int presentation_sync_ts_backup; - int presentation_sync_req_cnt; + struct { + tpl_gmutex mutex; + int fd; + } presentation_sync; - tpl_gmutex pst_mutex; - tpl_gmutex surf_mutex; tpl_gmutex free_queue_mutex; tpl_gcond free_queue_cond; - /* for waiting draw done */ - tpl_bool_t use_sync_fence; - - /* to use zwp_linux_surface_synchronization */ - tpl_bool_t use_surface_sync; + tpl_gmutex surf_mutex; + tpl_gcond surf_cond; + /* for waiting draw done */ + tpl_bool_t use_render_done_fence; tpl_bool_t is_activated; tpl_bool_t reset; /* TRUE if queue reseted by external */ tpl_bool_t need_to_enqueue; - tpl_bool_t rotation_capability; + tpl_bool_t prerotation_capability; tpl_bool_t vblank_done; - tpl_bool_t is_destroying; - tpl_bool_t set_serial_is_used; /* Will be deprecated */ - - int post_interval; + tpl_bool_t set_serial_is_used; }; struct _tpl_wl_egl_bufer { @@ -385,121 +386,6 @@ _wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display, wl_egl_display->last_error = errno; } -static tpl_bool_t -__thread_func_disp_prepare(tpl_gsource *gsource) -{ - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - - /* If this wl_egl_display is already prepared, - * do nothing in this function. */ - if (wl_egl_display->prepared) - return TPL_FALSE; - - /* If there is a last_error, there is no need to poll, - * so skip directly to dispatch. 
- * prepare -> dispatch */ - if (wl_egl_display->last_error) - return TPL_TRUE; - - while (wl_display_prepare_read_queue(wl_egl_display->wl_display, - wl_egl_display->ev_queue) != 0) { - if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, - wl_egl_display->ev_queue) == -1) { - _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); - } - } - - wl_egl_display->prepared = TPL_TRUE; - - wl_display_flush(wl_egl_display->wl_display); - - return TPL_FALSE; -} - -static tpl_bool_t -__thread_func_disp_check(tpl_gsource *gsource) -{ - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - tpl_bool_t ret = TPL_FALSE; - - if (!wl_egl_display->prepared) - return ret; - - /* If prepared, but last_error is set, - * cancel_read is executed and FALSE is returned. - * That can lead to G_SOURCE_REMOVE by calling disp_prepare again - * and skipping disp_check from prepare to disp_dispatch. - * check -> prepare -> dispatch -> G_SOURCE_REMOVE */ - if (wl_egl_display->prepared && wl_egl_display->last_error) { - wl_display_cancel_read(wl_egl_display->wl_display); - return ret; - } - - if (tpl_gsource_check_io_condition(gsource)) { - if (wl_display_read_events(wl_egl_display->wl_display) == -1) - _wl_display_print_err(wl_egl_display, "read_event"); - ret = TPL_TRUE; - } else { - wl_display_cancel_read(wl_egl_display->wl_display); - ret = TPL_FALSE; - } - - wl_egl_display->prepared = TPL_FALSE; - - return ret; -} - -static tpl_bool_t -__thread_func_disp_dispatch(tpl_gsource *gsource) -{ - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - - /* If there is last_error, SOURCE_REMOVE should be returned - * to remove the gsource from the main loop. 
- * This is because wl_egl_display is not valid since last_error was set.*/ - if (wl_egl_display->last_error) { - return TPL_GSOURCE_REMOVE; - } - - g_mutex_lock(&wl_egl_display->wl_event_mutex); - if (tpl_gsource_check_io_condition(gsource)) { - if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, - wl_egl_display->ev_queue) == -1) { - _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); - } - } - - wl_display_flush(wl_egl_display->wl_display); - g_mutex_unlock(&wl_egl_display->wl_event_mutex); - - return TPL_GSOURCE_CONTINUE; -} - -static void -__thread_func_disp_finalize(tpl_gsource *source) -{ - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - - if (wl_egl_display->wl_initialized) - _thread_wl_display_fini(wl_egl_display); - - TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)", - wl_egl_display, source); - - return; -} - -static tpl_gsource_functions disp_funcs = { - .prepare = __thread_func_disp_prepare, - .check = __thread_func_disp_check, - .dispatch = __thread_func_disp_dispatch, - .finalize = __thread_func_disp_finalize, -}; - static void* _thread_init(void *data) { @@ -802,6 +688,123 @@ _thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display) wl_egl_display->wl_display); } +static tpl_bool_t +__thread_func_disp_prepare(tpl_gsource *gsource) +{ + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + + /* If this wl_egl_display is already prepared, + * do nothing in this function. */ + if (wl_egl_display->prepared) + return TPL_FALSE; + + /* If there is a last_error, there is no need to poll, + * so skip directly to dispatch. 
+ * prepare -> dispatch */ + if (wl_egl_display->last_error) + return TPL_TRUE; + + while (wl_display_prepare_read_queue(wl_egl_display->wl_display, + wl_egl_display->ev_queue) != 0) { + if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, + wl_egl_display->ev_queue) == -1) { + _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); + } + } + + wl_egl_display->prepared = TPL_TRUE; + + wl_display_flush(wl_egl_display->wl_display); + + return TPL_FALSE; +} + +static tpl_bool_t +__thread_func_disp_check(tpl_gsource *gsource) +{ + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + tpl_bool_t ret = TPL_FALSE; + + if (!wl_egl_display->prepared) + return ret; + + /* If prepared, but last_error is set, + * cancel_read is executed and FALSE is returned. + * That can lead to G_SOURCE_REMOVE by calling disp_prepare again + * and skipping disp_check from prepare to disp_dispatch. + * check -> prepare -> dispatch -> G_SOURCE_REMOVE */ + if (wl_egl_display->prepared && wl_egl_display->last_error) { + wl_display_cancel_read(wl_egl_display->wl_display); + return ret; + } + + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_read_events(wl_egl_display->wl_display) == -1) + _wl_display_print_err(wl_egl_display, "read_event"); + ret = TPL_TRUE; + } else { + wl_display_cancel_read(wl_egl_display->wl_display); + ret = TPL_FALSE; + } + + wl_egl_display->prepared = TPL_FALSE; + + return ret; +} + +static tpl_bool_t +__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + + TPL_IGNORE(message); + + /* If there is last_error, SOURCE_REMOVE should be returned + * to remove the gsource from the main loop. 
+ * This is because wl_egl_display is not valid since last_error was set.*/ + if (wl_egl_display->last_error) { + return TPL_GSOURCE_REMOVE; + } + + g_mutex_lock(&wl_egl_display->wl_event_mutex); + if (tpl_gsource_check_io_condition(gsource)) { + if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, + wl_egl_display->ev_queue) == -1) { + _wl_display_print_err(wl_egl_display, "dispatch_queue_pending"); + } + } + + wl_display_flush(wl_egl_display->wl_display); + g_mutex_unlock(&wl_egl_display->wl_event_mutex); + + return TPL_GSOURCE_CONTINUE; +} + +static void +__thread_func_disp_finalize(tpl_gsource *gsource) +{ + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); + + if (wl_egl_display->wl_initialized) + _thread_wl_display_fini(wl_egl_display); + + TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)", + wl_egl_display, gsource); + + return; +} + +static tpl_gsource_functions disp_funcs = { + .prepare = __thread_func_disp_prepare, + .check = __thread_func_disp_check, + .dispatch = __thread_func_disp_dispatch, + .finalize = __thread_func_disp_finalize, +}; + static tpl_result_t __tpl_wl_egl_display_query_config(tpl_display_t *display, tpl_surface_type_t surface_type, @@ -861,7 +864,7 @@ __tpl_wl_egl_display_get_window_info(tpl_display_t *display, if (width) *width = wl_egl_window->width; if (height) *height = wl_egl_window->height; if (format) { - struct tizen_private *tizen_private = _get_tizen_private(wl_egl_window); + struct tizen_private *tizen_private = (struct tizen_private *)wl_egl_window->driver_private; if (tizen_private && tizen_private->data) { tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; *format = wl_egl_surface->format; @@ -919,175 +922,331 @@ __tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) return tbm_surface; } - - - - -static void -__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, - void *data) 
+static tpl_result_t +__tpl_wl_egl_surface_init(tpl_surface_t *surface) { - tpl_surface_t *surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = NULL; tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tpl_bool_t is_activated = TPL_FALSE; - int width, height; + tbm_surface_queue_h tbm_queue = NULL; + tpl_gsource *surf_source = NULL; + tpl_result_t ret = TPL_ERROR_NONE; - surface = (tpl_surface_t *)data; - TPL_CHECK_ON_NULL_RETURN(surface); + struct wl_egl_window *wl_egl_window = + (struct wl_egl_window *)surface->native_handle; - wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; - TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); + TPL_ASSERT(surface->native_handle); - /* When the queue is resized, change the reset flag to TPL_TRUE to reflect - * the changed window size at the next frame. */ - width = tbm_surface_queue_get_width(surface_queue); - height = tbm_surface_queue_get_height(surface_queue); - if (surface->width != width || surface->height != height) { - TPL_LOG_T("WL_EGL", - "[QUEUE_RESIZE_CB] wl_egl_surface(%p) tbm_queue(%p) (%dx%d)", - wl_egl_surface, surface_queue, width, height); + wl_egl_display = + (tpl_wl_egl_display_t *)surface->display->backend.data; + if (!wl_egl_display) { + TPL_ERR("Invalid parameter. wl_egl_display(%p)", + wl_egl_display); + return TPL_ERROR_INVALID_PARAMETER; } - /* When queue_reset_callback is called, if is_activated is different from - * its previous state change the reset flag to TPL_TRUE to get a new buffer - * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. 
*/ - is_activated = twe_surface_check_activated(wl_egl_surface->twe_surface); - if (wl_egl_surface->is_activated != is_activated) { - if (is_activated) { - TPL_LOG_T("WL_EGL", - "[ACTIVATED_CB] wl_egl_surface(%p) tbm_queue(%p)", - wl_egl_surface, surface_queue); - } else { - TPL_LOG_T("WL_EGL", - "[DEACTIVATED_CB] wl_egl_surface(%p) tbm_queue(%p)", - wl_egl_surface, surface_queue); + wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1, + sizeof(tpl_wl_egl_surface_t)); + if (!wl_egl_surface) { + TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface, + -1, surf_funcs, SOURCE_TYPE_NORMAL); + if (!surf_source) { + TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)", + wl_egl_surface); + goto surf_source_create_fail; + } + + surface->backend.data = (void *)wl_egl_surface; + surface->width = wl_egl_window->width; + surface->height = wl_egl_window->height; + surface->rotation = 0; + + wl_egl_surface->tpl_surface = surface; + wl_egl_surface->width = wl_egl_window->width; + wl_egl_surface->height = wl_egl_window->height; + wl_egl_surface->format = surface->format; + + wl_egl_surface->surf_source = surf_source; + wl_egl_surface->wl_egl_window = wl_egl_window; + wl_egl_surface->wl_surface = wl_egl_window->surface; + + wl_egl_surface->wl_egl_display = wl_egl_display; + + wl_egl_surface->reset = TPL_FALSE; + wl_egl_surface->is_activated = TPL_FALSE; + wl_egl_surface->need_to_enqueue = TPL_FALSE; + wl_egl_surface->prerotation_capability = TPL_FALSE; + wl_egl_surface->vblank_done = TPL_TRUE; + wl_egl_surface->use_render_done_fence = TPL_FALSE; + wl_egl_surface->set_serial_is_used = TPL_FALSE; + + wl_egl_surface->latest_transform = 0; + wl_egl_surface->render_done_cnt = 0; + wl_egl_surface->serial = 0; + + wl_egl_surface->vblank = NULL; + wl_egl_surface->tss_flusher = NULL; + wl_egl_surface->surface_sync = NULL; + + 
wl_egl_surface->post_interval = surface->post_interval; + + wl_egl_surface->commit_sync.fd = -1; + wl_egl_surface->presentation_sync.fd = -1; + + { + struct tizen_private *tizen_private = NULL; + + if (wl_egl_window->driver_private) + tizen_private = (struct tizen_private *)wl_egl_window->driver_private; + else { + tizen_private = tizen_private_create(); + wl_egl_window->driver_private = (void *)tizen_private; + } + + if (tizen_private) { + tizen_private->data = (void *)wl_egl_surface; + tizen_private->rotate_callback = (void *)__cb_rotate_callback; + tizen_private->get_rotation_capability = (void *) + __cb_get_rotation_capability; + tizen_private->set_window_serial_callback = (void *) + __cb_set_window_serial_callback; + tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd; + tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd; + + wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback; + wl_egl_window->resize_callback = (void *)__cb_resize_callback; } } - wl_egl_surface->reset = TPL_TRUE; + tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex); + tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex); - if (surface->reset_cb) - surface->reset_cb(surface->reset_data); -} + tpl_gmutex_init(&wl_egl_surface->free_queue_mutex); + tpl_gmutex_init(&wl_egl_surface->surf_mutex); + tpl_gcond_init(&wl_egl_surface->free_queue_cond); + tpl_gcond_init(&wl_egl_surface->surf_cond); -void __cb_window_rotate_callback(void *data) -{ - tpl_surface_t *surface = (tpl_surface_t *)data; - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - int rotation; + /* Initialize in thread */ + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + tpl_gsource_send_message(wl_egl_surface->surf_source, 1); + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - if (!surface) { - TPL_ERR("Inavlid parameter. 
surface is NULL."); - return; - } + TPL_ASSERT(wl_egl_surface->tbm_queue); - wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; - if (!wl_egl_surface) { - TPL_ERR("Invalid parameter. surface->backend.data is NULL"); - return; - } + TPL_INFO("[SURFACE_INIT]", + "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)", + surface, wl_egl_surface, wl_egl_surface->surf_source); - rotation = twe_surface_get_rotation(wl_egl_surface->twe_surface); + return TPL_ERROR_NONE; - surface->rotation = rotation; +surf_source_create_fail: + free(wl_egl_surface); + surface->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; } -static tpl_result_t -__tpl_wl_egl_surface_init(tpl_surface_t *surface) +static tbm_surface_queue_h +_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface, + struct wayland_tbm_client *wl_tbm_client, + int num_buffers) { - tpl_wl_egl_display_t *wl_egl_display = NULL; - tpl_wl_egl_surface_t *wl_egl_surface = NULL; tbm_surface_queue_h tbm_queue = NULL; - twe_surface_h twe_surface = NULL; - tpl_result_t ret = TPL_ERROR_NONE; + tbm_bufmgr bufmgr = NULL; + unsigned int capability; - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); - TPL_ASSERT(surface->native_handle); + struct wl_surface *wl_surface = wl_egl_surface->wl_surface; + int width = wl_egl_surface->width; + int height = wl_egl_surface->height; + int format = wl_egl_surface->format; - wl_egl_display = - (tpl_wl_egl_display_t *)surface->display->backend.data; - if (!wl_egl_display) { - TPL_ERR("Invalid parameter. wl_egl_display(%p)", - wl_egl_display); - return TPL_ERROR_INVALID_PARAMETER; + if (!wl_tbm_client || !wl_surface) { + TPL_ERR("Invalid parameters. 
wl_tbm_client(%p) wl_surface(%p)", + wl_tbm_client, wl_surface); + return NULL; } - wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1, - sizeof(tpl_wl_egl_surface_t)); - if (!wl_egl_surface) { - TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t."); - return TPL_ERROR_OUT_OF_MEMORY; + bufmgr = tbm_bufmgr_init(-1); + capability = tbm_bufmgr_get_capability(bufmgr); + tbm_bufmgr_deinit(bufmgr); + + if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) { + tbm_queue = wayland_tbm_client_create_surface_queue_tiled( + wl_tbm_client, + wl_surface, + num_buffers, + width, + height, + format); + } else { + tbm_queue = wayland_tbm_client_create_surface_queue( + wl_tbm_client, + wl_surface, + num_buffers, + width, + height, + format); + } + + if (tbm_queue) { + TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)", + wl_tbm_client); + return NULL; } - surface->backend.data = (void *)wl_egl_surface; + if (tbm_surface_queue_set_modes( + tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) != + TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; + } - if (__tpl_object_init(&wl_egl_surface->base, - TPL_OBJECT_SURFACE, - NULL) != TPL_ERROR_NONE) { - TPL_ERR("Failed to initialize backend surface's base object!"); - goto object_init_fail; + if (tbm_surface_queue_add_reset_cb( + tbm_queue, + __cb_tbm_queue_reset_callback, + (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; } - twe_surface = twe_surface_add(wl_egl_display->wl_egl_thread, - wl_egl_display->twe_display, - surface->native_handle, - surface->format, surface->num_buffers); - if (!twe_surface) { - TPL_ERR("Failed to add native_window(%p) to thread(%p)", - surface->native_handle, wl_egl_display->wl_egl_thread); - goto create_twe_surface_fail; + if 
(tbm_surface_queue_add_trace_cb( + tbm_queue, + __cb_tbm_queue_trace_callback, + (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register trace callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; } - tbm_queue = twe_surface_get_tbm_queue(twe_surface); - if (!tbm_queue) { - TPL_ERR("Failed to get tbm_queue from twe_surface(%p)", twe_surface); - goto queue_create_fail; + if (tbm_surface_queue_add_acquirable_cb( + tbm_queue, + __cb_tbm_queue_acquirable_callback, + (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; } - /* Set reset_callback to tbm_queue */ - if (tbm_surface_queue_add_reset_cb(tbm_queue, - __cb_tbm_surface_queue_reset_callback, - (void *)surface)) { - TPL_ERR("TBM surface queue add reset cb failed!"); - goto add_reset_cb_fail; + return tbm_queue; +} + +static tdm_client_vblank* +_thread_create_tdm_client_vblank(tdm_client *tdm_client) +{ + tdm_client_vblank *vblank = NULL; + tdm_client_output *tdm_output = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; + + if (!tdm_client) { + TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client); + return NULL; } - wl_egl_surface->reset = TPL_FALSE; - wl_egl_surface->twe_surface = twe_surface; - wl_egl_surface->tbm_queue = tbm_queue; - wl_egl_surface->is_activated = TPL_FALSE; - wl_egl_surface->need_to_enqueue = TPL_TRUE; + tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err); + if (!tdm_output || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err); + return NULL; + } + + vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err); + if (!vblank || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to create vblank. 
tdm_err(%d)", tdm_err); + return NULL; + } - surface->width = tbm_surface_queue_get_width(tbm_queue); - surface->height = tbm_surface_queue_get_height(tbm_queue); - surface->rotation = twe_surface_get_rotation(twe_surface); + tdm_client_vblank_set_enable_fake(vblank, 1); + tdm_client_vblank_set_sync(vblank, 0); - ret = twe_surface_set_rotate_callback(twe_surface, (void *)surface, - (tpl_surface_cb_func_t)__cb_window_rotate_callback); - if (ret != TPL_ERROR_NONE) { - TPL_WARN("Failed to register rotate callback."); + return vblank; +} + +static void +_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + wl_egl_surface->tbm_queue = _thread_create_tbm_queue( + wl_egl_surface->wl_surface, + wl_egl_display->wl_tbm_client, + wl_egl_surface->width, + wl_egl_surface->height, + wl_egl_surface->format, + CLIENT_QUEUE_SIZE); + if (!wl_egl_surface->tbm_queue) { + TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)", + wl_egl_surface, wl_egl_display->wl_tbm_client); + return; } - TPL_LOG_T("WL_EGL", - "[INIT1/2]tpl_surface(%p) tpl_wl_egl_surface(%p) twe_surface(%p)", - surface, wl_egl_surface, twe_surface); - TPL_LOG_T("WL_EGL", - "[INIT2/2]size(%dx%d)rot(%d)|tbm_queue(%p)|native_window(%p)", - surface->width, surface->height, surface->rotation, - tbm_queue, surface->native_handle); + TPL_INFO("[QUEUE_CREATION]", + "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, + wl_egl_display->wl_tbm_client); + TPL_INFO("[QUEUE_CREATION]", + "tbm_queue(%p) size(%d x %d) X %d format(%d)", + wl_egl_surface->tbm_queue, + wl_egl_surface->width, + wl_egl_surface->height, + CLIENT_QUEUE_SIZE, + wl_egl_surface->format); - return TPL_ERROR_NONE; + wl_egl_surface->vblank = _thread_create_tdm_client_vblank( + wl_egl_display->tdm_client); + if (wl_egl_surface->vblank) { + TPL_INFO("[VBLANK_INIT]", + "wl_egl_surface(%p) tdm_client(%p) 
vblank(%p)", + wl_egl_surface, wl_egl_display->tdm_client, + wl_egl_surface->vblank); + } -add_reset_cb_fail: -queue_create_fail: - twe_surface_del(twe_surface); -create_twe_surface_fail: -object_init_fail: - free(wl_egl_surface); - surface->backend.data = NULL; - return TPL_ERROR_INVALID_OPERATION; + if (wl_egl_display->tss) { + wl_egl_surface->tss_flusher = + tizen_surface_shm_get_flusher(wl_egl_display->tss, + wl_egl_surface->wl_surface); + } + + if (wl_egl_surface->tss_flusher) { + tizen_surface_shm_flusher_add_listener(surf_source->tss_flusher, + &tss_flusher_listener, + wl_egl_surface); + TPL_INFO("[FLUSHER_INIT]", + "wl_egl_surface(%p) tss_flusher(%p)", + wl_egl_surface, wl_egl_surface->tss_flusher); + } + + if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) { + wl_egl_surface->surface_sync = + zwp_linux_explicit_synchronization_v1_get_synchronization( + wl_egl_display->explicit_sync, wl_egl_surface->wl_surface); + if (wl_egl_surface->surface_sync) { + TPL_INFO("[EXPLICIT_SYNC_INIT]", + "wl_egl_surface(%p) surface_sync(%p)", + wl_egl_surface, wl_egl_surface->surface_sync); + } else { + TPL_WARN("Failed to create surface_sync. 
| wl_egl_surface(%p)", + wl_egl_surface); + wl_egl_display->use_explicit_sync = TPL_FALSE; + } + } + + wl_egl_surface->committed_buffers = __tpl_list_alloc(); + wl_egl_surface->in_use_buffers = __tpl_list_alloc(); + wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc(); + wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc(); + wl_egl_surface->render_done_fences = __tpl_list_alloc(); + wl_egl_surface->presentation_feedbacks = __tpl_list_alloc(); } static void @@ -1099,70 +1258,286 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface) TPL_ASSERT(surface); TPL_ASSERT(surface->display); + TPL_CHECK_ON_NULL_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW); + wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data; TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - TPL_OBJECT_LOCK(wl_egl_surface); + wl_egl_display = wl_egl_surface->wl_egl_display; + TPL_CHECK_ON_NULL_RETURN(wl_egl_display); + + TPL_INFO("[SURFACE_FINI][BEGIN]", + "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, + wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue); + + if (wl_egl_surface->surf_source) + tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); + wl_egl_surface->surf_source = NULL; + + if (wl_egl_surface->wl_egl_window) { + struct tizen_private *tizen_private = NULL; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + TPL_INFO("[WL_EGL_WINDOW_FINI]", + "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", + wl_egl_surface, wl_egl_window, + wl_egl_surface->wl_surface); + tizen_private = (struct tizen_private *)wl_egl_window->driver_private; + if (tizen_private) { + tizen_private->set_window_serial_callback = NULL; + tizen_private->rotate_callback = NULL; + tizen_private->get_rotation_capability = NULL; + tizen_private->create_presentation_sync_fd = NULL; + tizen_private->create_commit_sync_fd = NULL; + tizen_private->set_frontbuffer_callback = NULL; + tizen_private->merge_sync_fds = NULL; + tizen_private->data = NULL; + 
free(tizen_private); + + wl_egl_window->dirver_private = NULL; + } - wl_egl_display = (tpl_wl_egl_display_t *) - surface->display->backend.data; + wl_egl_window->destroy_window_callback = NULL; + wl_egl_window->resize_callback = NULL; - if (wl_egl_display == NULL) { - TPL_ERR("check failed: wl_egl_display == NULL"); - TPL_OBJECT_UNLOCK(wl_egl_surface); - return; + wl_egl_surface->wl_egl_window = NULL; } - if (surface->type == TPL_SURFACE_TYPE_WINDOW) { - TPL_LOG_T("WL_EGL", - "[FINI] wl_egl_surface(%p) native_window(%p) twe_surface(%p)", - wl_egl_surface, surface->native_handle, - wl_egl_surface->twe_surface); - - if (twe_surface_del(wl_egl_surface->twe_surface) - != TPL_ERROR_NONE) { - TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", - wl_egl_surface->twe_surface, - wl_egl_display->wl_egl_thread); + wl_egl_surface->wl_surface = NULL; + wl_egl_surface->wl_egl_display = NULL; + wl_egl_surface->tpl_surface = NULL; + + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex); + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex); + + tpl_gmutex_lock(&wl_egl_surface->free_queue_mutex); + tpl_gmutex_unlock(&wl_egl_surface->free_queue_mutex); + tpl_gmutex_clear(&wl_egl_surface->free_queue_cond); + + tpl_gmutex_clear(&wl_egl_surface->surf_mutex); + tpl_gcond_clear(&wl_egl_surface->surf_cond); + + g_cond_clear(&wl_egl_surface->free_queue_cond); + g_mutex_clear(&wl_egl_surface->free_queue_mutex); + + TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface); + + free(wl_egl_surface); + surface->backend.data = NULL; +} + +static void +_thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + 
tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + + TPL_INFO("[SURFACE_FINI]", + "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", + wl_egl_surface, wl_egl_surface->wl_egl_window, + wl_egl_surface->wl_surface); + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + + /* TODO + if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) { + while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) { + tbm_surface_h tbm_surface = + __tpl_list_pop_front(wl_egl_surface->presentation_feedbacks, NULL); + if (tbm_surface_internal_is_valid(tbm_surface)) { + twe_wl_buffer_info *buf_info = NULL; + tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO, + (void **)&buf_info); + if (buf_info && buf_info->presentation_sync_fd != -1 && + buf_info->presentation_feedback) { + + _write_to_eventfd(buf_info->presentation_sync_fd); + close(buf_info->presentation_sync_fd); + buf_info->presentation_sync_fd = -1; + + wp_presentation_feedback_destroy(buf_info->presentation_feedback); + buf_info->presentation_feedback = NULL; + } + } + } + } + + if (wl_egl_surface->presentation_sync.fd != -1) { + _write_to_eventfd(surf_source->presentation_sync.fd); + close(surf_source->presentation_sync.fd); + surf_source->presentation_sync.fd = -1; + } + */ + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + + /* TODO buffer + if (wl_egl_surface->in_use_buffers) { + __tpl_list_free(wl_egl_surface->in_use_buffers, + (tpl_free_func_t)__cb_buffer_remove_from_list); + wl_egl_surface->in_use_buffers = NULL; + } + + if (surf_source->committed_buffers) { + while (!__tpl_list_is_empty(surf_source->committed_buffers)) { + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tbm_surface_h tbm_surface = + __tpl_list_pop_front(surf_source->committed_buffers, + (tpl_free_func_t)__cb_buffer_remove_from_list); + + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + tsq_err = 
tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", + tbm_surface, tsq_err); + } + __tpl_list_free(surf_source->committed_buffers, NULL); + surf_source->committed_buffers = NULL; + } + + if (surf_source->vblank_waiting_buffers) { + while (!__tpl_list_is_empty(surf_source->vblank_waiting_buffers)) { + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tbm_surface_h tbm_surface = + __tpl_list_pop_front(surf_source->vblank_waiting_buffers, + (tpl_free_func_t)__cb_buffer_remove_from_list); + + tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", + tbm_surface, tsq_err); } + __tpl_list_free(surf_source->vblank_waiting_buffers, NULL); + surf_source->vblank_waiting_buffers = NULL; + } + + if (surf_source->use_sync_fence && surf_source->fence_waiting_sources) { + while (!__tpl_list_is_empty(surf_source->fence_waiting_sources)) { + twe_fence_wait_source *wait_source = + __tpl_list_pop_front(surf_source->fence_waiting_sources, + NULL); + if (!g_source_is_destroyed(&wait_source->gsource)) { + tbm_surface_internal_unref(wait_source->tbm_surface); + wait_source->tbm_surface = NULL; + + close(wait_source->fence_fd); + wait_source->fence_fd = -1; + + g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag); + g_source_destroy(&wait_source->gsource); + g_source_unref(&wait_source->gsource); + } + } + } + */ + + if (wl_egl_surface->surface_sync) { + TPL_INFO("[SURFACE_SYNC_DESTROY]", "wl_egl_surface(%p) surface_sync(%p)", + wl_egl_surface, wl_egl_surface->surface_sync); + zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync); + wl_egl_surface->surface_sync = NULL; + } + + if (wl_egl_surface->tss_flusher) { + TPL_INFO("[FLUSHER_DESTROY]", + "wl_egl_surface(%p) tss_flusher(%p)", + wl_egl_surface, 
wl_egl_surface->tss_flusher); + tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher); + wl_egl_surface->tss_flusher = NULL; + } - wl_egl_surface->twe_surface = NULL; + if (wl_egl_surface->vblank) { + TPL_INFO("[VBLANK_DESTROY]", + "wl_egl_surface(%p) vblank(%p)", + wl_egl_surface, wl_egl_surface->vblank); + tdm_client_vblank_destroy(wl_egl_surface->vblank); + wl_egl_surface->vblank = NULL; + } + + if (wl_egl_surface->tbm_queue) { + TPL_INFO("[TBM_QUEUE_DESTROY]", + "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); + tbm_surface_queue_destroy(wl_egl_surface->tbm_queue); wl_egl_surface->tbm_queue = NULL; } - TPL_OBJECT_UNLOCK(wl_egl_surface); - __tpl_object_fini(&wl_egl_surface->base); - free(wl_egl_surface); - surface->backend.data = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } +static tpl_bool_t +__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); + + /* Initialize surface */ + if (message == 1) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + TPL_DEBUG("wl_egl_surface(%p) initialize message received!", + wl_egl_surface); + _thread_wl_egl_surface_init(wl_egl_surface); + tpl_gcond_signal(&wl_egl_surface->surf_cond); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } else if (message == 2) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + TPL_DEBUG("wl_egl_surface(%p) acquirable message received!", + wl_egl_surface); + /* TODO acquirable */ + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } + + return TPL_TRUE; +} + +static void +__thread_func_surf_finalize(tpl_gsource *gsource) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + 
_thread_wl_egl_surface_fini(wl_egl_surface); + + TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%p)", + gsource, wl_egl_surface); +} + +static tpl_gsource_functions surf_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_surf_dispatch, + .finalize = __thread_func_surf_finalize, +}; + static tpl_result_t __tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface, tpl_bool_t set) { tpl_wl_egl_surface_t *wl_egl_surface = NULL; - if (!surface) { - TPL_ERR("Invalid parameter. tpl_surface(%p)", surface); - return TPL_ERROR_INVALID_PARAMETER; - } + TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; - if (!wl_egl_surface) { - TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)", - surface, wl_egl_surface); - return TPL_ERROR_INVALID_PARAMETER; - } - if (!wl_egl_surface->twe_surface) { - TPL_ERR("Invalid parameter. wl_egl_surface(%p) twe_surface(%p)", - wl_egl_surface, wl_egl_surface->twe_surface); - return TPL_ERROR_INVALID_PARAMETER; - } + TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); - twe_surface_set_rotation_capablity(wl_egl_surface->twe_surface, - set); + TPL_INFO("[SET_PREROTATION_CAPABILITY]", + "wl_egl_surface(%p) prerotation capability set to [%s]", + wl_egl_surface, (set ? "TRUE" : "FALSE")); + wl_egl_surface->prerotation_capability = set; return TPL_ERROR_NONE; } @@ -1172,26 +1547,17 @@ __tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface, { tpl_wl_egl_surface_t *wl_egl_surface = NULL; - if (!surface) { - TPL_ERR("Invalid parameter. tpl_surface(%p)", surface); - return TPL_ERROR_INVALID_PARAMETER; - } + TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; - if (!wl_egl_surface) { - TPL_ERR("Invalid parameter. 
surface(%p) wl_egl_surface(%p)", - surface, wl_egl_surface); - return TPL_ERROR_INVALID_PARAMETER; - } - if (!wl_egl_surface->twe_surface) { - TPL_ERR("Invalid parameter. wl_egl_surface(%p) twe_surface(%p)", - wl_egl_surface, wl_egl_surface->twe_surface); - return TPL_ERROR_INVALID_PARAMETER; - } + TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); - twe_surface_set_post_interval(wl_egl_surface->twe_surface, - post_interval); + TPL_INFO("[SET_POST_INTERVAL]", + "wl_egl_surface(%p) post_interval(%d -> %d)", + wl_egl_surface, wl_egl_surface->post_interval, post_interval); + + wl_egl_surface->post_interval = post_interval; return TPL_ERROR_NONE; } @@ -1507,11 +1873,19 @@ __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) tpl_bool_t __tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy) { - if (!native_dpy) return TPL_FALSE; + struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE); - if (twe_check_native_handle_is_wl_display(native_dpy)) + /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value + is a memory address pointing the structure of wl_display_interface. 
*/ + if (wl_egl_native_dpy == &wl_display_interface) return TPL_TRUE; + if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name, + strlen(wl_display_interface.name)) == 0) { + return TPL_TRUE; + } + return TPL_FALSE; } @@ -1556,3 +1930,366 @@ __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend) __tpl_wl_egl_surface_get_size; } +/* -- BEGIN -- wl_egl_window callback functions */ +static void +__cb_destroy_callback(void *private) +{ + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + + if (!tizen_private) { + TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface"); + return; + } + + wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + if (wl_egl_surface) { + TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.", + wl_egl_surface->wl_egl_window); + TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface."); + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + wl_egl_surface->wl_egl_window->destroy_window_callback = NULL; + wl_egl_surface->wl_egl_window->resize_callback = NULL; + wl_egl_surface->wl_egl_window->driver_private = NULL; + wl_egl_surface->wl_egl_window = NULL; + wl_egl_surface->surf = NULL; + wl_egl_surface->is_destroying = TPL_TRUE; + + tizen_private->set_window_serial_callback = NULL; + tizen_private->rotate_callback = NULL; + tizen_private->get_rotation_capability = NULL; + tizen_private->set_frontbuffer_callback = NULL; + tizen_private->create_commit_sync_fd = NULL; + tizen_private->create_presentation_sync_fd = NULL; + tizen_private->data = NULL; + + free(tizen_private); + tizen_private = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } +} + +static void +__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t 
*wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + int cur_w, cur_h, req_w, req_h, format; + + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return; + } + + format = wl_egl_surface->format; + cur_w = wl_egl_surface->width; + cur_h = wl_egl_surface->height; + req_w = wl_egl_window->width; + req_h = wl_egl_window->height; + + TPL_INFO("[WINDOW_RESIZE]", + "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)", + wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h); + + if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format) + != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue); + return; + } +} +/* -- END -- wl_egl_window callback functions */ + +/* -- BEGIN -- wl_egl_window tizen private callback functions */ + +/* There is no usecase for using prerotation callback below */ +static void +__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + int rotation = tizen_private->rotation; + + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return; + } + + TPL_INFO("[WINDOW_ROTATE]", + "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)", + wl_egl_surface, wl_egl_window, + wl_egl_surface->rotation, rotation); + + wl_egl_surface->rotation = rotation; +} + +/* There is no usecase for using prerotation callback below */ +static int +__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window, + void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE; + struct tizen_private *tizen_private = (struct tizen_private *)private; + 
tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return rotation_capability; + } + + if (wl_egl_surface->rotation_capability == TPL_TRUE) + rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED; + else + rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED; + + + return rotation_capability; +} + +static void +__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window, + void *private, unsigned int serial) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return; + } + + wl_egl_surface->set_serial_is_used = TPL_TRUE; + wl_egl_surface->serial = serial; +} + +static int +__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + int commit_sync_fd = -1; + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. 
wl_egl_surface is NULL", wl_egl_surface); + return -1; + } + + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); + + if (wl_egl_surface->commit_sync.fd != -1) { + commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); + TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)", + wl_egl_surface->commit_sync.fd, commit_sync_fd); + TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)", + wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd); + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + return commit_sync_fd; + } + + wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC); + if (wl_egl_surface->commit_sync.fd == -1) { + TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)", wl_egl_surface); + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + return -1; + } + + commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); + + TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)", + wl_egl_surface->commit_sync.fd, commit_sync_fd); + TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)", + wl_egl_surface, commit_sync_fd); + + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + + return commit_sync_fd; +} + +static int +__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + int presentation_sync_fd = -1; + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. 
wl_egl_surface is NULL", wl_egl_surface); + return -1; + } + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + if (wl_egl_surface->presentation_sync.fd != -1) { + presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); + TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)", + wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", + wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + return presentation_sync_fd; + } + + wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC); + if (wl_egl_surface->presentation_sync.fd == -1) { + TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)", wl_egl_surface); + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + return -1; + } + + presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); + TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)", + wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", + wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + + return presentation_sync_fd; +} +/* -- END -- wl_egl_window tizen private callback functions */ + +/* -- BEGIN -- tizen_surface_shm_flusher_listener */ +static void __cb_tss_flusher_flush_callback(void *data, + struct tizen_surface_shm_flusher *tss_flusher) +{ + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); + + tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed 
to flush tbm_queue(%p)", wl_egl_surface->tbm_queue); + return; + } +} + +static void __cb_tss_flusher_free_flush_callback(void *data, + struct tizen_surface_shm_flusher *tss_flusher) +{ + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); + + tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue); + return; + } +} + +static const struct tizen_surface_shm_flusher_listener +tss_flusher_listener = { + __cb_tss_flusher_flush_callback, + __cb_tss_flusher_free_flush_callback +}; +/* -- END -- tizen_surface_shm_flusher_listener */ + + +/* -- BEGIN -- tbm_surface_queue callback funstions */ +static void +__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, + void *data) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = NULL; + tpl_surface_t *surface = NULL; + tpl_bool_t is_activated = TPL_FALSE; + int width, height; + + wl_egl_surface = (tpl_wl_egl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + wl_egl_display = wl_egl_surface->wl_egl_display; + TPL_CHECK_ON_NULL_RETURN(wl_egl_display); + + surface = wl_egl_surface->tpl_surface; + TPL_CHECK_ON_NULL_RETURN(surface); + + /* When the queue is resized, change the reset flag to TPL_TRUE to reflect + * the changed window size at the next frame. 
*/ + width = tbm_surface_queue_get_width(tbm_queue); + height = tbm_surface_queue_get_height(tbm_queue); + if (surface->width != width || surface->height != height) { + TPL_INFO("[QUEUE_RESIZE]", + "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)", + wl_egl_surface, tbm_queue, + surface->width, surface->height, width, height); + } + + /* When queue_reset_callback is called, if is_activated is different from + * its previous state change the reset flag to TPL_TRUE to get a new buffer + * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ + is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client, + wl_egl_surface->tbm_queue); + if (wl_egl_surface->is_activated != is_activated) { + if (is_activated) { + TPL_INFO("[ACTIVATED]", + "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); + } else { + TPL_LOG_T("[DEACTIVATED]", + " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); + } + } + + wl_egl_surface->reset = TPL_TRUE; + + if (surface->reset_cb) + surface->reset_cb(surface->reset_data); +} + +static void __cb_tbm_queue_trace_callback(tbm_surface_queue_h tbm_queue, + tbm_surface_h tbm_surface, + tbm_surface_queue_trace trace, + void *data) +{ + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + + /* TODO */ +} + +static void +__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue, + void *data) +{ + TPL_IGNORE(tbm_queue); + + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + + tpl_gsource_send_message(wl_egl_surface->surf_source, 2); + + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); +} + +/* -- END -- tbm_surface_queue callback funstions */ -- 2.7.4 From c27c851435b24ea64070e712a7aee07f2492ed26 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 20 Jan 2021 
19:58:00 +0900 Subject: [PATCH 12/16] Implement related to wl_egl_buffer Change-Id: I89ecb28818c6a209455c5dd9f1d90e72a4a038a9 Signed-off-by: Joonbum Ko --- src/tpl_wl_egl.c | 1471 ++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 1164 insertions(+), 307 deletions(-) diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index ba3fb80..47ffea2 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -27,14 +27,15 @@ #include "tpl_utils_gthread.h" -static int buffer_info_key; -#define KEY_BUFFER_INFO (unsigned long)(&buffer_info_key) +static int wl_egl_buffer_key; +#define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key) /* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */ #define CLIENT_QUEUE_SIZE 3 typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t; typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t; +typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t; struct _tpl_wl_egl_display { tpl_gsource *disp_source; @@ -90,9 +91,8 @@ struct _tpl_wl_egl_surface { /* the lists for buffer tracing */ tpl_list_t *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */ tpl_list_t *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */ - tpl_list_t *fence_waiting_bufferss; /* Trace buffers from ENQUEUE to fence signaled */ + tpl_list_t *fence_waiting_buffers; /* Trace buffers from ENQUEUE to fence signaled */ tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */ - tpl_list_t *render_done_fences; /* for attaching to twe_thread with fences passed by enqueue */ tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */ struct { @@ -121,14 +121,16 @@ struct _tpl_wl_egl_surface { tpl_bool_t set_serial_is_used; }; -struct _tpl_wl_egl_bufer { - tbm_surface_h tbm_surface; +struct _tpl_wl_egl_buffer { + tbm_surface_h tbm_surface; struct wl_proxy *wl_buffer; - int dx, dy; - int width, height; + int dx, dy; /* position to attach to wl_surface 
*/ + int width, height; /* size to attach to wl_surface */ - tpl_wl_egl_surface_t *wl_egl_surface; + /* for damage region */ + int num_rects; + int *rects; /* for wayland_tbm_client_set_buffer_transform */ int w_transform; @@ -137,12 +139,6 @@ struct _tpl_wl_egl_bufer { /* for wl_surface_set_buffer_transform */ int transform; - /* for damage region */ - int num_rects; - int *rects; - - unsigned int commit_sync_ts_backup; - /* for wayland_tbm_client_set_buffer_serial */ unsigned int serial; @@ -161,22 +157,27 @@ struct _tpl_wl_egl_bufer { /* each buffers own its release_fence_fd, until it passes ownership * to it to EGL */ - int release_fence_fd; + int32_t release_fence_fd; - /* each buffers own its acquire_fence_fd. until it passes ownership - * to it to SERVER */ - int acquire_fence_fd; -}; + /* each buffers own its acquire_fence_fd. + * If it use zwp_linux_buffer_release_v1 the ownership of this fd + * will be passed to display server + * Otherwise it will be used as a fence waiting for render done + * on tpl thread */ + int32_t acquire_fence_fd; -struct sync_info { - tbm_surface_h tbm_surface; - int sync_fd; -}; + /* Fd to send a signal when wl_surface_commit with this buffer */ + int32_t commit_sync_fd; + + /* to get presentation feedback from display server */ + struct wp_presentation_feedback *presentation_feedback; + + /* Fd to send a siganl when receive the + * presentation feedback from display server */ + int32_t presentation_sync_fd; + + tpl_gsource *waiting_source; -struct _twe_fence_wait_source { - tpl_gsource *fence_source; - tbm_fd fence_fd; - tbm_surface_h tbm_surface; tpl_wl_egl_surface_t *wl_egl_surface; }; @@ -222,7 +223,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) /* If an error occurs in tdm_client_handle_events, it cannot be recovered. * When tdm_source is no longer available due to an unexpected situation, - * twe_thread must remove it from the thread and destroy it. 
+ * wl_egl_thread must remove it from the thread and destroy it. * In that case, tdm_vblank can no longer be used for surfaces and displays * that used this tdm_source. */ if (tdm_err != TDM_ERROR_NONE) { @@ -244,12 +245,11 @@ static void __thread_func_tdm_finalize(tpl_gsource *gsource) { tpl_wl_egl_display_t *wl_egl_display = NULL; - twe_tdm_source *tdm_source = (twe_tdm_source *)source; wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource); - TPL_LOG_T("WL_EGL", "tdm_destroy| tdm_source(%p) tdm_client(%p)", - gsource, wl_egl_display->tdm_client); + TPL_LOG_T("WL_EGL", "tdm_destroy| wl_egl_display(%p) tdm_client(%p)", + wl_egl_display, wl_egl_display->tdm_client); if (wl_egl_display->tdm_client) { tdm_client_destroy(wl_egl_display->tdm_client); @@ -608,7 +608,7 @@ _thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display) ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue); if (ret == -1) { - _twe_display_print_err(wl_egl_display, "roundtrip_queue"); + _wl_display_print_err(wl_egl_display, "roundtrip_queue"); result = TPL_ERROR_INVALID_OPERATION; goto fini; } @@ -769,7 +769,7 @@ __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) return TPL_GSOURCE_REMOVE; } - g_mutex_lock(&wl_egl_display->wl_event_mutex); + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); if (tpl_gsource_check_io_condition(gsource)) { if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display, wl_egl_display->ev_queue) == -1) { @@ -778,7 +778,7 @@ __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) } wl_display_flush(wl_egl_display->wl_display); - g_mutex_unlock(&wl_egl_display->wl_event_mutex); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return TPL_GSOURCE_CONTINUE; } @@ -922,6 +922,25 @@ __tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) return tbm_surface; } +tpl_bool_t +__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy) +{ + struct wl_interface *wl_egl_native_dpy = *(void 
**) native_dpy; + TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE); + + /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value + is a memory address pointing the structure of wl_display_interface. */ + if (wl_egl_native_dpy == &wl_display_interface) + return TPL_TRUE; + + if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name, + strlen(wl_display_interface.name)) == 0) { + return TPL_TRUE; + } + + return TPL_FALSE; +} + static tpl_result_t __tpl_wl_egl_surface_init(tpl_surface_t *surface) { @@ -1119,16 +1138,6 @@ _thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface, return NULL; } - if (tbm_surface_queue_add_trace_cb( - tbm_queue, - __cb_tbm_queue_trace_callback, - (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to register trace callback to tbm_surface_queue(%p)", - tbm_queue); - tbm_surface_queue_destroy(tbm_queue); - return NULL; - } - if (tbm_surface_queue_add_acquirable_cb( tbm_queue, __cb_tbm_queue_acquirable_callback, @@ -1218,7 +1227,7 @@ _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) } if (wl_egl_surface->tss_flusher) { - tizen_surface_shm_flusher_add_listener(surf_source->tss_flusher, + tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher, &tss_flusher_listener, wl_egl_surface); TPL_INFO("[FLUSHER_INIT]", @@ -1245,7 +1254,6 @@ _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) wl_egl_surface->in_use_buffers = __tpl_list_alloc(); wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc(); wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc(); - wl_egl_surface->render_done_fences = __tpl_list_alloc(); wl_egl_surface->presentation_feedbacks = __tpl_list_alloc(); } @@ -1345,100 +1353,87 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - /* TODO if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) { while 
(!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) { tbm_surface_h tbm_surface = __tpl_list_pop_front(wl_egl_surface->presentation_feedbacks, NULL); if (tbm_surface_internal_is_valid(tbm_surface)) { - twe_wl_buffer_info *buf_info = NULL; - tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO, - (void **)&buf_info); - if (buf_info && buf_info->presentation_sync_fd != -1 && - buf_info->presentation_feedback) { - - _write_to_eventfd(buf_info->presentation_sync_fd); - close(buf_info->presentation_sync_fd); - buf_info->presentation_sync_fd = -1; - - wp_presentation_feedback_destroy(buf_info->presentation_feedback); - buf_info->presentation_feedback = NULL; + tpl_wl_egl_buffer_t *wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + if (wl_egl_buffer && + wl_egl_buffer->presentation_sync_fd != -1 && + wl_egl_buffer->presentation_feedback) { + + _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; + + wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback); + wl_egl_buffer->presentation_feedback = NULL; } } } } if (wl_egl_surface->presentation_sync.fd != -1) { - _write_to_eventfd(surf_source->presentation_sync.fd); - close(surf_source->presentation_sync.fd); - surf_source->presentation_sync.fd = -1; + _write_to_eventfd(wl_egl_surface->presentation_sync.fd); + close(wl_egl_surface->presentation_sync.fd); + wl_egl_surface->presentation_sync.fd = -1; } - */ + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - /* TODO buffer if (wl_egl_surface->in_use_buffers) { __tpl_list_free(wl_egl_surface->in_use_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); wl_egl_surface->in_use_buffers = NULL; } - if (surf_source->committed_buffers) { - while (!__tpl_list_is_empty(surf_source->committed_buffers)) { + if (wl_egl_surface->committed_buffers) { + while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) { tbm_surface_queue_error_e 
tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; tbm_surface_h tbm_surface = - __tpl_list_pop_front(surf_source->committed_buffers, + __tpl_list_pop_front(wl_egl_surface->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); } - __tpl_list_free(surf_source->committed_buffers, NULL); - surf_source->committed_buffers = NULL; + __tpl_list_free(wl_egl_surface->committed_buffers, NULL); + wl_egl_surface->committed_buffers = NULL; } - if (surf_source->vblank_waiting_buffers) { - while (!__tpl_list_is_empty(surf_source->vblank_waiting_buffers)) { + if (wl_egl_surface->vblank_waiting_buffers) { + while (!__tpl_list_is_empty(wl_egl_surface->vblank_waiting_buffers)) { tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; tbm_surface_h tbm_surface = - __tpl_list_pop_front(surf_source->vblank_waiting_buffers, + __tpl_list_pop_front(wl_egl_surface->vblank_waiting_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); - tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) TPL_ERR("Failed to release. 
tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); } - __tpl_list_free(surf_source->vblank_waiting_buffers, NULL); - surf_source->vblank_waiting_buffers = NULL; + __tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL); + wl_egl_surface->vblank_waiting_buffers = NULL; } - if (surf_source->use_sync_fence && surf_source->fence_waiting_sources) { - while (!__tpl_list_is_empty(surf_source->fence_waiting_sources)) { - twe_fence_wait_source *wait_source = - __tpl_list_pop_front(surf_source->fence_waiting_sources, + if (wl_egl_surface->fence_waiting_buffers) { + while (!__tpl_list_is_empty(wl_egl_surface->fence_waiting_buffers)) { + tbm_surface_h tbm_surface = + __tpl_list_pop_front(wl_egl_surface->fence_waiting_buffers, NULL); - if (!g_source_is_destroyed(&wait_source->gsource)) { - tbm_surface_internal_unref(wait_source->tbm_surface); - wait_source->tbm_surface = NULL; - - close(wait_source->fence_fd); - wait_source->fence_fd = -1; - - g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag); - g_source_destroy(&wait_source->gsource); - g_source_unref(&wait_source->gsource); - } + /* TODO */ } } - */ if (wl_egl_surface->surface_sync) { - TPL_INFO("[SURFACE_SYNC_DESTROY]", "wl_egl_surface(%p) surface_sync(%p)", + TPL_INFO("[SURFACE_SYNC_DESTROY]", + "wl_egl_surface(%p) surface_sync(%p)", wl_egl_surface, wl_egl_surface->surface_sync); zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync); wl_egl_surface->surface_sync = NULL; @@ -1493,7 +1488,7 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_lock(&wl_egl_surface->surf_mutex); TPL_DEBUG("wl_egl_surface(%p) acquirable message received!", wl_egl_surface); - /* TODO acquirable */ + _thread_surface_queue_acquire(wl_egl_surface); tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } @@ -1562,171 +1557,167 @@ __tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface, return TPL_ERROR_NONE; } -static tpl_result_t 
-__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface, - int num_rects, const int *rects, tbm_fd sync_fence) +static tpl_bool_t +__tpl_wl_egl_surface_validate(tpl_surface_t *surface) { + tpl_bool_t retval = TPL_TRUE; + TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - TPL_ASSERT(tbm_surface); - TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); + TPL_ASSERT(surface->backend.data); tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *) surface->backend.data; - tbm_surface_queue_error_e tsq_err; - tpl_result_t ret = TPL_ERROR_NONE; - int bo_name = 0; - - TPL_OBJECT_LOCK(wl_egl_surface); - - bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); + (tpl_wl_egl_surface_t *)surface->backend.data; - if (!wl_egl_surface) { - TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)", - surface, wl_egl_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wl_egl_surface); - return TPL_ERROR_INVALID_PARAMETER; - } + retval = !(wl_egl_surface->reset); - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", - tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wl_egl_surface); - return TPL_ERROR_INVALID_PARAMETER; - } + return retval; +} - TRACE_MARK("[ENQ] BO_NAME:%d", bo_name); +void +__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) +{ + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; - TPL_LOG_T("WL_EGL", - "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", - wl_egl_surface, tbm_surface, bo_name, sync_fence); + if (width) + *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); + if (height) + *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); +} - /* If there are received region information, - * save it to buf_info in tbm_surface 
user_data using below API. */ - if (num_rects && rects) { - ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects); - if (ret != TPL_ERROR_NONE) { - TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)", - num_rects, rects); - } - } +#define CAN_DEQUEUE_TIMEOUT_MS 10000 - if (!wl_egl_surface->need_to_enqueue || - !twe_surface_check_commit_needed(wl_egl_surface->twe_surface, - tbm_surface)) { - TPL_LOG_T("WL_EGL", - "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", - ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wl_egl_surface); - return TPL_ERROR_NONE; - } +tpl_result_t +_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and - * commit if surface->frontbuffer that is already set and the tbm_surface - * client want to enqueue are the same. - */ - if (surface->is_frontbuffer_mode) { - /* The first buffer to be activated in frontbuffer mode must be - * committed. Subsequence frames do not need to be committed because - * the buffer is already displayed. - */ - if (surface->frontbuffer == tbm_surface) - wl_egl_surface->need_to_enqueue = TPL_FALSE; + _print_buffer_lists(wl_egl_surface); - if (sync_fence != -1) { - close(sync_fence); - sync_fence = -1; - } + if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue)) + != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)", + wl_egl_surface->tbm_queue, tsq_err); + return TPL_ERROR_INVALID_OPERATION; } - if (sync_fence != -1) { - ret = twe_surface_set_sync_fd(wl_egl_surface->twe_surface, - tbm_surface, sync_fence); - if (ret != TPL_ERROR_NONE) { - TPL_WARN("Failed to set sync fd (%d). 
But it will continue.", - sync_fence); + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + if (wl_egl_surface->committed_buffers) { + while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) { + tbm_surface_h tbm_surface = + __tpl_list_pop_front(wl_egl_surface->committed_buffers, + (tpl_free_func_t)__cb_buffer_remove_from_list); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", + tbm_surface, tsq_err); } } + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - tbm_surface_internal_unref(tbm_surface); - TPL_ERR("Failed to enqueue tbm_surface(%p). tpl_surface(%p) tsq_err=%d", - tbm_surface, surface, tsq_err); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wl_egl_surface); - return TPL_ERROR_INVALID_OPERATION; - } - - tbm_surface_internal_unref(tbm_surface); - - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_OBJECT_UNLOCK(wl_egl_surface); + TPL_INFO("[FORCE_FLUSH]", + "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); return TPL_ERROR_NONE; } -static tpl_bool_t -__tpl_wl_egl_surface_validate(tpl_surface_t *surface) +static void +_wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer, + tpl_wl_egl_surface_t *wl_egl_surface) { - tpl_bool_t retval = TPL_TRUE; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + struct tizen_private *tizen_private = + (struct tizen_private *)wl_egl_window->driver_private; - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); + TPL_ASSERT(tizen_private); - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; 
+ wl_egl_buffer->draw_done = TPL_FALSE; + wl_egl_buffer->need_to_commit = TPL_TRUE; - retval = !(wl_egl_surface->reset); + wl_egl_buffer->acquire_fence_fd = -1; + wl_egl_buffer->release_fence_fd = -1; + wl_egl_buffer->commit_sync_fd = -1; + wl_egl_buffer->presentation_sync_fd = -1; - return retval; -} + wl_egl_buffer->presentation_feedback = NULL; + wl_egl_buffer->buffer_release = NULL; -static tpl_result_t -__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface) -{ - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + wl_egl_buffer->transform = tizen_private->transform; - wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; - if (!wl_egl_surface) { - TPL_ERR("Invalid backend surface. surface(%p) wl_egl_surface(%p)", - surface, wl_egl_surface); - return TPL_ERROR_INVALID_PARAMETER; + if (wl_egl_buffer->w_transform != tizen_private->window_transform) { + wl_egl_buffer->w_transform = tizen_private->window_transform; + wl_egl_buffer->w_rotated = TPL_TRUE; } - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Invalid buffer. 
tbm_surface(%p)", tbm_surface); - return TPL_ERROR_INVALID_PARAMETER; + if (wl_egl_surface->set_serial_is_used) { + wl_egl_buffer->serial = wl_egl_surface->serial; + } else { + wl_egl_buffer->serial = ++tizen_private->serial; } - tbm_surface_internal_unref(tbm_surface); + if (wl_egl_buffer->rects) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } +} - tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to release tbm_surface(%p) surface(%p)", - tbm_surface, surface); - return TPL_ERROR_INVALID_OPERATION; +static tpl_wl_egl_buffer_t * +_get_wl_egl_buffer(tbm_surface_h tbm_surface) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER, + (void **)&wl_egl_buffer); + return wl_egl_buffer; +} + +static tpl_wl_egl_buffer_t * +_wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface, + tbm_surface_h tbm_surface) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_bool_t is_new_buffer = TPL_FALSE; + + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + + if (!wl_egl_buffer) { + wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t)); + TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL); + + tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER, + (tbm_data_free)__cb_wl_egl_buffer_free); + tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER, + wl_egl_buffer); + is_new_buffer = TPL_TRUE; + + wl_egl_buffer->wl_buffer = NULL; + wl_egl_buffer->tbm_surface = tbm_surface; + wl_egl_buffer->wl_egl_surface = wl_egl_surface; + + wl_egl_buffer->dx = wl_egl_window->dx; + wl_egl_buffer->dy = wl_egl_window->dy; + wl_egl_buffer->width = tbm_surface_get_width(tbm_surface); + 
wl_egl_buffer->height = tbm_surface_get_height(tbm_surface); + + TPL_INFO("[WL_EGL_BUFFER_CREATE]", + "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, wl_egl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); } - TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)", - surface, tbm_surface); + _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface); - return TPL_ERROR_NONE; + return wl_egl_buffer; } -#define CAN_DEQUEUE_TIMEOUT_MS 10000 - static tbm_surface_h __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, - tbm_fd *sync_fence) + int32_t *release_fence) { TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); @@ -1734,35 +1725,34 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TPL_ASSERT(surface->display->backend.data); TPL_OBJECT_CHECK_RETURN(surface, NULL); - tbm_surface_h tbm_surface = NULL; tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)surface->display->backend.data; - tbm_surface_queue_error_e tsq_err = 0; - int is_activated = 0; - int bo_name = 0; - tpl_result_t lock_ret = TPL_FALSE; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_bool_t is_activated = 0; + int bo_name = 0; + tbm_surface_h tbm_surface = NULL; TPL_OBJECT_UNLOCK(surface); tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS); TPL_OBJECT_LOCK(surface); - /* After the can dequeue state, call twe_display_lock to prevent other + /* After the can dequeue state, lock the wl_event_mutex to prevent other * events from being processed in wayland_egl_thread * during below dequeue procedure. 
*/ - lock_ret = twe_display_lock(wl_egl_display->twe_display); + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { - TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", - wl_egl_surface->tbm_queue, surface); - if (twe_surface_queue_force_flush(wl_egl_surface->twe_surface) - != TPL_ERROR_NONE) { + TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", + wl_egl_surface->tbm_queue, surface); + if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) { TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)", wl_egl_surface->tbm_queue, surface); - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wl_egl_display->twe_display); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return NULL; } else { tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; @@ -1772,8 +1762,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)", wl_egl_surface->tbm_queue, surface); - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wl_egl_display->twe_display); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return NULL; } @@ -1787,11 +1776,16 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, * DEACTIVATED state means composite mode. Client's buffer will be displayed by compositor(E20) with compositing. 
*/ - is_activated = twe_surface_check_activated(wl_egl_surface->twe_surface); + is_activated = wayland_tbm_client_queue_check_activate( + wl_egl_display->wl_tbm_client, + wl_egl_surface->tbm_queue); + wl_egl_surface->is_activated = is_activated; surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); + wl_egl_surface->width = surface->width; + wl_egl_surface->height = surface->height; if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { /* If surface->frontbuffer is already set in frontbuffer mode, @@ -1799,17 +1793,15 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, * otherwise dequeue the new buffer after initializing * surface->frontbuffer to NULL. */ if (is_activated && !wl_egl_surface->reset) { + bo_name = _get_tbm_surface_bo_name(surface->frontbuffer); + TPL_LOG_T("WL_EGL", "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", - surface->frontbuffer, - tbm_bo_export(tbm_surface_internal_get_bo( - surface->frontbuffer, 0))); + surface->frontbuffer, bo_name); TRACE_ASYNC_BEGIN((int)surface->frontbuffer, "[DEQ]~[ENQ] BO_NAME:%d", - tbm_bo_export(tbm_surface_internal_get_bo( - surface->frontbuffer, 0))); - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wl_egl_display->twe_display); + bo_name); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return surface->frontbuffer; } else { surface->frontbuffer = NULL; @@ -1822,25 +1814,28 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue, &tbm_surface); if (!tbm_surface) { - TPL_ERR("Failed to dequeue from tbm_queue(%p) surface(%p)| tsq_err = %d", - wl_egl_surface->tbm_queue, surface, tsq_err); - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wl_egl_display->twe_display); + TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d", + 
wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return NULL; } tbm_surface_internal_ref(tbm_surface); - /* If twe_surface_get_buffer_release_fence_fd return -1, + bo_name = _get_tbm_surface_bo_name(tbm_surface); + + wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer."); + + /* If wl_egl_buffer->release_fence_fd is -1, * the tbm_surface can be used immediately. * If not, user(EGL) have to wait until signaled. */ - if (sync_fence) { - *sync_fence = twe_surface_get_buffer_release_fence_fd( - wl_egl_surface->twe_surface, tbm_surface); + if (release_fence && wl_egl_surface->surface_sync) { + *release_fence = wl_egl_buffer->release_fence_fd; + TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)", + wl_egl_surface, wl_egl_buffer, *release_fence); } - bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); - if (surface->is_frontbuffer_mode && is_activated) surface->frontbuffer = tbm_surface; @@ -1848,67 +1843,636 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name); TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)", - tbm_surface, bo_name, sync_fence ? *sync_fence : -1); + TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + tbm_surface, bo_name, release_fence ? 
*release_fence : -1); - if (lock_ret == TPL_ERROR_NONE) - twe_display_unlock(wl_egl_display->twe_display); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return tbm_surface; } -void -__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) +static tpl_result_t +__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface) { - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); - if (width) - *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); - if (height) - *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); -} + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; + } -tpl_bool_t -__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy) -{ - struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy; - TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE); + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + /* Stop tracking of this canceled tbm_surface */ + __tpl_list_remove_data(wl_egl_surface->in_use_buffers, + (void *)tbm_surface, TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value - is a memory address pointing the structure of wl_display_interface. 
*/ - if (wl_egl_native_dpy == &wl_display_interface) - return TPL_TRUE; + tbm_surface_internal_unref(tbm_surface); - if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name, - strlen(wl_display_interface.name)) == 0) { - return TPL_TRUE; + tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to release tbm_surface(%p) surface(%p)", + tbm_surface, surface); + return TPL_ERROR_INVALID_OPERATION; } - return TPL_FALSE; + TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); + + return TPL_ERROR_NONE; } -void -__tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend) +static tpl_result_t +__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, int32_t acquire_fence) { - TPL_ASSERT(backend); - - backend->type = TPL_BACKEND_WAYLAND_THREAD; - backend->data = NULL; + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(tbm_surface); + TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); - backend->init = __tpl_wl_egl_display_init; - backend->fini = __tpl_wl_egl_display_fini; - backend->query_config = __tpl_wl_egl_display_query_config; - backend->filter_config = __tpl_wl_egl_display_filter_config; - backend->get_window_info = __tpl_wl_egl_display_get_window_info; - backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info; - backend->get_buffer_from_native_pixmap = - __tpl_wl_egl_display_get_buffer_from_native_pixmap; -} + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *) surface->backend.data; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_result_t ret = TPL_ERROR_NONE; + int bo_name = -1; -void 
-__tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend) + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", + tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + return TPL_ERROR_INVALID_PARAMETER; + } + + bo_name = _get_tbm_surface_bo_name(tbm_surface); + + TRACE_MARK("[ENQ] BO_NAME:%d", bo_name); + + TPL_LOG_T("WL_EGL", + "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_surface, tbm_surface, bo_name, acquire_fence); + + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + + /* If there are received region information, save it to wl_egl_buffer */ + if (num_rects && rects) { + if (wl_egl_buffer->rects != NULL) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } + + wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects)); + wl_egl_buffer->num_rects = num_rects; + + if (!wl_egl_buffer->rects) { + TPL_ERR("Failed to allocate memory fo damage rects info."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects); + } + + if (!wl_egl_surface->need_to_enqueue || + !wl_egl_buffer->need_to_commit) { + TPL_WARN("WL_EGL", + "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", + ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + return TPL_ERROR_NONE; + } + + /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and + * commit if surface->frontbuffer that is already set and the tbm_surface + * client want to enqueue are the same. + */ + if (surface->is_frontbuffer_mode) { + /* The first buffer to be activated in frontbuffer mode must be + * committed. Subsequence frames do not need to be committed because + * the buffer is already displayed. 
+ */ + if (surface->frontbuffer == tbm_surface) + wl_egl_surface->need_to_enqueue = TPL_FALSE; + + if (acquire_fence != -1) { + close(acquire_fence); + acquire_fence = -1; + } + } + + if (wl_egl_buffer->acquire_fence_fd != -1) + close(wl_egl_buffer->acquire_fence_fd); + + wl_egl_buffer->acquire_fence_fd = acquire_fence; + + tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + tbm_surface_internal_unref(tbm_surface); + TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d", + tbm_surface, wl_egl_surface, tsq_err); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + return TPL_ERROR_INVALID_OPERATION; + } + + tbm_surface_internal_unref(tbm_surface); + + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + + return TPL_ERROR_NONE; +} + +static tpl_result_t +_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tbm_surface_h tbm_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tpl_bool_t ready_to_commit = TPL_FALSE; + + while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) { + tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue, + &tbm_surface); + if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to acquire from tbm_queue(%p)", + wl_egl_surface->tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + tbm_surface_internal_ref(tbm_surface); + + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, + "wl_egl_buffer sould be not NULL"); + + if (wl_egl_buffer->wl_buffer == NULL) { + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + wl_egl_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + 
wl_egl_display->wl_tbm_client, tbm_surface); + + if (!wl_egl_buffer->wl_buffer) { + TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)", + wl_egl_display->wl_tbm_client, tbm_surface); + } + } + + if (wl_egl_buffer->acquire_fence_fd != -1) { + if (wl_egl_surface->surface_sync) + ready_to_commit = TPL_TRUE; + else { + if (wl_egl_buffer->waiting_source) { + tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); + wl_egl_buffer->waiting_source = NULL; + } + + wl_egl_buffer->waiting_source = + tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer, + wl_egl_buffer->acquire_fence_fd, buffer_funcs, + SOURCE_TYPE_DISPOSABLE); + + __tpl_list_push_back(wl_egl_surface->fence_waiting_buffers, tbm_surface); + + TRACE_ASYNC_BEGIN(wl_egl_buffer, "FENCE WAIT fd(%d)", + wl_egl_buffer->acquire_fence_fd); + + ready_to_commit = TPL_FALSE; + } + } + + if (ready_to_commit) { + if (wl_egl_surface->vblank_done) + ready_to_commit = TPL_TRUE; + else { + __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, tbm_surface); + ready_to_commit = TPL_FALSE; + } + } + + if (ready_to_commit) + _thread_wl_surface_commit(wl_egl_surface, tbm_surface); + } + + return TPL_ERROR_NONE; +} + +static const struct wl_buffer_listener wl_buffer_release_listener = { + (void *)__cb_wl_buffer_release, +}; + +static void +__cb_presentation_feedback_sync_output(void *data, + struct wp_presentation_feedback *presentation_feedback, + struct wl_output *output) +{ + TPL_IGNORE(data); + TPL_IGNORE(presentation_feedback); + TPL_IGNORE(output); + /* Nothing to do */ +} + +static void +__cb_presentation_feedback_presented(void *data, + struct wp_presentation_feedback *presentation_feedback, + uint32_t tv_sec_hi, + uint32_t tv_sec_lo, + uint32_t tv_nsec, + uint32_t refresh_nsec, + uint32_t seq_hi, + uint32_t seq_lo, + uint32_t flags) +{ + TPL_IGNORE(tv_sec_hi); + TPL_IGNORE(tv_sec_lo); + TPL_IGNORE(tv_nsec); + TPL_IGNORE(refresh_nsec); + TPL_IGNORE(seq_hi); + TPL_IGNORE(seq_lo); + 
TPL_IGNORE(flags); + + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + + TPL_DEBUG("[FEEDBACK][PRESENTED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + if (wl_egl_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (ret == -1) { + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + wl_egl_buffer->presentation_sync_fd); + } + + TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd, + "[PRESENTATION_SYNC] bo(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; + } + + if (wl_egl_buffer->presentation_feedback) + wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback); + + wl_egl_buffer->presentation_feedback = NULL; + + __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface, + TPL_FIRST, NULL); + + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); +} + +static void +__cb_presentation_feedback_discarded(void *data, + struct wp_presentation_feedback *presentation_feedback) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + + TPL_DEBUG("[FEEDBACK][DISCARDED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + if (wl_egl_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (ret == -1) { + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + 
wl_egl_buffer->presentation_sync_fd); + } + + TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd, + "[PRESENTATION_SYNC] bo(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; + } + + if (wl_egl_buffer->presentation_feedback) + wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback); + + wl_egl_buffer->presentation_feedback = NULL; + + __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface, + TPL_FIRST, NULL); + + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); +} + +static const struct wp_presentation_feedback_listener feedback_listener = { + __cb_presentation_feedback_sync_output, /* sync_output feedback -*/ + __cb_presentation_feedback_presented, + __cb_presentation_feedback_discarded +}; + +static void +_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, + tbm_surface_h tbm_surface) +{ + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + struct wl_surface *wl_surface = wl_egl_surface->wl_surface; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + uint32_t version; + + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, + "wl_egl_buffer sould be not NULL"); + + if (wl_egl_buffer->wl_buffer == NULL) { + wl_egl_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + wl_egl_display->wl_tbm_client, tbm_surface); + } + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL, + "[FATAL] Failed to create wl_buffer"); + + wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer, + &wl_buffer_release_listener, wl_egl_buffer); + + version = wl_proxy_get_version((struct wl_proxy *)wl_surface); + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) { + 
wl_egl_buffer->presentation_feedback = + wp_presentation_feedback(wl_egl_display->presentation, + wl_surface); + wp_presentation_feedback_add_listener(wl_egl_buffer->presentation_feedback, + &feedback_listener, wl_egl_buffer); + __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, tbm_surface); + TRACE_ASYNC_BEGIN(wl_egl_buffer->presentation_sync_fd, + "[PRESENTATION_SYNC] bo(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + } + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + + if (wl_egl_buffer->w_rotated == TPL_TRUE) { + wayland_tbm_client_set_buffer_transform( + wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer, + wl_egl_buffer->w_transform); + wl_egl_buffer->w_rotated = TPL_FALSE; + } + + if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) { + wl_egl_surface->latest_transform = wl_egl_buffer->transform; + wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform); + } + + if (wl_egl_window) { + wl_egl_window->attached_width = wl_egl_buffer->width; + wl_egl_window->attached_height = wl_egl_buffer->height; + } + + wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer, + wl_egl_buffer->dx, wl_egl_buffer->dy); + + if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) { + if (version < 4) { + wl_surface_damage(wl_surface, + wl_egl_buffer->dx, wl_egl_buffer->dy, + wl_egl_buffer->width, wl_egl_buffer->height); + } else { + wl_surface_damage_buffer(wl_surface, + 0, 0, + wl_egl_buffer->width, wl_egl_buffer->height); + } + } else { + int i; + for (i = 0; i < wl_egl_buffer->num_rects; i++) { + int inverted_y = + wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] + + wl_egl_buffer->rects[i * 4 + 3]); + if (version < 4) { + wl_surface_damage(wl_surface, + wl_egl_buffer->rects[i * 4 + 0], + inverted_y, + wl_egl_buffer->rects[i * 4 + 2], + wl_egl_buffer->rects[i * 4 + 3]); + } else { + wl_surface_damage_buffer(wl_surface, + wl_egl_buffer->rects[i * 4 + 0], + inverted_y, + 
wl_egl_buffer->rects[i * 4 + 2], + wl_egl_buffer->rects[i * 4 + 3]); + } + } + } + + wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer, + wl_egl_buffer->serial); + + wl_egl_buffer->need_to_release = TPL_TRUE; + + if (wl_egl_display->use_explicit_sync && + wl_egl_surface->surface_sync) { + + zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync, + wl_egl_buffer->acquire_fence_fd); + TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)", + wl_egl_surface, tbm_surface, wl_egl_buffer->acquire_fence_fd); + close(wl_egl_buffer->acquire_fence_fd); + wl_egl_buffer->acquire_fence_fd = -1; + + wl_egl_buffer->buffer_release = + zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync); + if (!wl_egl_buffer->buffer_release) { + TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface); + } else { + zwp_linux_buffer_release_v1_add_listener( + wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer); + TPL_DEBUG("add explicit_sync_release_listener."); + } + } + + wl_surface_commit(wl_surface); + + wl_display_flush(wl_egl_display->wl_display); + + TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + wl_egl_buffer->need_to_commit = TPL_FALSE; + + TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + if (wl_egl_display->tdm_initialized && + _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE) + TPL_ERR("Failed to set wait vblank."); + + if (wl_egl_surface->committed_buffers) { + __tpl_list_push_back(wl_egl_surface->committed_buffers, tbm_surface); + } + + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); + + if (wl_egl_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (ret == -1) { + TPL_ERR("Failed to 
send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd); + } + + TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)", + wl_egl_surface, wl_egl_buffer->commit_sync_fd); + + close(wl_egl_buffer->commit_sync_fd); + wl_egl_buffer->commit_sync_fd = -1; + } + + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); +} + +static tpl_bool_t +__thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = + (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource); + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; + + wl_egl_surface->render_done_cnt++; + + TRACE_ASYNC_END(wl_egl_buffer, "FENCE WAIT fd(%d)", + wl_egl_buffer->acquire_fence_fd); + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + __tpl_list_remove_data(wl_egl_surface->fence_waiting_buffers, + (void *)tbm_surface, TPL_FIRST, NULL); + + if (wl_egl_surface->vblank_done) + _thread_wl_surface_commit(wl_egl_surface, tbm_surface); + else + __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, + tbm_surface); + + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + + /* This source is used only once and does not allow reuse. + * So finalize will be executed immediately. 
*/ + g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag); + g_source_destroy(&wait_source->gsource); + g_source_unref(&wait_source->gsource); + + return TPL_FALSE; +} + +static void +__thread_func_waiting_source_finalize(tpl_gsource *gsource) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = + (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource); + + TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)", + wl_egl_buffer, wl_egl_buffer->waiting_source, + wl_egl_buffer->acquire_fence_fd); + + close(wl_egl_buffer->acquire_fence_fd); + wl_egl_buffer->acquire_fence_fd = -1; + wl_egl_buffer->waiting_source = NULL; +} + +static tpl_gsource_functions buffer_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_waiting_source_dispatch, + .finalize = __thread_func_waiting_source_finalize, +}; + +static tpl_result_t +_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tdm_error tdm_err = TDM_ERROR_NONE; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + if (wl_egl_surface->vblank == NULL) { + wl_egl_surface->vblank = + _thread_create_tdm_client_vblank(wl_egl_display->tdm_client); + if (!wl_egl_surface->vblank) { + TPL_WARN("Failed to create vblank. wl_egl_surface(%p)", + wl_egl_surface); + return TPL_ERROR_OUT_OF_MEMORY; + } + } + + tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank, + wl_egl_surface->post_interval, + __cb_tdm_client_vblank, + (void *)wl_egl_surface); + + if (tdm_err == TDM_ERROR_NONE) { + wl_egl_surface->vblank_done = TPL_FALSE; + TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK"); + } else { + TPL_ERR("Failed to tdm_client_vblank_wait. 
tdm_err(%d)", tdm_err); + return TPL_ERROR_INVALID_OPERATION; + } + + return TPL_ERROR_NONE; +} + +static int +_write_to_eventfd(int eventfd) +{ + uint64_t value = 1; + int ret; + + if (eventfd == -1) { + TPL_ERR("Invalid fd(-1)"); + return -1; + } + + ret = write(eventfd, &value, sizeof(uint64_t)); + if (ret == -1) { + TPL_ERR("failed to write to fd(%d)", eventfd); + return ret; + } + + return ret; +} + +void +__tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend) +{ + TPL_ASSERT(backend); + + backend->type = TPL_BACKEND_WAYLAND_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_egl_display_init; + backend->fini = __tpl_wl_egl_display_fini; + backend->query_config = __tpl_wl_egl_display_query_config; + backend->filter_config = __tpl_wl_egl_display_filter_config; + backend->get_window_info = __tpl_wl_egl_display_get_window_info; + backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info; + backend->get_buffer_from_native_pixmap = + __tpl_wl_egl_display_get_buffer_from_native_pixmap; +} + +void +__tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend) { TPL_ASSERT(backend); @@ -1966,7 +2530,7 @@ __cb_destroy_callback(void *private) free(tizen_private); tizen_private = NULL; - tpl_gmutex_unlock(&surf_source->surf_mutex); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } } @@ -2266,16 +2830,6 @@ __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, surface->reset_cb(surface->reset_data); } -static void __cb_tbm_queue_trace_callback(tbm_surface_queue_h tbm_queue, - tbm_surface_h tbm_surface, - tbm_surface_queue_trace trace, - void *data) -{ - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; - - /* TODO */ -} - static void __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue, void *data) @@ -2293,3 +2847,306 @@ __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue, } /* -- END -- tbm_surface_queue callback funstions */ + +/* tdm_client vblank callback function */ 
+static void +__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, + unsigned int sequence, unsigned int tv_sec, + unsigned int tv_usec, void *user_data) +{ + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data; + tbm_surface_h tbm_surface = NULL; + + TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK"); + + if (error == TDM_ERROR_TIMEOUT) + TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)", + wl_egl_surface); + + wl_egl_surface->vblank_done = TPL_TRUE; + + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + tbm_surface = (tbm_surface_h)__tpl_list_pop_front( + wl_egl_surface->vblank_waiting_buffers, + NULL); + _thread_wl_surface_commit(wl_egl_surface, tbm_surface); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); +} + +static void +__cb_buffer_fenced_release(void *data, + struct zwp_linux_buffer_release_v1 *release, int32_t fence) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + if (wl_egl_buffer) + tbm_surface = wl_egl_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + if (wl_egl_buffer->need_to_release) { + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_queue_error_e tsq_err; + + if (wl_egl_surface->committed_buffers) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + __tpl_list_remove_data(wl_egl_surface->committed_buffers, + (void *)tbm_surface, + TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } + + wl_egl_buffer->need_to_release = TPL_FALSE; + + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + wl_egl_buffer->buffer_release = NULL; + + wl_egl_buffer->release_fence_fd = fence; + + TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", + _get_tbm_surface_bo_name(tbm_surface), + fence); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + TPL_LOG_T("WL_EGL", + "[FENCED_RELEASE] wl_buffer(%p) 
tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface), + fence); + + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + tbm_surface_internal_unref(tbm_surface); + } + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } +} + +static void +__cb_buffer_immediate_release(void *data, + struct zwp_linux_buffer_release_v1 *release) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + if (wl_egl_buffer) + tbm_surface = wl_egl_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + if (wl_egl_buffer->need_to_release) { + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_queue_error_e tsq_err; + + if (wl_egl_surface->committed_buffers) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + __tpl_list_remove_data(wl_egl_surface->committed_buffers, + (void *)tbm_surface, + TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } + + wl_egl_buffer->need_to_release = TPL_FALSE; + + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + wl_egl_buffer->buffer_release = NULL; + + wl_egl_buffer->release_fence_fd = -1; + + TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + TPL_LOG_T("WL_EGL", + "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + tbm_surface_internal_unref(tbm_surface); + } + } else 
{ + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } +} + +static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = { + __cb_buffer_fenced_release, + __cb_buffer_immediate_release, +}; + +static void +__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; + + if (wl_egl_buffer) + tbm_surface = wl_egl_buffer->tbm_surface; + + if (tbm_surface_internal_is_valid(tbm_surface)) { + if (wl_egl_buffer->need_to_release) { + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_queue_error_e tsq_err; + + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + if (wl_egl_surface->committed_buffers) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + __tpl_list_remove_data(wl_egl_surface->committed_buffers, + (void *)tbm_surface, + TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } + + wl_egl_buffer->need_to_release = TPL_FALSE; + + TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + + tbm_surface_internal_unref(tbm_surface); + } + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + } +} + +void +__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) +{ + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->disp_source; + + TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_egl_buffer, wl_egl_buffer->wl_buffer, 
wl_egl_buffer->tbm_surface); + + wl_display_flush(wl_egl_display->wl_display); + + if (wl_egl_buffer->wl_buffer) + wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer); + + if (wl_egl_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send commit_sync signal to fd(%d)", + wl_egl_buffer->commit_sync_fd); + close(wl_egl_buffer->commit_sync_fd); + wl_egl_buffer->commit_sync_fd = -1; + } + + if (wl_egl_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + wl_egl_buffer->presentation_sync_fd); + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; + + if (wl_egl_buffer->presentation_feedback) + wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback); + wl_egl_buffer->presentation_feedback = NULL; + } + + if (wl_egl_buffer->rects) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } + + wl_egl_buffer->tbm_surface = NULL; + + free(wl_egl_buffer); +} + +static void +__cb_buffer_remove_from_list(void *data) +{ + tbm_surface_h tbm_surface = (tbm_surface_h)data; + + if (tbm_surface && tbm_surface_internal_is_valid(tbm_surface)) + tbm_surface_internal_unref(tbm_surface); +} + +static int +_get_tbm_surface_bo_name(tbm_surface_h tbm_surface) +{ + return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); +} + +static void +_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) +{ + int count = 0; + int idx = 0; + tpl_list_node_t *node = NULL; + tbm_surface_h tbm_surface = NULL; + + /* vblank waiting list */ + count = __tpl_list_get_count(wl_egl_surface->vblank_waiting_buffers); + TPL_DEBUG("VBLANK WAITING BUFFERS | wl_egl_surface(%p) list(%p) count(%d)", + wl_egl_surface, wl_egl_surface->vblank_waiting_buffers, 
count); + + while ((!node && + (node = __tpl_list_get_front_node(wl_egl_surface->vblank_waiting_buffers))) || + (node && (node = __tpl_list_node_next(node)))) { + tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node); + TPL_DEBUG("VBLANK WAITING BUFFERS | %d | tbm_surface(%p) bo(%d)", + idx, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + idx++; + } + + idx = 0; + node = NULL; + + /* in use buffers list */ + count = __tpl_list_get_count(wl_egl_surface->in_use_buffers); + TPL_DEBUG("DEQUEUED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)", + wl_egl_surface, wl_egl_surface->in_use_buffers, count); + + while ((!node && + (node = __tpl_list_get_front_node(wl_egl_surface->in_use_buffers))) || + (node && (node = __tpl_list_node_next(node)))) { + tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node); + TPL_DEBUG("DEQUEUED BUFFERS | %d | tbm_surface(%p) bo(%d)", + idx, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + idx++; + } + + idx = 0; + node = NULL; + + /* committed buffers list */ + count = __tpl_list_get_count(wl_egl_surface->committed_buffers); + TPL_DEBUG("COMMITTED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)", + wl_egl_surface, wl_egl_surface->committed_buffers, count); + + while ((!node && + (node = __tpl_list_get_front_node(wl_egl_surface->committed_buffers))) || + (node && (node = __tpl_list_node_next(node)))) { + tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node); + TPL_DEBUG("COMMITTED BUFFERS | %d | tbm_surface(%p) bo(%d)", + idx, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); + idx++; + } +} -- 2.7.4 From ddf6af3b048eb3dc64b7c7e43bb5db9bf5b905fa Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 27 Jan 2021 12:44:03 +0900 Subject: [PATCH 13/16] Fix some build problems of utils_gthread Change-Id: I3c7e46a2196286664f4284c466b943cbf8ed1ba7 Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 39 +++++++++++++++++++++++++-------------- src/tpl_utils_gthread.h | 5 +---- 2 files changed, 26 insertions(+), 
18 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index 8bfcb01..b1480c1 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -34,7 +34,7 @@ struct _tpl_gsource { static gpointer _tpl_gthread_init(gpointer data) { - tpl_gthread *thread = data; + tpl_gthread *thread = (tpl_gthread *)data; g_mutex_lock(&thread->thread_mutex); @@ -44,15 +44,17 @@ _tpl_gthread_init(gpointer data) g_cond_signal(&thread->thread_cond); g_mutex_unlock(&thread->thread_mutex); - g_main_loop_run(thread->twe_loop); + g_main_loop_run(thread->loop); return thread; } -static void -_tpl_gthread_fini(gpointer data) +static tpl_bool_t +_tpl_gthread_fini(tpl_gsource *source, uint64_t message) { - tpl_gthread *thread = data; + tpl_gthread *thread = (tpl_gthread *)source->data; + + TPL_IGNORE(message); g_mutex_lock(&thread->thread_mutex); @@ -60,7 +62,9 @@ _tpl_gthread_fini(gpointer data) thread->deinit_func(thread->func_data); g_cond_signal(&thread->thread_cond); - g_mutex_unlock(&thread->thread_mutex); + g_mutex_unlock(&thread->thread_mutex); + + return TPL_FALSE; } static tpl_gsource_functions thread_destroy_funcs = { @@ -107,7 +111,7 @@ tpl_gthread_create(const char *thread_name, g_mutex_lock(&new_thread->thread_mutex); new_thread->destroy_sig_source = tpl_gsource_create(new_thread, new_thread, -1, - &thread_destroy_funcs, TPL_TRUE); + &thread_destroy_funcs, SOURCE_TYPE_FINALIZER); new_thread->loop = loop; new_thread->init_func = init_func; @@ -133,11 +137,14 @@ tpl_gthread_destroy(tpl_gthread *thread, tpl_gthread_func deinit_func) g_thread_join(thread->thread); g_main_loop_unref(thread->loop); + thread->loop = NULL; + g_mutex_unlock(&thread->thread_mutex); g_mutex_clear(&thread->thread_mutex); g_cond_clear(&thread->thread_cond); - thread->func = NULL; + thread->func_data = NULL; + thread->thread = NULL; free(thread); thread = NULL; @@ -179,7 +186,7 @@ static gboolean _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) { tpl_gsource 
*gsource = (tpl_gsource *)source; - tpl_bool_t ret = TPL_GSOURCE_CONTINUE; + gboolean ret = G_SOURCE_CONTINUE; GIOCondition cond = g_source_query_unix_fd(source, gsource->tag); TPL_IGNORE(cb); @@ -209,7 +216,7 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) g_source_unref(&del_source->gsource); g_cond_signal(&del_source->thread->thread_cond); - g_mutex_unlock(&del_source->thread_mutex); + g_mutex_unlock(&del_source->thread->thread_mutex); } } } else { @@ -224,7 +231,7 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) g_source_remove_unix_fd(&gsource->gsource, gsource->tag); g_source_destroy(&gsource->gsource); g_source_unref(&gsource->gsource); - ret = TPL_GSOURCE_REMOVE; + ret = G_SOURCE_REMOVE; } return ret; @@ -359,8 +366,12 @@ tpl_gsource_send_message(tpl_gsource *source, uint64_t message) void * tpl_gsource_get_data(tpl_gsource *source) { - if (source && source->data) - return source->data; + void *data = NULL; + + if (source) + data = source->data; + + return data; } tpl_bool_t @@ -373,7 +384,7 @@ tpl_gsource_check_io_condition(tpl_gsource *source) return TPL_FALSE; } - cond = g_source_query_unix_fd(source->gsource, source->tag); + cond = g_source_query_unix_fd(&source->gsource, source->tag); if (cond & G_IO_IN) return TPL_TRUE; diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h index a30b86f..ee30b05 100644 --- a/src/tpl_utils_gthread.h +++ b/src/tpl_utils_gthread.h @@ -11,9 +11,6 @@ typedef struct _tpl_gthread tpl_gthread; typedef struct _tpl_gsource tpl_gsource; typedef struct _tpl_gsource_functions tpl_gsource_functions; -typedef TPL_TRUE TPL_GSOURCE_CONTINUE; -typedef TPL_FALSE TPL_GSOURCE_REMOVE; - typedef void (*tpl_gthread_func) (void *user_data); typedef GMutex tpl_gmutex; @@ -48,7 +45,7 @@ struct _tpl_gsource_functions { */ tpl_gthread * tpl_gthread_create(const char *thread_name, - tpl_gthread_init_func init_func, void *func_data); + tpl_gthread_func init_func, void *func_data); /** * 
Stop thread and Destroy tpl_gthread -- 2.7.4 From 49882ec7f3934ea57af5dabb1a4ac92af3430920 Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 27 Jan 2021 16:20:48 +0900 Subject: [PATCH 14/16] Corrected syntax errors Change-Id: Id7a41946e66e13dd6b850c38636bcfbab743581e Signed-off-by: Joonbum Ko --- src/tpl_wl_egl.c | 3311 +++++++++++++++++++++++++++--------------------------- 1 file changed, 1649 insertions(+), 1662 deletions(-) diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 47ffea2..21f8073 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -105,9 +105,6 @@ struct _tpl_wl_egl_surface { int fd; } presentation_sync; - tpl_gmutex free_queue_mutex; - tpl_gcond free_queue_cond; - tpl_gmutex surf_mutex; tpl_gcond surf_cond; @@ -181,7 +178,28 @@ struct _tpl_wl_egl_buffer { tpl_wl_egl_surface_t *wl_egl_surface; }; -tpl_bool_t + +static void +__cb_buffer_remove_from_list(void *data); +static int +_get_tbm_surface_bo_name(tbm_surface_h tbm_surface); +static void +_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface); +static void +__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer); +static tpl_wl_egl_buffer_t * +_get_wl_egl_buffer(tbm_surface_h tbm_surface); +static int +_write_to_eventfd(int eventfd); +static void +_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface); +static tpl_result_t +_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface); +static void +_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, + tbm_surface_h tbm_surface); + +static tpl_bool_t _check_native_handle_is_wl_display(tpl_handle_t display) { struct wl_interface *wl_egl_native_dpy = *(void **) display; @@ -216,7 +234,7 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) if (!wl_egl_display) { TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource); TPL_WARN("tdm_source(%p) will be removed from thread.", gsource); - return TPL_GSOURCE_REMOVE; + return TPL_FALSE; } tdm_err = 
tdm_client_handle_events(wl_egl_display->tdm_client); @@ -235,10 +253,10 @@ __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message) wl_egl_display->tdm_source = NULL; - return G_SOURCE_REMOVE; + return TPL_FALSE; } - return G_SOURCE_CONTINUE; + return TPL_TRUE; } static void @@ -308,14 +326,14 @@ _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display) TPL_LOG_T("WL_EGL", "TPL_WAIT_VBLANK:DEFAULT_ENABLED"); TPL_LOG_T("WL_EGL", "wl_egl_display(%p) tdm_source(%p) tdm_client(%p)", - wl_egl_display, tdm_source, client); + wl_egl_display, tdm_source, tdm_client); return TPL_ERROR_NONE; } #define IMPL_TIZEN_SURFACE_SHM_VERSION 2 -void +static void __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, uint32_t name, const char *interface, uint32_t version) @@ -347,7 +365,7 @@ __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry, } } -void +static void __cb_wl_resistry_global_remove_callback(void *data, struct wl_registry *wl_registry, uint32_t name) @@ -386,175 +404,7 @@ _wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display, wl_egl_display->last_error = errno; } -static void* -_thread_init(void *data) -{ - tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; - - if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) { - TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)", - wl_egl_display, wl_egl_display->wl_display); - } - - if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) { - TPL_WARN("Failed to initialize tdm-client. 
TPL_WAIT_VLANK:DISABLED"); - } - - return wl_egl_display; -} - -static void -_thread_fini(void *data) -{ - tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; - - if (wl_egl_display->tdm_initialized) - tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_FALSE); - if (wl_egl_display->wl_initialized) - _thread_wl_display_fini(wl_egl_display); -} - -static tpl_result_t -__tpl_wl_egl_display_init(tpl_display_t *display) -{ - tpl_wl_egl_display_t *wl_egl_display = NULL; - - TPL_ASSERT(display); - - /* Do not allow default display in wayland. */ - if (!display->native_handle) { - TPL_ERR("Invalid native handle for display."); - return TPL_ERROR_INVALID_PARAMETER; - } - - if (!_check_native_handle_is_wl_display(display->native_handle)) { - TPL_ERR("native_handle(%p) is not wl_display", display->native_handle); - return TPL_ERROR_INVALID_PARAMETER; - } - - ev_queue = wl_display_create_queue(display->native_handle); - if (!ev_queue) { - TPL_ERR("Failed to create wl_event_queue."); - return TPL_ERROR_OUT_OF_MEMORY; - } - - wl_egl_display = (tpl_wl_egl_display_t *) calloc(1, - sizeof(tpl_wl_egl_display_t)); - if (!wl_egl_display) { - TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t."); - return TPL_ERROR_OUT_OF_MEMORY; - } - - display->backend.data = wl_egl_display; - display->bufmgr_fd = -1; - - wl_egl_display->tdm_initialized = TPL_FALSE; - wl_egl_display->wl_initialized = TPL_FALSE; - - wl_egl_display->ev_queue = ev_queue; - wl_egl_display->wl_display = (struct wl_display *)display->native_handle; - wl_egl_display->last_error = 0; - wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled - wl_egl_display->prepared = TPL_FALSE; - - /* Wayland Interfaces */ - wl_egl_display->tss = NULL; - wl_egl_display->presentation = NULL; - wl_egl_display->explicit_sync = NULL; - - wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled - env = tpl_getenv("TPL_WAIT_VBLANK"); - if (env && !atoi(env)) { - wl_egl_display->use_wait_vblank = 
TPL_FALSE; - } - - tpl_gmutex_init(&wl_egl_display->wl_event_mutex); - - /* Create gthread */ - wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", - _thread_init, (void *)wl_egl_display); - if (!wl_egl_display->thread) { - TPL_ERR("Failed to create wl_egl_thread"); - goto free_display; - } - - wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread, - (void *)wl_egl_display, - wl_display_get_fd(wl_egl_display->wl_display), - &disp_funcs, SOURCE_TYPE_NORMAL); - if (!wl_egl_display->disp_source) { - TPL_ERR("Failed to add native_display(%p) to thread(%p)", - display->native_handle, - wl_egl_display->thread); - goto free_display; - } - - TPL_LOG_T("WL_EGL", - "[INIT DISPLAY] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", - wl_egl_display, - wl_egl_display->thread, - wl_egl_display->wl_display); - - TPL_LOG_T("WL_EGL", - "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%S) USE_EXPLICIT_SYNC(%s)", - wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE", - wl_egl_display->tss ? "TRUE" : "FALSE", - wl_egl_display->use_explicit_sync ? 
"TRUE" : "FALSE"); - - return TPL_ERROR_NONE; - -free_display: - if (wl_egl_display->thread) { - tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); - tpl_gthread_destroy(wl_egl_display->thread, _thread_fini); - } - - wl_egl_display->thread = NULL; - free(wl_egl_display); - - display->backend.data = NULL; - return TPL_ERROR_INVALID_OPERATION; -} - -static void -__tpl_wl_egl_display_fini(tpl_display_t *display) -{ - tpl_wl_egl_display_t *wl_egl_display; - - TPL_ASSERT(display); - - wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data; - if (wl_egl_display) { - TPL_LOG_T("WL_EGL", - "[FINI] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", - wl_egl_display, - wl_egl_display->thread, - wl_egl_display->wl_display); - - if (wl_egl_display->gsource) { - tpl_gsource_destroy(wl_egl_display->gsource, TPL_TRUE); - wl_egl_display->gsource = NULL; - } - - if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) { - tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); - wl_egl_display->tdm_source = NULL; - } - - if (wl_egl_display->thread) { - tpl_gthread_destroy(wl_egl_display->thread, NULL); - wl_egl_display->wl_egl_thread = NULL; - } - - tpl_gmutex_clear(&wl_egl_display->wl_event_mutex); - - free(wl_egl_display); - } - - display->backend.data = NULL; -} - -static tpl_result_t +tpl_result_t _thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display) { struct wl_registry *registry = NULL; @@ -647,7 +497,7 @@ fini: return result; } -static void +void _thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display) { /* If wl_egl_display is in prepared state, cancel it */ @@ -688,6 +538,35 @@ _thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display) wl_egl_display->wl_display); } + +static void* +_thread_init(void *data) +{ + tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; + + if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) { + TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)", + 
wl_egl_display, wl_egl_display->wl_display); + } + + if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) { + TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED"); + } + + return wl_egl_display; +} + +static void +_thread_fini(void *data) +{ + tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data; + + if (wl_egl_display->tdm_initialized) + tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_FALSE); + if (wl_egl_display->wl_initialized) + _thread_wl_display_fini(wl_egl_display); +} + static tpl_bool_t __thread_func_disp_prepare(tpl_gsource *gsource) { @@ -766,7 +645,7 @@ __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) * to remove the gsource from the main loop. * This is because wl_egl_display is not valid since last_error was set.*/ if (wl_egl_display->last_error) { - return TPL_GSOURCE_REMOVE; + return TPL_FALSE; } tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); @@ -780,7 +659,7 @@ __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message) wl_display_flush(wl_egl_display->wl_display); tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return TPL_GSOURCE_CONTINUE; + return TPL_TRUE; } static void @@ -798,6 +677,7 @@ __thread_func_disp_finalize(tpl_gsource *gsource) return; } + static tpl_gsource_functions disp_funcs = { .prepare = __thread_func_disp_prepare, .check = __thread_func_disp_check, @@ -806,40 +686,176 @@ static tpl_gsource_functions disp_funcs = { }; static tpl_result_t -__tpl_wl_egl_display_query_config(tpl_display_t *display, - tpl_surface_type_t surface_type, - int red_size, int green_size, - int blue_size, int alpha_size, - int color_depth, int *native_visual_id, - tpl_bool_t *is_slow) +__tpl_wl_egl_display_init(tpl_display_t *display) { + tpl_wl_egl_display_t *wl_egl_display = NULL; + TPL_ASSERT(display); - if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 && - green_size == 8 && blue_size == 8 && - (color_depth == 32 || color_depth == 24)) { + /* Do not allow default 
display in wayland. */ + if (!display->native_handle) { + TPL_ERR("Invalid native handle for display."); + return TPL_ERROR_INVALID_PARAMETER; + } - if (alpha_size == 8) { - if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888; - if (is_slow) *is_slow = TPL_FALSE; - return TPL_ERROR_NONE; - } - if (alpha_size == 0) { - if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888; - if (is_slow) *is_slow = TPL_FALSE; - return TPL_ERROR_NONE; - } + if (!_check_native_handle_is_wl_display(display->native_handle)) { + TPL_ERR("native_handle(%p) is not wl_display", display->native_handle); + return TPL_ERROR_INVALID_PARAMETER; } - return TPL_ERROR_INVALID_PARAMETER; -} + wl_egl_display = (tpl_wl_egl_display_t *) calloc(1, + sizeof(tpl_wl_egl_display_t)); + if (!wl_egl_display) { + TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } -static tpl_result_t -__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id, - int alpha_size) -{ - TPL_IGNORE(display); - TPL_IGNORE(visual_id); + display->backend.data = wl_egl_display; + display->bufmgr_fd = -1; + + wl_egl_display->tdm_initialized = TPL_FALSE; + wl_egl_display->wl_initialized = TPL_FALSE; + + wl_egl_display->ev_queue = NULL; + wl_egl_display->wl_display = (struct wl_display *)display->native_handle; + wl_egl_display->last_error = 0; + wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled + wl_egl_display->prepared = TPL_FALSE; + + /* Wayland Interfaces */ + wl_egl_display->tss = NULL; + wl_egl_display->presentation = NULL; + wl_egl_display->explicit_sync = NULL; + + wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled + { + char *env = tpl_getenv("TPL_WAIT_VBLANK"); + if (env && !atoi(env)) { + wl_egl_display->use_wait_vblank = TPL_FALSE; + } + } + + tpl_gmutex_init(&wl_egl_display->wl_event_mutex); + + /* Create gthread */ + wl_egl_display->thread = tpl_gthread_create("wl_egl_thread", + 
(tpl_gthread_func)_thread_init, (void *)wl_egl_display); + if (!wl_egl_display->thread) { + TPL_ERR("Failed to create wl_egl_thread"); + goto free_display; + } + + wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread, + (void *)wl_egl_display, + wl_display_get_fd(wl_egl_display->wl_display), + &disp_funcs, SOURCE_TYPE_NORMAL); + if (!wl_egl_display->disp_source) { + TPL_ERR("Failed to add native_display(%p) to thread(%p)", + display->native_handle, + wl_egl_display->thread); + goto free_display; + } + + TPL_LOG_T("WL_EGL", + "[INIT DISPLAY] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_egl_display, + wl_egl_display->thread, + wl_egl_display->wl_display); + + TPL_LOG_T("WL_EGL", + "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)", + wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE", + wl_egl_display->tss ? "TRUE" : "FALSE", + wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE"); + + return TPL_ERROR_NONE; + +free_display: + if (wl_egl_display->thread) { + tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); + tpl_gthread_destroy(wl_egl_display->thread, _thread_fini); + } + + wl_egl_display->thread = NULL; + free(wl_egl_display); + + display->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; +} + +static void +__tpl_wl_egl_display_fini(tpl_display_t *display) +{ + tpl_wl_egl_display_t *wl_egl_display; + + TPL_ASSERT(display); + + wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data; + if (wl_egl_display) { + TPL_LOG_T("WL_EGL", + "[FINI] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", + wl_egl_display, + wl_egl_display->thread, + wl_egl_display->wl_display); + + if (wl_egl_display->disp_source) { + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); + wl_egl_display->disp_source = NULL; + } + + if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) { + tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); + wl_egl_display->tdm_source = NULL; + } + + if 
(wl_egl_display->thread) { + tpl_gthread_destroy(wl_egl_display->thread, NULL); + wl_egl_display->thread = NULL; + } + + tpl_gmutex_clear(&wl_egl_display->wl_event_mutex); + + free(wl_egl_display); + } + + display->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_egl_display_query_config(tpl_display_t *display, + tpl_surface_type_t surface_type, + int red_size, int green_size, + int blue_size, int alpha_size, + int color_depth, int *native_visual_id, + tpl_bool_t *is_slow) +{ + TPL_ASSERT(display); + + if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 && + green_size == 8 && blue_size == 8 && + (color_depth == 32 || color_depth == 24)) { + + if (alpha_size == 8) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + if (alpha_size == 0) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + } + + return TPL_ERROR_INVALID_PARAMETER; +} + +static tpl_result_t +__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id, + int alpha_size) +{ + TPL_IGNORE(display); + TPL_IGNORE(visual_id); TPL_IGNORE(alpha_size); return TPL_ERROR_NONE; } @@ -923,7 +939,7 @@ __tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap) } tpl_bool_t -__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy) +__tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy) { struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy; TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE); @@ -941,403 +957,357 @@ __tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy) return TPL_FALSE; } -static tpl_result_t -__tpl_wl_egl_surface_init(tpl_surface_t *surface) +/* -- BEGIN -- wl_egl_window callback functions */ +static void +__cb_destroy_callback(void *private) { - tpl_wl_egl_display_t *wl_egl_display = NULL; + struct tizen_private *tizen_private = (struct 
tizen_private *)private; tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tbm_surface_queue_h tbm_queue = NULL; - tpl_gsource *surf_source = NULL; - tpl_result_t ret = TPL_ERROR_NONE; - struct wl_egl_window *wl_egl_window = - (struct wl_egl_window *)surface->native_handle; + if (!tizen_private) { + TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface"); + return; + } - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); - TPL_ASSERT(surface->native_handle); + wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + if (wl_egl_surface) { + TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.", + wl_egl_surface->wl_egl_window); + TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface."); - wl_egl_display = - (tpl_wl_egl_display_t *)surface->display->backend.data; - if (!wl_egl_display) { - TPL_ERR("Invalid parameter. wl_egl_display(%p)", - wl_egl_display); - return TPL_ERROR_INVALID_PARAMETER; - } + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + wl_egl_surface->wl_egl_window->destroy_window_callback = NULL; + wl_egl_surface->wl_egl_window->resize_callback = NULL; + wl_egl_surface->wl_egl_window->driver_private = NULL; + wl_egl_surface->wl_egl_window = NULL; + wl_egl_surface->wl_surface = NULL; - wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1, - sizeof(tpl_wl_egl_surface_t)); - if (!wl_egl_surface) { - TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t."); - return TPL_ERROR_OUT_OF_MEMORY; - } + tizen_private->set_window_serial_callback = NULL; + tizen_private->rotate_callback = NULL; + tizen_private->get_rotation_capability = NULL; + tizen_private->set_frontbuffer_callback = NULL; + tizen_private->create_commit_sync_fd = NULL; + tizen_private->create_presentation_sync_fd = NULL; + tizen_private->data = NULL; - surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface, - -1, surf_funcs, SOURCE_TYPE_NORMAL); - 
if (!surf_source) { - TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)", - wl_egl_surface); - goto surf_source_create_fail; + free(tizen_private); + tizen_private = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } +} - surface->backend.data = (void *)wl_egl_surface; - surface->width = wl_egl_window->width; - surface->height = wl_egl_window->height; - surface->rotation = 0; +static void +__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); - wl_egl_surface->tpl_surface = surface; - wl_egl_surface->width = wl_egl_window->width; - wl_egl_surface->height = wl_egl_window->height; - wl_egl_surface->format = surface->format; + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + int cur_w, cur_h, req_w, req_h, format; - wl_egl_surface->surf_source = surf_source; - wl_egl_surface->wl_egl_window = wl_egl_window; - wl_egl_surface->wl_surface = wl_egl_window->surface; + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return; + } - wl_egl_surface->wl_egl_display = wl_egl_display; + format = wl_egl_surface->format; + cur_w = wl_egl_surface->width; + cur_h = wl_egl_surface->height; + req_w = wl_egl_window->width; + req_h = wl_egl_window->height; - wl_egl_surface->reset = TPL_FALSE; - wl_egl_surface->is_activated = TPL_FALSE; - wl_egl_surface->need_to_enqueue = TPL_FALSE; - wl_egl_surface->prerotation_capability = TPL_FALSE; - wl_egl_surface->vblank_done = TPL_TRUE; - wl_egl_surface->use_render_done_fence = TPL_FALSE; - wl_egl_surface->set_serial_is_used = TPL_FALSE; + TPL_INFO("[WINDOW_RESIZE]", + "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)", + wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h); - wl_egl_surface->latest_transform = 0; - wl_egl_surface->render_done_cnt = 0; - wl_egl_surface->serial 
= 0; + if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format) + != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue); + return; + } +} +/* -- END -- wl_egl_window callback functions */ - wl_egl_surface->vblank = NULL; - wl_egl_surface->tss_flusher = NULL; - wl_egl_surface->surface_sync = NULL; +/* -- BEGIN -- wl_egl_window tizen private callback functions */ - wl_egl_surface->post_interval = surface->post_interval; +/* There is no usecase for using prerotation callback below */ +static void +__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); - wl_egl_surface->commit_sync.fd = -1; - wl_egl_surface->presentation_sync.fd = -1; + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + int rotation = tizen_private->rotation; - { - struct tizen_private *tizen_private = NULL; + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return; + } - if (wl_egl_window->driver_private) - tizen_private = (struct tizen_private *)wl_egl_window->driver_private; - else { - tizen_private = tizen_private_create(); - wl_egl_window->driver_private = (void *)tizen_private; - } + TPL_INFO("[WINDOW_ROTATE]", + "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)", + wl_egl_surface, wl_egl_window, + wl_egl_surface->rotation, rotation); - if (tizen_private) { - tizen_private->data = (void *)wl_egl_surface; - tizen_private->rotate_callback = (void *)__cb_rotate_callback; - tizen_private->get_rotation_capability = (void *) - __cb_get_rotation_capability; - tizen_private->set_window_serial_callback = (void *) - __cb_set_window_serial_callback; - tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd; - tizen_private->create_presentation_sync_fd = (void 
*)__cb_create_presentation_sync_fd; + wl_egl_surface->rotation = rotation; +} - wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback; - wl_egl_window->resize_callback = (void *)__cb_resize_callback; - } +/* There is no usecase for using prerotation callback below */ +static int +__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window, + void *private) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); + + int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE; + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return rotation_capability; } - tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex); - tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex); + if (wl_egl_surface->prerotation_capability == TPL_TRUE) + rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED; + else + rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED; - tpl_gmutex_init(&wl_egl_surface->free_queue_mutex); - tpl_gmutex_init(&wl_egl_surface->surf_mutex); - tpl_gcond_init(&wl_egl_surface->free_queue_cond); - tpl_gcond_init(&wl_egl_surface->surf_cond); - /* Initialize in thread */ - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - tpl_gsource_send_message(wl_egl_surface->surf_source, 1); - tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + return rotation_capability; +} - TPL_ASSERT(wl_egl_surface->tbm_queue); +static void +__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window, + void *private, unsigned int serial) +{ + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); - TPL_INFO("[SURFACE_INIT]", - "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)", - surface, wl_egl_surface, wl_egl_surface->surf_source); + struct 
tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - return TPL_ERROR_NONE; + if (!wl_egl_surface) { + TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", + wl_egl_window); + return; + } -surf_source_create_fail: - free(wl_egl_surface); - surface->backend.data = NULL; - return TPL_ERROR_INVALID_OPERATION; + wl_egl_surface->set_serial_is_used = TPL_TRUE; + wl_egl_surface->serial = serial; } -static tbm_surface_queue_h -_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface, - struct wayland_tbm_client *wl_tbm_client, - int num_buffers) +static int +__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) { - tbm_surface_queue_h tbm_queue = NULL; - tbm_bufmgr bufmgr = NULL; - unsigned int capability; + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); - struct wl_surface *wl_surface = wl_egl_surface->wl_surface; - int width = wl_egl_surface->width; - int height = wl_egl_surface->height; - int format = wl_egl_surface->format; + int commit_sync_fd = -1; - if (!wl_tbm_client || !wl_surface) { - TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)", - wl_tbm_client, wl_surface); - return NULL; + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. 
wl_egl_surface(%p) is NULL", wl_egl_surface); + return -1; } - bufmgr = tbm_bufmgr_init(-1); - capability = tbm_bufmgr_get_capability(bufmgr); - tbm_bufmgr_deinit(bufmgr); + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); - if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) { - tbm_queue = wayland_tbm_client_create_surface_queue_tiled( - wl_tbm_client, - wl_surface, - num_buffers, - width, - height, - format); - } else { - tbm_queue = wayland_tbm_client_create_surface_queue( - wl_tbm_client, - wl_surface, - num_buffers, - width, - height, - format); + if (wl_egl_surface->commit_sync.fd != -1) { + commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); + TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)", + wl_egl_surface->commit_sync.fd, commit_sync_fd); + TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)", + wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd); + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + return commit_sync_fd; } - if (tbm_queue) { - TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)", - wl_tbm_client); - return NULL; + wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC); + if (wl_egl_surface->commit_sync.fd == -1) { + TPL_ERR("Failed to create commit_sync_fd. 
wl_egl_surface(%p)", wl_egl_surface); + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + return -1; } - if (tbm_surface_queue_set_modes( - tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) != - TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", - tbm_queue); - tbm_surface_queue_destroy(tbm_queue); - return NULL; - } + commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); - if (tbm_surface_queue_add_reset_cb( - tbm_queue, - __cb_tbm_queue_reset_callback, - (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", - tbm_queue); - tbm_surface_queue_destroy(tbm_queue); - return NULL; - } + TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)", + wl_egl_surface->commit_sync.fd, commit_sync_fd); + TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)", + wl_egl_surface, commit_sync_fd); - if (tbm_surface_queue_add_acquirable_cb( - tbm_queue, - __cb_tbm_queue_acquirable_callback, - (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", - tbm_queue); - tbm_surface_queue_destroy(tbm_queue); - return NULL; - } + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); - return tbm_queue; + return commit_sync_fd; } -static tdm_client_vblank* -_thread_create_tdm_client_vblank(tdm_client *tdm_client) +static int +__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private) { - tdm_client_vblank *vblank = NULL; - tdm_client_output *tdm_output = NULL; - tdm_error tdm_err = TDM_ERROR_NONE; + TPL_ASSERT(private); + TPL_ASSERT(wl_egl_window); - if (!tdm_client) { - TPL_ERR("Invalid parameter. 
tdm_client(%p)", tdm_client); - return NULL; + int presentation_sync_fd = -1; + + struct tizen_private *tizen_private = (struct tizen_private *)private; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + + if (!wl_egl_surface) { + TPL_ERR("Invalid parameter. wl_egl_surface is NULL"); + return -1; } - tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err); - if (!tdm_output || tdm_err != TDM_ERROR_NONE) { - TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err); - return NULL; + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + if (wl_egl_surface->presentation_sync.fd != -1) { + presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); + TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)", + wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", + wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + return presentation_sync_fd; } - vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err); - if (!vblank || tdm_err != TDM_ERROR_NONE) { - TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err); - return NULL; + wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC); + if (wl_egl_surface->presentation_sync.fd == -1) { + TPL_ERR("Failed to create presentation_sync_fd. 
wl_egl_surface(%p)", wl_egl_surface); + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + return -1; } - tdm_client_vblank_set_enable_fake(vblank, 1); - tdm_client_vblank_set_sync(vblank, 0); + presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); + TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)", + wl_egl_surface->presentation_sync.fd, presentation_sync_fd); + TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", + wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - return vblank; + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + + return presentation_sync_fd; } +/* -- END -- wl_egl_window tizen private callback functions */ -static void -_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) +/* -- BEGIN -- tizen_surface_shm_flusher_listener */ +static void __cb_tss_flusher_flush_callback(void *data, + struct tizen_surface_shm_flusher *tss_flusher) { - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - wl_egl_surface->tbm_queue = _thread_create_tbm_queue( - wl_egl_surface->wl_surface, - wl_egl_display->wl_tbm_client, - wl_egl_surface->width, - wl_egl_surface->height, - wl_egl_surface->format, - CLIENT_QUEUE_SIZE); - if (!wl_egl_surface->tbm_queue) { - TPL_ERR("Failed to create tbm_queue. 
wl_egl_surface(%p) wl_tbm_client(%p)", - wl_egl_surface, wl_egl_display->wl_tbm_client); - return; - } + TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); - TPL_INFO("[QUEUE_CREATION]", - "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)", - wl_egl_surface, wl_egl_surface->wl_surface, - wl_egl_display->wl_tbm_client); - TPL_INFO("[QUEUE_CREATION]", - "tbm_queue(%p) size(%d x %d) X %d format(%d)", - wl_egl_surface->tbm_queue, - wl_egl_surface->width, - wl_egl_surface->height, - CLIENT_QUEUE_SIZE, - wl_egl_surface->format); - - wl_egl_surface->vblank = _thread_create_tdm_client_vblank( - wl_egl_display->tdm_client); - if (wl_egl_surface->vblank) { - TPL_INFO("[VBLANK_INIT]", - "wl_egl_surface(%p) tdm_client(%p) vblank(%p)", - wl_egl_surface, wl_egl_display->tdm_client, - wl_egl_surface->vblank); + tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue); + return; } +} - if (wl_egl_display->tss) { - wl_egl_surface->tss_flusher = - tizen_surface_shm_get_flusher(wl_egl_display->tss, - wl_egl_surface->wl_surface); - } +static void __cb_tss_flusher_free_flush_callback(void *data, + struct tizen_surface_shm_flusher *tss_flusher) +{ + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - if (wl_egl_surface->tss_flusher) { - tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher, - &tss_flusher_listener, - wl_egl_surface); - TPL_INFO("[FLUSHER_INIT]", - "wl_egl_surface(%p) tss_flusher(%p)", - wl_egl_surface, wl_egl_surface->tss_flusher); - } + TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); - if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) { - wl_egl_surface->surface_sync = - 
zwp_linux_explicit_synchronization_v1_get_synchronization( - wl_egl_display->explicit_sync, wl_egl_surface->wl_surface); - if (wl_egl_surface->surface_sync) { - TPL_INFO("[EXPLICIT_SYNC_INIT]", - "wl_egl_surface(%p) surface_sync(%p)", - wl_egl_surface, wl_egl_surface->surface_sync); - } else { - TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)", - wl_egl_surface); - wl_egl_display->use_explicit_sync = TPL_FALSE; - } + tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue); + return; } - - wl_egl_surface->committed_buffers = __tpl_list_alloc(); - wl_egl_surface->in_use_buffers = __tpl_list_alloc(); - wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc(); - wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc(); - wl_egl_surface->presentation_feedbacks = __tpl_list_alloc(); } +static const struct tizen_surface_shm_flusher_listener +tss_flusher_listener = { + __cb_tss_flusher_flush_callback, + __cb_tss_flusher_free_flush_callback +}; +/* -- END -- tizen_surface_shm_flusher_listener */ + + +/* -- BEGIN -- tbm_surface_queue callback funstions */ static void -__tpl_wl_egl_surface_fini(tpl_surface_t *surface) +__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, + void *data) { tpl_wl_egl_surface_t *wl_egl_surface = NULL; tpl_wl_egl_display_t *wl_egl_display = NULL; + tpl_surface_t *surface = NULL; + tpl_bool_t is_activated = TPL_FALSE; + int width, height; - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - - TPL_CHECK_ON_NULL_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW); - - wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data; + wl_egl_surface = (tpl_wl_egl_surface_t *)data; TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); wl_egl_display = wl_egl_surface->wl_egl_display; TPL_CHECK_ON_NULL_RETURN(wl_egl_display); - TPL_INFO("[SURFACE_FINI][BEGIN]", - "wl_egl_surface(%p) wl_surface(%p) 
tbm_queue(%p)", - wl_egl_surface, - wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue); - - if (wl_egl_surface->surf_source) - tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); - wl_egl_surface->surf_source = NULL; + surface = wl_egl_surface->tpl_surface; + TPL_CHECK_ON_NULL_RETURN(surface); - if (wl_egl_surface->wl_egl_window) { - struct tizen_private *tizen_private = NULL; - struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; - TPL_INFO("[WL_EGL_WINDOW_FINI]", - "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", - wl_egl_surface, wl_egl_window, - wl_egl_surface->wl_surface); - tizen_private = (struct tizen_private *)wl_egl_window->driver_private; - if (tizen_private) { - tizen_private->set_window_serial_callback = NULL; - tizen_private->rotate_callback = NULL; - tizen_private->get_rotation_capability = NULL; - tizen_private->create_presentation_sync_fd = NULL; - tizen_private->create_commit_sync_fd = NULL; - tizen_private->set_frontbuffer_callback = NULL; - tizen_private->merge_sync_fds = NULL; - tizen_private->data = NULL; - free(tizen_private); + /* When the queue is resized, change the reset flag to TPL_TRUE to reflect + * the changed window size at the next frame. */ + width = tbm_surface_queue_get_width(tbm_queue); + height = tbm_surface_queue_get_height(tbm_queue); + if (surface->width != width || surface->height != height) { + TPL_INFO("[QUEUE_RESIZE]", + "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)", + wl_egl_surface, tbm_queue, + surface->width, surface->height, width, height); + } - wl_egl_window->dirver_private = NULL; + /* When queue_reset_callback is called, if is_activated is different from + * its previous state change the reset flag to TPL_TRUE to get a new buffer + * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. 
*/ + is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client, + wl_egl_surface->tbm_queue); + if (wl_egl_surface->is_activated != is_activated) { + if (is_activated) { + TPL_INFO("[ACTIVATED]", + "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); + } else { + TPL_LOG_T("[DEACTIVATED]", + " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); } - - wl_egl_window->destroy_window_callback = NULL; - wl_egl_window->resize_callback = NULL; - - wl_egl_surface->wl_egl_window = NULL; } - wl_egl_surface->wl_surface = NULL; - wl_egl_surface->wl_egl_display = NULL; - wl_egl_surface->tpl_surface = NULL; - - tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); - tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex); + wl_egl_surface->reset = TPL_TRUE; - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex); + if (surface->reset_cb) + surface->reset_cb(surface->reset_data); +} - tpl_gmutex_lock(&wl_egl_surface->free_queue_mutex); - tpl_gmutex_unlock(&wl_egl_surface->free_queue_mutex); - tpl_gmutex_clear(&wl_egl_surface->free_queue_cond); +static void +__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue, + void *data) +{ + TPL_IGNORE(tbm_queue); - tpl_gmutex_clear(&wl_egl_surface->surf_mutex); - tpl_gcond_clear(&wl_egl_surface->surf_cond); + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - g_cond_clear(&wl_egl_surface->free_queue_cond); - g_mutex_clear(&wl_egl_surface->free_queue_mutex); + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface); + tpl_gsource_send_message(wl_egl_surface->surf_source, 2); - 
free(wl_egl_surface); - surface->backend.data = NULL; + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } +/* -- END -- tbm_surface_queue callback funstions */ static void _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) @@ -1470,9 +1440,6 @@ static tpl_bool_t __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) { tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tpl_result_t res = TPL_ERROR_NONE; - ssize_t s; - uint64_t message = 0; wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource); @@ -1482,7 +1449,7 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) TPL_DEBUG("wl_egl_surface(%p) initialize message received!", wl_egl_surface); _thread_wl_egl_surface_init(wl_egl_surface); - tpl_gcond_signal(wl_egl_surface->surf_cond); + tpl_gcond_signal(&wl_egl_surface->surf_cond); tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } else if (message == 2) { tpl_gmutex_lock(&wl_egl_surface->surf_mutex); @@ -1505,7 +1472,7 @@ __thread_func_surf_finalize(tpl_gsource *gsource) _thread_wl_egl_surface_fini(wl_egl_surface); - TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%d)", + TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%p)", gsource, wl_egl_surface); } @@ -1517,832 +1484,858 @@ static tpl_gsource_functions surf_funcs = { }; static tpl_result_t -__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface, - tpl_bool_t set) +__tpl_wl_egl_surface_init(tpl_surface_t *surface) { - tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = NULL; + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tpl_gsource *surf_source = NULL; - TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); + struct wl_egl_window *wl_egl_window = + (struct wl_egl_window *)surface->native_handle; - wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); + 
TPL_ASSERT(surface->native_handle); - TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); + wl_egl_display = + (tpl_wl_egl_display_t *)surface->display->backend.data; + if (!wl_egl_display) { + TPL_ERR("Invalid parameter. wl_egl_display(%p)", + wl_egl_display); + return TPL_ERROR_INVALID_PARAMETER; + } - TPL_INFO("[SET_PREROTATION_CAPABILITY]", - "wl_egl_surface(%p) prerotation capability set to [%s]", - wl_egl_surface, (set ? "TRUE" : "FALSE")); + wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1, + sizeof(tpl_wl_egl_surface_t)); + if (!wl_egl_surface) { + TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } - wl_egl_surface->prerotation_capability = set; - return TPL_ERROR_NONE; -} + surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface, + -1, &surf_funcs, SOURCE_TYPE_NORMAL); + if (!surf_source) { + TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)", + wl_egl_surface); + goto surf_source_create_fail; + } -static tpl_result_t -__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface, - int post_interval) -{ - tpl_wl_egl_surface_t *wl_egl_surface = NULL; + surface->backend.data = (void *)wl_egl_surface; + surface->width = wl_egl_window->width; + surface->height = wl_egl_window->height; + surface->rotation = 0; - TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); + wl_egl_surface->tpl_surface = surface; + wl_egl_surface->width = wl_egl_window->width; + wl_egl_surface->height = wl_egl_window->height; + wl_egl_surface->format = surface->format; - wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + wl_egl_surface->surf_source = surf_source; + wl_egl_surface->wl_egl_window = wl_egl_window; + wl_egl_surface->wl_surface = wl_egl_window->surface; - TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); + wl_egl_surface->wl_egl_display = wl_egl_display; - TPL_INFO("[SET_POST_INTERVAL]", - 
"wl_egl_surface(%p) post_interval(%d -> %d)", - wl_egl_surface, wl_egl_surface->post_interval, post_interval); + wl_egl_surface->reset = TPL_FALSE; + wl_egl_surface->is_activated = TPL_FALSE; + wl_egl_surface->need_to_enqueue = TPL_FALSE; + wl_egl_surface->prerotation_capability = TPL_FALSE; + wl_egl_surface->vblank_done = TPL_TRUE; + wl_egl_surface->use_render_done_fence = TPL_FALSE; + wl_egl_surface->set_serial_is_used = TPL_FALSE; - wl_egl_surface->post_interval = post_interval; + wl_egl_surface->latest_transform = 0; + wl_egl_surface->render_done_cnt = 0; + wl_egl_surface->serial = 0; - return TPL_ERROR_NONE; -} + wl_egl_surface->vblank = NULL; + wl_egl_surface->tss_flusher = NULL; + wl_egl_surface->surface_sync = NULL; -static tpl_bool_t -__tpl_wl_egl_surface_validate(tpl_surface_t *surface) -{ - tpl_bool_t retval = TPL_TRUE; - - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; - - retval = !(wl_egl_surface->reset); + wl_egl_surface->post_interval = surface->post_interval; - return retval; -} + wl_egl_surface->commit_sync.fd = -1; + wl_egl_surface->presentation_sync.fd = -1; -void -__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) -{ - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; + { + struct tizen_private *tizen_private = NULL; - if (width) - *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); - if (height) - *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); -} + if (wl_egl_window->driver_private) + tizen_private = (struct tizen_private *)wl_egl_window->driver_private; + else { + tizen_private = tizen_private_create(); + wl_egl_window->driver_private = (void *)tizen_private; + } -#define CAN_DEQUEUE_TIMEOUT_MS 10000 + if (tizen_private) { + tizen_private->data = (void *)wl_egl_surface; + tizen_private->rotate_callback = (void *)__cb_rotate_callback; + 
tizen_private->get_rotation_capability = (void *) + __cb_get_rotation_capability; + tizen_private->set_window_serial_callback = (void *) + __cb_set_window_serial_callback; + tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd; + tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd; -tpl_result_t -_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) -{ - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback; + wl_egl_window->resize_callback = (void *)__cb_resize_callback; + } + } - _print_buffer_lists(wl_egl_surface); + tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex); + tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex); - if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue)) - != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)", - wl_egl_surface->tbm_queue, tsq_err); - return TPL_ERROR_INVALID_OPERATION; - } + tpl_gmutex_init(&wl_egl_surface->surf_mutex); + tpl_gcond_init(&wl_egl_surface->surf_cond); + /* Initialize in thread */ tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - if (wl_egl_surface->committed_buffers) { - while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) { - tbm_surface_h tbm_surface = - __tpl_list_pop_front(wl_egl_surface->committed_buffers, - (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("Failed to release. 
tbm_surface(%p) tsq_err(%d)", - tbm_surface, tsq_err); - } - } + tpl_gsource_send_message(wl_egl_surface->surf_source, 1); + tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex); tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - TPL_INFO("[FORCE_FLUSH]", - "wl_egl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->tbm_queue); + TPL_ASSERT(wl_egl_surface->tbm_queue); + + TPL_INFO("[SURFACE_INIT]", + "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)", + surface, wl_egl_surface, wl_egl_surface->surf_source); return TPL_ERROR_NONE; + +surf_source_create_fail: + free(wl_egl_surface); + surface->backend.data = NULL; + return TPL_ERROR_INVALID_OPERATION; } -static void -_wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer, - tpl_wl_egl_surface_t *wl_egl_surface) +static tbm_surface_queue_h +_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface, + struct wayland_tbm_client *wl_tbm_client, + int num_buffers) { - struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; - struct tizen_private *tizen_private = - (struct tizen_private *)wl_egl_window->driver_private; + tbm_surface_queue_h tbm_queue = NULL; + tbm_bufmgr bufmgr = NULL; + unsigned int capability; - TPL_ASSERT(tizen_private); + struct wl_surface *wl_surface = wl_egl_surface->wl_surface; + int width = wl_egl_surface->width; + int height = wl_egl_surface->height; + int format = wl_egl_surface->format; - wl_egl_buffer->draw_done = TPL_FALSE; - wl_egl_buffer->need_to_commit = TPL_TRUE; + if (!wl_tbm_client || !wl_surface) { + TPL_ERR("Invalid parameters. 
wl_tbm_client(%p) wl_surface(%p)", + wl_tbm_client, wl_surface); + return NULL; + } - wl_egl_buffer->acquire_fence_fd = -1; - wl_egl_buffer->release_fence_fd = -1; - wl_egl_buffer->commit_sync_fd = -1; - wl_egl_buffer->presentation_sync_fd = -1; + bufmgr = tbm_bufmgr_init(-1); + capability = tbm_bufmgr_get_capability(bufmgr); + tbm_bufmgr_deinit(bufmgr); - wl_egl_buffer->presentation_feedback = NULL; - wl_egl_buffer->buffer_release = NULL; + if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) { + tbm_queue = wayland_tbm_client_create_surface_queue_tiled( + wl_tbm_client, + wl_surface, + num_buffers, + width, + height, + format); + } else { + tbm_queue = wayland_tbm_client_create_surface_queue( + wl_tbm_client, + wl_surface, + num_buffers, + width, + height, + format); + } - wl_egl_buffer->transform = tizen_private->transform; + if (tbm_queue) { + TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)", + wl_tbm_client); + return NULL; + } - if (wl_egl_buffer->w_transform != tizen_private->window_transform) { - wl_egl_buffer->w_transform = tizen_private->window_transform; - wl_egl_buffer->w_rotated = TPL_TRUE; + if (tbm_surface_queue_set_modes( + tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) != + TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; } - if (wl_egl_surface->set_serial_is_used) { - wl_egl_buffer->serial = wl_egl_surface->serial; - } else { - wl_egl_buffer->serial = ++tizen_private->serial; + if (tbm_surface_queue_add_reset_cb( + tbm_queue, + __cb_tbm_queue_reset_callback, + (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; } - if (wl_egl_buffer->rects) { - free(wl_egl_buffer->rects); - wl_egl_buffer->rects = NULL; - wl_egl_buffer->num_rects = 0; + if (tbm_surface_queue_add_acquirable_cb( + 
tbm_queue, + __cb_tbm_queue_acquirable_callback, + (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", + tbm_queue); + tbm_surface_queue_destroy(tbm_queue); + return NULL; } -} -static tpl_wl_egl_buffer_t * -_get_wl_egl_buffer(tbm_surface_h tbm_surface) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER, - (void **)&wl_egl_buffer); - return wl_egl_buffer; + return tbm_queue; } -static tpl_wl_egl_buffer_t * -_wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface, - tbm_surface_h tbm_surface) +static tdm_client_vblank* +_thread_create_tdm_client_vblank(tdm_client *tdm_client) { - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - tpl_bool_t is_new_buffer = TPL_FALSE; - - wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); - - if (!wl_egl_buffer) { - wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t)); - TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL); - - tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER, - (tbm_data_free)__cb_wl_egl_buffer_free); - tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER, - wl_egl_buffer); - is_new_buffer = TPL_TRUE; + tdm_client_vblank *vblank = NULL; + tdm_client_output *tdm_output = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; - wl_egl_buffer->wl_buffer = NULL; - wl_egl_buffer->tbm_surface = tbm_surface; - wl_egl_buffer->wl_egl_surface = wl_egl_surface; + if (!tdm_client) { + TPL_ERR("Invalid parameter. 
tdm_client(%p)", tdm_client); + return NULL; + } - wl_egl_buffer->dx = wl_egl_window->dx; - wl_egl_buffer->dy = wl_egl_window->dy; - wl_egl_buffer->width = tbm_surface_get_width(tbm_surface); - wl_egl_buffer->height = tbm_surface_get_height(tbm_surface); + tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err); + if (!tdm_output || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err); + return NULL; + } - TPL_INFO("[WL_EGL_BUFFER_CREATE]", - "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)", - wl_egl_surface, wl_egl_buffer, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); + vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err); + if (!vblank || tdm_err != TDM_ERROR_NONE) { + TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err); + return NULL; } - _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface); + tdm_client_vblank_set_enable_fake(vblank, 1); + tdm_client_vblank_set_sync(vblank, 0); - return wl_egl_buffer; + return vblank; } -static tbm_surface_h -__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, - int32_t *release_fence) +static void +_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) { - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->display->backend.data); - TPL_OBJECT_CHECK_RETURN(surface, NULL); + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; - tpl_wl_egl_display_t *wl_egl_display = - (tpl_wl_egl_display_t *)surface->display->backend.data; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + wl_egl_surface->tbm_queue = _thread_create_tbm_queue( + wl_egl_surface, + wl_egl_display->wl_tbm_client, + CLIENT_QUEUE_SIZE); + if (!wl_egl_surface->tbm_queue) { + TPL_ERR("Failed to create tbm_queue. 
wl_egl_surface(%p) wl_tbm_client(%p)", + wl_egl_surface, wl_egl_display->wl_tbm_client); + return; + } - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_bool_t is_activated = 0; - int bo_name = 0; - tbm_surface_h tbm_surface = NULL; + TPL_INFO("[QUEUE_CREATION]", + "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)", + wl_egl_surface, wl_egl_surface->wl_surface, + wl_egl_display->wl_tbm_client); + TPL_INFO("[QUEUE_CREATION]", + "tbm_queue(%p) size(%d x %d) X %d format(%d)", + wl_egl_surface->tbm_queue, + wl_egl_surface->width, + wl_egl_surface->height, + CLIENT_QUEUE_SIZE, + wl_egl_surface->format); - TPL_OBJECT_UNLOCK(surface); - tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( - wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS); - TPL_OBJECT_LOCK(surface); + wl_egl_surface->vblank = _thread_create_tdm_client_vblank( + wl_egl_display->tdm_client); + if (wl_egl_surface->vblank) { + TPL_INFO("[VBLANK_INIT]", + "wl_egl_surface(%p) tdm_client(%p) vblank(%p)", + wl_egl_surface, wl_egl_display->tdm_client, + wl_egl_surface->vblank); + } - /* After the can dequeue state, lock the wl_event_mutex to prevent other - * events from being processed in wayland_egl_thread - * during below dequeue procedure. */ - tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); + if (wl_egl_display->tss) { + wl_egl_surface->tss_flusher = + tizen_surface_shm_get_flusher(wl_egl_display->tss, + wl_egl_surface->wl_surface); + } - if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { - TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", - wl_egl_surface->tbm_queue, surface); - if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) { - TPL_ERR("Failed to timeout reset. 
tbm_queue(%p) surface(%p)", - wl_egl_surface->tbm_queue, surface); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return NULL; + if (wl_egl_surface->tss_flusher) { + tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher, + &tss_flusher_listener, + wl_egl_surface); + TPL_INFO("[FLUSHER_INIT]", + "wl_egl_surface(%p) tss_flusher(%p)", + wl_egl_surface, wl_egl_surface->tss_flusher); + } + + if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) { + wl_egl_surface->surface_sync = + zwp_linux_explicit_synchronization_v1_get_synchronization( + wl_egl_display->explicit_sync, wl_egl_surface->wl_surface); + if (wl_egl_surface->surface_sync) { + TPL_INFO("[EXPLICIT_SYNC_INIT]", + "wl_egl_surface(%p) surface_sync(%p)", + wl_egl_surface, wl_egl_surface->surface_sync); } else { - tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)", + wl_egl_surface); + wl_egl_display->use_explicit_sync = TPL_FALSE; } } - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)", - wl_egl_surface->tbm_queue, surface); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return NULL; - } + wl_egl_surface->committed_buffers = __tpl_list_alloc(); + wl_egl_surface->in_use_buffers = __tpl_list_alloc(); + wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc(); + wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc(); + wl_egl_surface->presentation_feedbacks = __tpl_list_alloc(); +} - /* wayland client can check their states (ACTIVATED or DEACTIVATED) with - * below function [wayland_tbm_client_queue_check_activate()]. - * This function has to be called before tbm_surface_queue_dequeue() - * in order to know what state the buffer will be dequeued next. - * - * ACTIVATED state means non-composite mode. Client can get buffers which - can be displayed directly(without compositing). - * DEACTIVATED state means composite mode. 
Client's buffer will be displayed - by compositor(E20) with compositing. - */ - is_activated = wayland_tbm_client_queue_check_activate( - wl_egl_display->wl_tbm_client, - wl_egl_surface->tbm_queue); +static void +__tpl_wl_egl_surface_fini(tpl_surface_t *surface) +{ + tpl_wl_egl_surface_t *wl_egl_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = NULL; - wl_egl_surface->is_activated = is_activated; + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); - surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); - surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); - wl_egl_surface->width = surface->width; - wl_egl_surface->height = surface->height; + TPL_CHECK_ON_TRUE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW); - if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { - /* If surface->frontbuffer is already set in frontbuffer mode, - * it will return that frontbuffer if it is still activated, - * otherwise dequeue the new buffer after initializing - * surface->frontbuffer to NULL. 
*/ - if (is_activated && !wl_egl_surface->reset) { - bo_name = _get_tbm_surface_bo_name(surface->frontbuffer); + wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); - TPL_LOG_T("WL_EGL", - "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", - surface->frontbuffer, bo_name); - TRACE_ASYNC_BEGIN((int)surface->frontbuffer, - "[DEQ]~[ENQ] BO_NAME:%d", - bo_name); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return surface->frontbuffer; - } else { - surface->frontbuffer = NULL; - wl_egl_surface->need_to_enqueue = TPL_TRUE; - } - } else { - surface->frontbuffer = NULL; - } + wl_egl_display = wl_egl_surface->wl_egl_display; + TPL_CHECK_ON_NULL_RETURN(wl_egl_display); - tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue, - &tbm_surface); - if (!tbm_surface) { - TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d", - wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); - return NULL; - } + TPL_INFO("[SURFACE_FINI][BEGIN]", + "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", + wl_egl_surface, + wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue); - tbm_surface_internal_ref(tbm_surface); + if (wl_egl_surface->surf_source) + tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE); + wl_egl_surface->surf_source = NULL; - bo_name = _get_tbm_surface_bo_name(tbm_surface); + if (wl_egl_surface->wl_egl_window) { + struct tizen_private *tizen_private = NULL; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + TPL_INFO("[WL_EGL_WINDOW_FINI]", + "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)", + wl_egl_surface, wl_egl_window, + wl_egl_surface->wl_surface); + tizen_private = (struct tizen_private *)wl_egl_window->driver_private; + if (tizen_private) { + tizen_private->set_window_serial_callback = NULL; + tizen_private->rotate_callback = NULL; + tizen_private->get_rotation_capability = NULL; + 
tizen_private->create_presentation_sync_fd = NULL; + tizen_private->create_commit_sync_fd = NULL; + tizen_private->set_frontbuffer_callback = NULL; + tizen_private->merge_sync_fds = NULL; + tizen_private->data = NULL; + free(tizen_private); - wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface); - TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer."); + wl_egl_window->driver_private = NULL; + } - /* If wl_egl_buffer->release_fence_fd is -1, - * the tbm_surface can be used immediately. - * If not, user(EGL) have to wait until signaled. */ - if (release_fence && wl_egl_surface->surface_sync) { - *release_fence = wl_egl_buffer->release_fence_fd; - TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)", - wl_egl_surface, wl_egl_buffer, *release_fence); + wl_egl_window->destroy_window_callback = NULL; + wl_egl_window->resize_callback = NULL; + + wl_egl_surface->wl_egl_window = NULL; } - if (surface->is_frontbuffer_mode && is_activated) - surface->frontbuffer = tbm_surface; + wl_egl_surface->wl_surface = NULL; + wl_egl_surface->wl_egl_display = NULL; + wl_egl_surface->tpl_surface = NULL; - wl_egl_surface->reset = TPL_FALSE; + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex); - TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name); - TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", - tbm_surface, bo_name, release_fence ? 
*release_fence : -1); + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex); - tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + tpl_gmutex_clear(&wl_egl_surface->surf_mutex); + tpl_gcond_clear(&wl_egl_surface->surf_cond); - return tbm_surface; + TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface); + + free(wl_egl_surface); + surface->backend.data = NULL; } static tpl_result_t -__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface) +__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface, + tpl_bool_t set) { - TPL_ASSERT(surface); - TPL_ASSERT(surface->backend.data); - - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *)surface->backend.data; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Invalid buffer. 
tbm_surface(%p)", tbm_surface); - return TPL_ERROR_INVALID_PARAMETER; - } + tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - /* Stop tracking of this canceled tbm_surface */ - __tpl_list_remove_data(wl_egl_surface->in_use_buffers, - (void *)tbm_surface, TPL_FIRST, NULL); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); - tbm_surface_internal_unref(tbm_surface); + wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; - tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to release tbm_surface(%p) surface(%p)", - tbm_surface, surface); - return TPL_ERROR_INVALID_OPERATION; - } + TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); - TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", - wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); + TPL_INFO("[SET_PREROTATION_CAPABILITY]", + "wl_egl_surface(%p) prerotation capability set to [%s]", + wl_egl_surface, (set ? 
"TRUE" : "FALSE")); + wl_egl_surface->prerotation_capability = set; return TPL_ERROR_NONE; } static tpl_result_t -__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, - tbm_surface_h tbm_surface, - int num_rects, const int *rects, int32_t acquire_fence) +__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface, + int post_interval) { - TPL_ASSERT(surface); - TPL_ASSERT(surface->display); - TPL_ASSERT(surface->backend.data); - TPL_ASSERT(tbm_surface); - TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); + tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tpl_wl_egl_surface_t *wl_egl_surface = - (tpl_wl_egl_surface_t *) surface->backend.data; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_result_t ret = TPL_ERROR_NONE; - int bo_name = -1; + TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER); - if (!tbm_surface_internal_is_valid(tbm_surface)) { - TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", - tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - return TPL_ERROR_INVALID_PARAMETER; - } + wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; - bo_name = _get_tbm_surface_bo_name(tbm_surface); + TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER); - TRACE_MARK("[ENQ] BO_NAME:%d", bo_name); + TPL_INFO("[SET_POST_INTERVAL]", + "wl_egl_surface(%p) post_interval(%d -> %d)", + wl_egl_surface, wl_egl_surface->post_interval, post_interval); - TPL_LOG_T("WL_EGL", - "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", - wl_egl_surface, tbm_surface, bo_name, acquire_fence); + wl_egl_surface->post_interval = post_interval; - wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + return TPL_ERROR_NONE; +} - /* If there are received region information, save it to wl_egl_buffer */ - if (num_rects && rects) { - if (wl_egl_buffer->rects != NULL) { - free(wl_egl_buffer->rects); - 
wl_egl_buffer->rects = NULL; - wl_egl_buffer->num_rects = 0; - } +static tpl_bool_t +__tpl_wl_egl_surface_validate(tpl_surface_t *surface) +{ + tpl_bool_t retval = TPL_TRUE; - wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects)); - wl_egl_buffer->num_rects = num_rects; + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); - if (!wl_egl_buffer->rects) { - TPL_ERR("Failed to allocate memory fo damage rects info."); - return TPL_ERROR_OUT_OF_MEMORY; - } + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; - memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects); - } + retval = !(wl_egl_surface->reset); - if (!wl_egl_surface->need_to_enqueue || - !wl_egl_buffer->need_to_commit) { - TPL_WARN("WL_EGL", - "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", - ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); - return TPL_ERROR_NONE; - } + return retval; +} - /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and - * commit if surface->frontbuffer that is already set and the tbm_surface - * client want to enqueue are the same. - */ - if (surface->is_frontbuffer_mode) { - /* The first buffer to be activated in frontbuffer mode must be - * committed. Subsequence frames do not need to be committed because - * the buffer is already displayed. 
- */ - if (surface->frontbuffer == tbm_surface) - wl_egl_surface->need_to_enqueue = TPL_FALSE; +static void +__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height) +{ + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; - if (acquire_fence != -1) { - close(acquire_fence); - acquire_fence = -1; - } - } + if (width) + *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); + if (height) + *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); +} - if (wl_egl_buffer->acquire_fence_fd != -1) - close(wl_egl_buffer->acquire_fence_fd); - - wl_egl_buffer->acquire_fence_fd = acquire_fence; +#define CAN_DEQUEUE_TIMEOUT_MS 10000 - tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - tbm_surface_internal_unref(tbm_surface); - TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d", - tbm_surface, wl_egl_surface, tsq_err); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); +tpl_result_t +_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) +{ + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + + _print_buffer_lists(wl_egl_surface); + + if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue)) + != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)", + wl_egl_surface->tbm_queue, tsq_err); return TPL_ERROR_INVALID_OPERATION; } - tbm_surface_internal_unref(tbm_surface); + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + if (wl_egl_surface->committed_buffers) { + while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) { + tbm_surface_h tbm_surface = + __tpl_list_pop_front(wl_egl_surface->committed_buffers, + (tpl_free_func_t)__cb_buffer_remove_from_list); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + tsq_err = 
tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", + tbm_surface, tsq_err); + } + } + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_INFO("[FORCE_FLUSH]", + "wl_egl_surface(%p) tbm_queue(%p)", + wl_egl_surface, wl_egl_surface->tbm_queue); return TPL_ERROR_NONE; } -static tpl_result_t -_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) +static void +_wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer, + tpl_wl_egl_surface_t *wl_egl_surface) { - tbm_surface_h tbm_surface = NULL; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - tpl_bool_t ready_to_commit = TPL_FALSE; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + struct tizen_private *tizen_private = + (struct tizen_private *)wl_egl_window->driver_private; - while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) { - tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue, - &tbm_surface); - if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to acquire from tbm_queue(%p)", - wl_egl_surface->tbm_queue); - return TPL_ERROR_INVALID_OPERATION; - } + TPL_ASSERT(tizen_private); - tbm_surface_internal_ref(tbm_surface); + wl_egl_buffer->draw_done = TPL_FALSE; + wl_egl_buffer->need_to_commit = TPL_TRUE; - wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); - TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, - "wl_egl_buffer sould be not NULL"); + wl_egl_buffer->acquire_fence_fd = -1; + wl_egl_buffer->release_fence_fd = -1; + wl_egl_buffer->commit_sync_fd = -1; + wl_egl_buffer->presentation_sync_fd = -1; - if (wl_egl_buffer->wl_buffer == NULL) { - tpl_wl_egl_display_t *wl_egl_display = 
wl_egl_surface->wl_egl_display; - wl_egl_buffer->wl_buffer = - (struct wl_proxy *)wayland_tbm_client_create_buffer( - wl_egl_display->wl_tbm_client, tbm_surface); + wl_egl_buffer->presentation_feedback = NULL; + wl_egl_buffer->buffer_release = NULL; - if (!wl_egl_buffer->wl_buffer) { - TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)", - wl_egl_display->wl_tbm_client, tbm_surface); - } - } + wl_egl_buffer->transform = tizen_private->transform; - if (wl_egl_buffer->acquire_fence_fd != -1) { - if (wl_egl_surface->surface_sync) - ready_to_commit = TPL_TRUE; - else { - if (wl_egl_buffer->waiting_source) { - tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); - wl_egl_buffer->waiting_source = NULL; - } + if (wl_egl_buffer->w_transform != tizen_private->window_transform) { + wl_egl_buffer->w_transform = tizen_private->window_transform; + wl_egl_buffer->w_rotated = TPL_TRUE; + } - wl_egl_buffer->waiting_source = - tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer, - wl_egl_buffer->acquire_fence_fd, buffer_funcs, - SOURCE_TYPE_DISPOSABLE); + if (wl_egl_surface->set_serial_is_used) { + wl_egl_buffer->serial = wl_egl_surface->serial; + } else { + wl_egl_buffer->serial = ++tizen_private->serial; + } - __tpl_list_push_back(wl_egl_surface->fence_waiting_buffers, tbm_surface); + if (wl_egl_buffer->rects) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } +} - TRACE_ASYNC_BEGIN(wl_egl_buffer, "FENCE WAIT fd(%d)", - wl_egl_buffer->acquire_fence_fd); +static tpl_wl_egl_buffer_t * +_get_wl_egl_buffer(tbm_surface_h tbm_surface) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER, + (void **)&wl_egl_buffer); + return wl_egl_buffer; +} - ready_to_commit = TPL_FALSE; - } - } +static tpl_wl_egl_buffer_t * +_wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface, + tbm_surface_h tbm_surface) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = 
NULL; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; - if (ready_to_commit) { - if (wl_egl_surface->vblank_done) - ready_to_commit = TPL_TRUE; - else { - __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, tbm_surface); - ready_to_commit = TPL_FALSE; - } - } + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); - if (ready_to_commit) - _thread_wl_surface_commit(wl_egl_surface, tbm_surface); + if (!wl_egl_buffer) { + wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t)); + TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL); + + tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER, + (tbm_data_free)__cb_wl_egl_buffer_free); + tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER, + wl_egl_buffer); + + wl_egl_buffer->wl_buffer = NULL; + wl_egl_buffer->tbm_surface = tbm_surface; + wl_egl_buffer->wl_egl_surface = wl_egl_surface; + + wl_egl_buffer->dx = wl_egl_window->dx; + wl_egl_buffer->dy = wl_egl_window->dy; + wl_egl_buffer->width = tbm_surface_get_width(tbm_surface); + wl_egl_buffer->height = tbm_surface_get_height(tbm_surface); + + TPL_INFO("[WL_EGL_BUFFER_CREATE]", + "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, wl_egl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); } - return TPL_ERROR_NONE; + _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface); + + return wl_egl_buffer; } -static const struct wl_buffer_listener wl_buffer_release_listener = { - (void *)__cb_wl_buffer_release, -}; - -static void -__cb_presentation_feedback_sync_output(void *data, - struct wp_presentation_feedback *presentation_feedback, - struct wl_output *output) +static tbm_surface_h +__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, + int32_t *release_fence) { - TPL_IGNORE(data); - TPL_IGNORE(presentation_feedback); - TPL_IGNORE(output); - /* Nothing to do */ -} + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + 
TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->backend.data); + TPL_OBJECT_CHECK_RETURN(surface, NULL); -static void -__cb_presentation_feedback_presented(void *data, - struct wp_presentation_feedback *presentation_feedback, - uint32_t tv_sec_hi, - uint32_t tv_sec_lo, - uint32_t tv_nsec, - uint32_t refresh_nsec, - uint32_t seq_hi, - uint32_t seq_lo, - uint32_t flags) -{ - TPL_IGNORE(tv_sec_hi); - TPL_IGNORE(tv_sec_lo); - TPL_IGNORE(tv_nsec); - TPL_IGNORE(refresh_nsec); - TPL_IGNORE(seq_hi); - TPL_IGNORE(seq_lo); - TPL_IGNORE(flags); + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + tpl_wl_egl_display_t *wl_egl_display = + (tpl_wl_egl_display_t *)surface->display->backend.data; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_bool_t is_activated = 0; + int bo_name = 0; + tbm_surface_h tbm_surface = NULL; - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + TPL_OBJECT_UNLOCK(surface); + tsq_err = tbm_surface_queue_can_dequeue_wait_timeout( + wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS); + TPL_OBJECT_LOCK(surface); - TPL_DEBUG("[FEEDBACK][PRESENTED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", - wl_egl_surface, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); + /* After the can dequeue state, lock the wl_event_mutex to prevent other + * events from being processed in wayland_egl_thread + * during below dequeue procedure. 
*/ + tpl_gmutex_lock(&wl_egl_display->wl_event_mutex); - if (wl_egl_buffer->presentation_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); - if (ret == -1) { - TPL_ERR("Failed to send presentation_sync signal to fd(%d)", - wl_egl_buffer->presentation_sync_fd); + if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) { + TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)", + wl_egl_surface->tbm_queue, surface); + if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) { + TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)", + wl_egl_surface->tbm_queue, surface); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + return NULL; + } else { + tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; } + } - TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd, - "[PRESENTATION_SYNC] bo(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - - close(wl_egl_buffer->presentation_sync_fd); - wl_egl_buffer->presentation_sync_fd = -1; + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)", + wl_egl_surface->tbm_queue, surface); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + return NULL; } - if (wl_egl_buffer->presentation_feedback) - wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback); + /* wayland client can check their states (ACTIVATED or DEACTIVATED) with + * below function [wayland_tbm_client_queue_check_activate()]. + * This function has to be called before tbm_surface_queue_dequeue() + * in order to know what state the buffer will be dequeued next. + * + * ACTIVATED state means non-composite mode. Client can get buffers which + can be displayed directly(without compositing). + * DEACTIVATED state means composite mode. Client's buffer will be displayed + by compositor(E20) with compositing. 
+ */ + is_activated = wayland_tbm_client_queue_check_activate( + wl_egl_display->wl_tbm_client, + wl_egl_surface->tbm_queue); - wl_egl_buffer->presentation_feedback = NULL; + wl_egl_surface->is_activated = is_activated; - __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface, - TPL_FIRST, NULL); + surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue); + surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue); + wl_egl_surface->width = surface->width; + wl_egl_surface->height = surface->height; - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); -} + if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) { + /* If surface->frontbuffer is already set in frontbuffer mode, + * it will return that frontbuffer if it is still activated, + * otherwise dequeue the new buffer after initializing + * surface->frontbuffer to NULL. */ + if (is_activated && !wl_egl_surface->reset) { + bo_name = _get_tbm_surface_bo_name(surface->frontbuffer); -static void -__cb_presentation_feedback_discarded(void *data, - struct wp_presentation_feedback *presentation_feedback) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; + TPL_LOG_T("WL_EGL", + "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)", + surface->frontbuffer, bo_name); + TRACE_ASYNC_BEGIN((int)surface->frontbuffer, + "[DEQ]~[ENQ] BO_NAME:%d", + bo_name); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + return surface->frontbuffer; + } else { + surface->frontbuffer = NULL; + wl_egl_surface->need_to_enqueue = TPL_TRUE; + } + } else { + surface->frontbuffer = NULL; + } - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue, + &tbm_surface); + if (!tbm_surface) { + TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err 
= %d", + wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err); + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); + return NULL; + } - TPL_DEBUG("[FEEDBACK][DISCARDED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", - wl_egl_surface, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); + tbm_surface_internal_ref(tbm_surface); - if (wl_egl_buffer->presentation_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); - if (ret == -1) { - TPL_ERR("Failed to send presentation_sync signal to fd(%d)", - wl_egl_buffer->presentation_sync_fd); - } + bo_name = _get_tbm_surface_bo_name(tbm_surface); - TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd, - "[PRESENTATION_SYNC] bo(%d)", - _get_tbm_surface_bo_name(tbm_surface)); + wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer."); - close(wl_egl_buffer->presentation_sync_fd); - wl_egl_buffer->presentation_sync_fd = -1; + /* If wl_egl_buffer->release_fence_fd is -1, + * the tbm_surface can be used immediately. + * If not, user(EGL) have to wait until signaled. 
*/ + if (release_fence && wl_egl_surface->surface_sync) { + *release_fence = wl_egl_buffer->release_fence_fd; + TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)", + wl_egl_surface, wl_egl_buffer, *release_fence); } - if (wl_egl_buffer->presentation_feedback) - wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback); + if (surface->is_frontbuffer_mode && is_activated) + surface->frontbuffer = tbm_surface; - wl_egl_buffer->presentation_feedback = NULL; + wl_egl_surface->reset = TPL_FALSE; - __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface, - TPL_FIRST, NULL); + TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name); + TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_buffer, tbm_surface, bo_name, release_fence ? *release_fence : -1); - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); -} + tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); -static const struct wp_presentation_feedback_listener feedback_listener = { - __cb_presentation_feedback_sync_output, /* sync_output feedback -*/ - __cb_presentation_feedback_presented, - __cb_presentation_feedback_discarded -}; + return tbm_surface; +} -static void -_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, - tbm_surface_h tbm_surface) +static tpl_result_t +__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface) { - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; - struct wl_surface *wl_surface = wl_egl_surface->wl_surface; - struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; - uint32_t version; + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); - wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); - TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, - "wl_egl_buffer sould be not NULL"); + 
tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *)surface->backend.data; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - if (wl_egl_buffer->wl_buffer == NULL) { - wl_egl_buffer->wl_buffer = - (struct wl_proxy *)wayland_tbm_client_create_buffer( - wl_egl_display->wl_tbm_client, tbm_surface); + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; } - TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL, - "[FATAL] Failed to create wl_buffer"); - wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer, - &wl_buffer_release_listener, wl_egl_buffer); + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + /* Stop tracking of this canceled tbm_surface */ + __tpl_list_remove_data(wl_egl_surface->in_use_buffers, + (void *)tbm_surface, TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - version = wl_proxy_get_version((struct wl_proxy *)wl_surface); + tbm_surface_internal_unref(tbm_surface); - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) { - wl_egl_buffer->presentation_feedback = - wp_presentation_feedback(wl_egl_display->presentation, - wl_surface); - wp_presentation_feedback_add_listener(wl_egl_buffer->presentation_feedback, - &feedback_listener, wl_egl_buffer); - __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, tbm_surface); - TRACE_ASYNC_BEGIN(wl_egl_buffer->presentation_sync_fd, - "[PRESENTATION_SYNC] bo(%d)", - _get_tbm_surface_bo_name(tbm_surface)); + tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to release tbm_surface(%p) surface(%p)", + tbm_surface, surface); + return TPL_ERROR_INVALID_OPERATION; } - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - if (wl_egl_buffer->w_rotated == TPL_TRUE) 
{ - wayland_tbm_client_set_buffer_transform( - wl_egl_display->wl_tbm_client, - (void *)wl_egl_buffer->wl_buffer, - wl_egl_buffer->w_transform); - wl_egl_buffer->w_rotated = TPL_FALSE; - } + TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); - if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) { - wl_egl_surface->latest_transform = wl_egl_buffer->transform; - wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform); - } + return TPL_ERROR_NONE; +} - if (wl_egl_window) { - wl_egl_window->attached_width = wl_egl_buffer->width; - wl_egl_window->attached_height = wl_egl_buffer->height; - } +static tpl_result_t +__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, int32_t acquire_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(tbm_surface); + TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER); - wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer, - wl_egl_buffer->dx, wl_egl_buffer->dy); + tpl_wl_egl_surface_t *wl_egl_surface = + (tpl_wl_egl_surface_t *) surface->backend.data; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + int bo_name = -1; - if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) { - if (version < 4) { - wl_surface_damage(wl_surface, - wl_egl_buffer->dx, wl_egl_buffer->dy, - wl_egl_buffer->width, wl_egl_buffer->height); - } else { - wl_surface_damage_buffer(wl_surface, - 0, 0, - wl_egl_buffer->width, wl_egl_buffer->height); - } - } else { - int i; - for (i = 0; i < wl_egl_buffer->num_rects; i++) { - int inverted_y = - wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] + - wl_egl_buffer->rects[i * 4 + 3]); - if (version < 4) { - wl_surface_damage(wl_surface, - wl_egl_buffer->rects[i * 4 
+ 0], - inverted_y, - wl_egl_buffer->rects[i * 4 + 2], - wl_egl_buffer->rects[i * 4 + 3]); - } else { - wl_surface_damage_buffer(wl_surface, - wl_egl_buffer->rects[i * 4 + 0], - inverted_y, - wl_egl_buffer->rects[i * 4 + 2], - wl_egl_buffer->rects[i * 4 + 3]); - } - } + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", + tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + return TPL_ERROR_INVALID_PARAMETER; } - wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client, - (void *)wl_egl_buffer->wl_buffer, - wl_egl_buffer->serial); + bo_name = _get_tbm_surface_bo_name(tbm_surface); - wl_egl_buffer->need_to_release = TPL_TRUE; + TRACE_MARK("[ENQ] BO_NAME:%d", bo_name); - if (wl_egl_display->use_explicit_sync && - wl_egl_surface->surface_sync) { + TPL_LOG_T("WL_EGL", + "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_surface, tbm_surface, bo_name, acquire_fence); - zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync, - wl_egl_buffer->acquire_fence_fd); - TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)", - wl_egl_surface, tbm_surface, wl_egl_buffer->acquire_fence_fd); - close(wl_egl_buffer->acquire_fence_fd); - wl_egl_buffer->acquire_fence_fd = -1; + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); - wl_egl_buffer->buffer_release = - zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync); - if (!wl_egl_buffer->buffer_release) { - TPL_ERR("Failed to get buffer_release. 
wl_egl_surface(%p)", wl_egl_surface); - } else { - zwp_linux_buffer_release_v1_add_listener( - wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer); - TPL_DEBUG("add explicit_sync_release_listener."); + /* If there are received region information, save it to wl_egl_buffer */ + if (num_rects && rects) { + if (wl_egl_buffer->rects != NULL) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; } - } - - wl_surface_commit(wl_surface); - - wl_display_flush(wl_egl_display->wl_display); - - TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - wl_egl_buffer->need_to_commit = TPL_FALSE; + wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects)); + wl_egl_buffer->num_rects = num_rects; - TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", - wl_egl_buffer->wl_buffer, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); + if (!wl_egl_buffer->rects) { + TPL_ERR("Failed to allocate memory fo damage rects info."); + return TPL_ERROR_OUT_OF_MEMORY; + } - if (wl_egl_display->tdm_initialized && - _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE) - TPL_ERR("Failed to set wait vblank."); + memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects); + } - if (wl_egl_surface->committed_buffers) { - __tpl_list_push_back(wl_egl_surface->committed_buffers, tbm_surface); + if (!wl_egl_surface->need_to_enqueue || + !wl_egl_buffer->need_to_commit) { + TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", + ((surface->frontbuffer == tbm_surface) ? 
"ON" : "OFF"), tbm_surface); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + return TPL_ERROR_NONE; } - tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); + /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and + * commit if surface->frontbuffer that is already set and the tbm_surface + * client want to enqueue are the same. + */ + if (surface->is_frontbuffer_mode) { + /* The first buffer to be activated in frontbuffer mode must be + * committed. Subsequence frames do not need to be committed because + * the buffer is already displayed. + */ + if (surface->frontbuffer == tbm_surface) + wl_egl_surface->need_to_enqueue = TPL_FALSE; - if (wl_egl_buffer->commit_sync_fd != -1) { - int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); - if (ret == -1) { - TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd); + if (acquire_fence != -1) { + close(acquire_fence); + acquire_fence = -1; } + } - TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)", - wl_egl_surface, wl_egl_buffer->commit_sync_fd); + if (wl_egl_buffer->acquire_fence_fd != -1) + close(wl_egl_buffer->acquire_fence_fd); + + wl_egl_buffer->acquire_fence_fd = acquire_fence; - close(wl_egl_buffer->commit_sync_fd); - wl_egl_buffer->commit_sync_fd = -1; + tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + tbm_surface_internal_unref(tbm_surface); + TPL_ERR("Failed to enqueue tbm_surface(%p). 
wl_egl_surface(%p) tsq_err=%d", + tbm_surface, wl_egl_surface, tsq_err); + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + return TPL_ERROR_INVALID_OPERATION; } - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + tbm_surface_internal_unref(tbm_surface); + + TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + + return TPL_ERROR_NONE; } static tpl_bool_t @@ -2355,7 +2348,7 @@ __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message) wl_egl_surface->render_done_cnt++; - TRACE_ASYNC_END(wl_egl_buffer, "FENCE WAIT fd(%d)", + TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)", wl_egl_buffer->acquire_fence_fd); tpl_gmutex_lock(&wl_egl_surface->surf_mutex); @@ -2370,12 +2363,6 @@ __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message) tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - /* This source is used only once and does not allow reuse. - * So finalize will be executed immediately. */ - g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag); - g_source_destroy(&wait_source->gsource); - g_source_unref(&wait_source->gsource); - return TPL_FALSE; } @@ -2402,637 +2389,637 @@ static tpl_gsource_functions buffer_funcs = { }; static tpl_result_t -_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface) +_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) { - tdm_error tdm_err = TDM_ERROR_NONE; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tbm_surface_h tbm_surface = NULL; + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + tpl_bool_t ready_to_commit = TPL_FALSE; - if (wl_egl_surface->vblank == NULL) { - wl_egl_surface->vblank = - _thread_create_tdm_client_vblank(wl_egl_display->tdm_client); - if (!wl_egl_surface->vblank) { - TPL_WARN("Failed to create vblank. 
wl_egl_surface(%p)", - wl_egl_surface); - return TPL_ERROR_OUT_OF_MEMORY; + while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) { + tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue, + &tbm_surface); + if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to acquire from tbm_queue(%p)", + wl_egl_surface->tbm_queue); + return TPL_ERROR_INVALID_OPERATION; } - } - - tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank, - wl_egl_surface->post_interval, - __cb_tdm_client_vblank, - (void *)wl_egl_surface); - - if (tdm_err == TDM_ERROR_NONE) { - wl_egl_surface->vblank_done = TPL_FALSE; - TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK"); - } else { - TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err); - return TPL_ERROR_INVALID_OPERATION; - } - - return TPL_ERROR_NONE; -} - -static int -_write_to_eventfd(int eventfd) -{ - uint64_t value = 1; - int ret; - - if (eventfd == -1) { - TPL_ERR("Invalid fd(-1)"); - return -1; - } - - ret = write(eventfd, &value, sizeof(uint64_t)); - if (ret == -1) { - TPL_ERR("failed to write to fd(%d)", eventfd); - return ret; - } - - return ret; -} - -void -__tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend) -{ - TPL_ASSERT(backend); - - backend->type = TPL_BACKEND_WAYLAND_THREAD; - backend->data = NULL; - - backend->init = __tpl_wl_egl_display_init; - backend->fini = __tpl_wl_egl_display_fini; - backend->query_config = __tpl_wl_egl_display_query_config; - backend->filter_config = __tpl_wl_egl_display_filter_config; - backend->get_window_info = __tpl_wl_egl_display_get_window_info; - backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info; - backend->get_buffer_from_native_pixmap = - __tpl_wl_egl_display_get_buffer_from_native_pixmap; -} - -void -__tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend) -{ - TPL_ASSERT(backend); - - backend->type = TPL_BACKEND_WAYLAND_THREAD; - backend->data = NULL; - - backend->init = 
__tpl_wl_egl_surface_init; - backend->fini = __tpl_wl_egl_surface_fini; - backend->validate = __tpl_wl_egl_surface_validate; - backend->cancel_dequeued_buffer = - __tpl_wl_egl_surface_cancel_dequeued_buffer; - backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer; - backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer; - backend->set_rotation_capability = - __tpl_wl_egl_surface_set_rotation_capability; - backend->set_post_interval = - __tpl_wl_egl_surface_set_post_interval; - backend->get_size = - __tpl_wl_egl_surface_get_size; -} - -/* -- BEGIN -- wl_egl_window callback functions */ -static void -__cb_destroy_callback(void *private) -{ - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - - if (!tizen_private) { - TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface"); - return; - } - - wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - if (wl_egl_surface) { - TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] 
wl_egl_window(%p) is destroyed.", - wl_egl_surface->wl_egl_window); - TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface."); - - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - wl_egl_surface->wl_egl_window->destroy_window_callback = NULL; - wl_egl_surface->wl_egl_window->resize_callback = NULL; - wl_egl_surface->wl_egl_window->driver_private = NULL; - wl_egl_surface->wl_egl_window = NULL; - wl_egl_surface->surf = NULL; - wl_egl_surface->is_destroying = TPL_TRUE; - - tizen_private->set_window_serial_callback = NULL; - tizen_private->rotate_callback = NULL; - tizen_private->get_rotation_capability = NULL; - tizen_private->set_frontbuffer_callback = NULL; - tizen_private->create_commit_sync_fd = NULL; - tizen_private->create_presentation_sync_fd = NULL; - tizen_private->data = NULL; - free(tizen_private); - tizen_private = NULL; - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } -} + tbm_surface_internal_ref(tbm_surface); -static void -__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, + "wl_egl_buffer sould be not NULL"); - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - int cur_w, cur_h, req_w, req_h, format; + if (wl_egl_buffer->wl_buffer == NULL) { + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + wl_egl_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + wl_egl_display->wl_tbm_client, tbm_surface); - if (!wl_egl_surface) { - TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", - wl_egl_window); - return; - } + if (!wl_egl_buffer->wl_buffer) { + TPL_WARN("Failed to create wl_buffer. 
wl_tbm_client(%p) tbm_surface(%p)", + wl_egl_display->wl_tbm_client, tbm_surface); + } + } - format = wl_egl_surface->format; - cur_w = wl_egl_surface->width; - cur_h = wl_egl_surface->height; - req_w = wl_egl_window->width; - req_h = wl_egl_window->height; + if (wl_egl_buffer->acquire_fence_fd != -1) { + if (wl_egl_surface->surface_sync) + ready_to_commit = TPL_TRUE; + else { + if (wl_egl_buffer->waiting_source) { + tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); + wl_egl_buffer->waiting_source = NULL; + } - TPL_INFO("[WINDOW_RESIZE]", - "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)", - wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h); + wl_egl_buffer->waiting_source = + tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer, + wl_egl_buffer->acquire_fence_fd, &buffer_funcs, + SOURCE_TYPE_DISPOSABLE); - if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format) - != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue); - return; - } -} -/* -- END -- wl_egl_window callback functions */ + __tpl_list_push_back(wl_egl_surface->fence_waiting_buffers, tbm_surface); -/* -- BEGIN -- wl_egl_window tizen private callback functions */ + TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)", + wl_egl_buffer->acquire_fence_fd); -/* There is no usecase for using prerotation callback below */ -static void -__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); + ready_to_commit = TPL_FALSE; + } + } - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; - int rotation = tizen_private->rotation; + if (ready_to_commit) { + if (wl_egl_surface->vblank_done) + ready_to_commit = TPL_TRUE; + else { + __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, tbm_surface); + 
ready_to_commit = TPL_FALSE; + } + } - if (!wl_egl_surface) { - TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", - wl_egl_window); - return; + if (ready_to_commit) + _thread_wl_surface_commit(wl_egl_surface, tbm_surface); } - TPL_INFO("[WINDOW_ROTATE]", - "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)", - wl_egl_surface, wl_egl_window, - wl_egl_surface->rotation, rotation); - - wl_egl_surface->rotation = rotation; + return TPL_ERROR_NONE; } -/* There is no usecase for using prerotation callback below */ -static int -__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window, - void *private) +/* -- BEGIN -- tdm_client vblank callback function */ +static void +__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, + unsigned int sequence, unsigned int tv_sec, + unsigned int tv_usec, void *user_data) { - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); - - int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE; - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data; + tbm_surface_h tbm_surface = NULL; - if (!wl_egl_surface) { - TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", - wl_egl_window); - return rotation_capability; - } + TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK"); - if (wl_egl_surface->rotation_capability == TPL_TRUE) - rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED; - else - rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED; + if (error == TDM_ERROR_TIMEOUT) + TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. 
wl_egl_surface(%p)", + wl_egl_surface); + wl_egl_surface->vblank_done = TPL_TRUE; - return rotation_capability; + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + tbm_surface = (tbm_surface_h)__tpl_list_pop_front( + wl_egl_surface->vblank_waiting_buffers, + NULL); + _thread_wl_surface_commit(wl_egl_surface, tbm_surface); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); } +/* -- END -- tdm_client vblank callback function */ static void -__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window, - void *private, unsigned int serial) +__cb_buffer_fenced_release(void *data, + struct zwp_linux_buffer_release_v1 *release, int32_t fence) { - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + if (wl_egl_buffer) + tbm_surface = wl_egl_buffer->tbm_surface; - if (!wl_egl_surface) { - TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.", - wl_egl_window); - return; - } + if (tbm_surface_internal_is_valid(tbm_surface)) { + if (wl_egl_buffer->need_to_release) { + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_queue_error_e tsq_err; - wl_egl_surface->set_serial_is_used = TPL_TRUE; - wl_egl_surface->serial = serial; -} + if (wl_egl_surface->committed_buffers) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + __tpl_list_remove_data(wl_egl_surface->committed_buffers, + (void *)tbm_surface, + TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } -static int -__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); + wl_egl_buffer->need_to_release = TPL_FALSE; - int commit_sync_fd = -1; + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + 
wl_egl_buffer->buffer_release = NULL; - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + wl_egl_buffer->release_fence_fd = fence; - if (!wl_egl_surface) { - TPL_ERR("Invalid parameter. wl_egl_surface is NULL", wl_egl_surface); - return -1; - } + TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", + _get_tbm_surface_bo_name(tbm_surface), + fence); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); - tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); + TPL_LOG_T("WL_EGL", + "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface), + fence); - if (wl_egl_surface->commit_sync.fd != -1) { - commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); - TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)", - wl_egl_surface->commit_sync.fd, commit_sync_fd); - TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)", - wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd); - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); - return commit_sync_fd; - } + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); - wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC); - if (wl_egl_surface->commit_sync.fd == -1) { - TPL_ERR("Failed to create commit_sync_fd. 
wl_egl_surface(%p)", wl_egl_surface); - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); - return -1; + tbm_surface_internal_unref(tbm_surface); + } + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); } +} - commit_sync_fd = dup(wl_egl_surface->commit_sync.fd); +static void +__cb_buffer_immediate_release(void *data, + struct zwp_linux_buffer_release_v1 *release) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; - TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)", - wl_egl_surface->commit_sync.fd, commit_sync_fd); - TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)", - wl_egl_surface, commit_sync_fd); + if (wl_egl_buffer) + tbm_surface = wl_egl_buffer->tbm_surface; - tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); + if (tbm_surface_internal_is_valid(tbm_surface)) { + if (wl_egl_buffer->need_to_release) { + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_queue_error_e tsq_err; - return commit_sync_fd; -} + if (wl_egl_surface->committed_buffers) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + __tpl_list_remove_data(wl_egl_surface->committed_buffers, + (void *)tbm_surface, + TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } -static int -__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private) -{ - TPL_ASSERT(private); - TPL_ASSERT(wl_egl_window); + wl_egl_buffer->need_to_release = TPL_FALSE; - int presentation_sync_fd = -1; + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + wl_egl_buffer->buffer_release = NULL; - struct tizen_private *tizen_private = (struct tizen_private *)private; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data; + wl_egl_buffer->release_fence_fd = -1; - if (!wl_egl_surface) { - TPL_ERR("Invalid parameter. 
wl_egl_surface is NULL", wl_egl_surface); - return -1; - } + TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); - tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - if (wl_egl_surface->presentation_sync.fd != -1) { - presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); - TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)", - wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", - wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - return presentation_sync_fd; - } + TPL_LOG_T("WL_EGL", + "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); - wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC); - if (wl_egl_surface->presentation_sync.fd == -1) { - TPL_ERR("Failed to create presentation_sync_fd. 
wl_egl_surface(%p)", wl_egl_surface); - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - return -1; + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + + tbm_surface_internal_unref(tbm_surface); + } + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); } +} - presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd); - TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)", - wl_egl_surface->presentation_sync.fd, presentation_sync_fd); - TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)", - wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd); +static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = { + __cb_buffer_fenced_release, + __cb_buffer_immediate_release, +}; - tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); +static void +__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) +{ + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tbm_surface_h tbm_surface = NULL; - return presentation_sync_fd; -} -/* -- END -- wl_egl_window tizen private callback functions */ + if (wl_egl_buffer) + tbm_surface = wl_egl_buffer->tbm_surface; -/* -- BEGIN -- tizen_surface_shm_flusher_listener */ -static void __cb_tss_flusher_flush_callback(void *data, - struct tizen_surface_shm_flusher *tss_flusher) -{ - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + if (tbm_surface_internal_is_valid(tbm_surface)) { + if (wl_egl_buffer->need_to_release) { + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_queue_error_e tsq_err; - TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->tbm_queue); + tsq_err = 
tbm_surface_queue_release(wl_egl_surface->tbm_queue, + tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); - tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue); - return; - } -} + if (wl_egl_surface->committed_buffers) { + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + __tpl_list_remove_data(wl_egl_surface->committed_buffers, + (void *)tbm_surface, + TPL_FIRST, NULL); + tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + } + + wl_egl_buffer->need_to_release = TPL_FALSE; -static void __cb_tss_flusher_free_flush_callback(void *data, - struct tizen_surface_shm_flusher *tss_flusher) -{ - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); - TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->tbm_queue); + TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); - tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue); - return; + tbm_surface_internal_unref(tbm_surface); + } + } else { + TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); } } -static const struct tizen_surface_shm_flusher_listener -tss_flusher_listener = { - __cb_tss_flusher_flush_callback, - __cb_tss_flusher_free_flush_callback +static const struct wl_buffer_listener wl_buffer_release_listener = { + (void *)__cb_wl_buffer_release, }; -/* -- END -- 
tizen_surface_shm_flusher_listener */ - -/* -- BEGIN -- tbm_surface_queue callback funstions */ static void -__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, - void *data) +__cb_presentation_feedback_sync_output(void *data, + struct wp_presentation_feedback *presentation_feedback, + struct wl_output *output) { - tpl_wl_egl_surface_t *wl_egl_surface = NULL; - tpl_wl_egl_display_t *wl_egl_display = NULL; - tpl_surface_t *surface = NULL; - tpl_bool_t is_activated = TPL_FALSE; - int width, height; + TPL_IGNORE(data); + TPL_IGNORE(presentation_feedback); + TPL_IGNORE(output); + /* Nothing to do */ +} - wl_egl_surface = (tpl_wl_egl_surface_t *)data; - TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); +static void +__cb_presentation_feedback_presented(void *data, + struct wp_presentation_feedback *presentation_feedback, + uint32_t tv_sec_hi, + uint32_t tv_sec_lo, + uint32_t tv_nsec, + uint32_t refresh_nsec, + uint32_t seq_hi, + uint32_t seq_lo, + uint32_t flags) +{ + TPL_IGNORE(tv_sec_hi); + TPL_IGNORE(tv_sec_lo); + TPL_IGNORE(tv_nsec); + TPL_IGNORE(refresh_nsec); + TPL_IGNORE(seq_hi); + TPL_IGNORE(seq_lo); + TPL_IGNORE(flags); - wl_egl_display = wl_egl_surface->wl_egl_display; - TPL_CHECK_ON_NULL_RETURN(wl_egl_display); + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; - surface = wl_egl_surface->tpl_surface; - TPL_CHECK_ON_NULL_RETURN(surface); + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - /* When the queue is resized, change the reset flag to TPL_TRUE to reflect - * the changed window size at the next frame. 
*/ - width = tbm_surface_queue_get_width(tbm_queue); - height = tbm_surface_queue_get_height(tbm_queue); - if (surface->width != width || surface->height != height) { - TPL_INFO("[QUEUE_RESIZE]", - "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)", - wl_egl_surface, tbm_queue, - surface->width, surface->height, width, height); - } + TPL_DEBUG("[FEEDBACK][PRESENTED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); - /* When queue_reset_callback is called, if is_activated is different from - * its previous state change the reset flag to TPL_TRUE to get a new buffer - * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ - is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client, - wl_egl_surface->tbm_queue); - if (wl_egl_surface->is_activated != is_activated) { - if (is_activated) { - TPL_INFO("[ACTIVATED]", - "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); - } else { - TPL_LOG_T("[DEACTIVATED]", - " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)", - wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue); + if (wl_egl_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (ret == -1) { + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + wl_egl_buffer->presentation_sync_fd); } + + TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd, + "[PRESENTATION_SYNC] bo(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; } - wl_egl_surface->reset = TPL_TRUE; + if (wl_egl_buffer->presentation_feedback) + wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback); - if (surface->reset_cb) - surface->reset_cb(surface->reset_data); + wl_egl_buffer->presentation_feedback = NULL; + + __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, 
tbm_surface, + TPL_FIRST, NULL); + + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); } static void -__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue, - void *data) +__cb_presentation_feedback_discarded(void *data, + struct wp_presentation_feedback *presentation_feedback) { - TPL_IGNORE(tbm_queue); + tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface; - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data; - TPL_CHECK_ON_NULL_RETURN(wl_egl_surface); + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); + TPL_DEBUG("[FEEDBACK][DISCARDED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)", + wl_egl_surface, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); - tpl_gsource_send_message(wl_egl_surface->surf_source, 2); + if (wl_egl_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (ret == -1) { + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + wl_egl_buffer->presentation_sync_fd); + } - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd, + "[PRESENTATION_SYNC] bo(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; + } + + if (wl_egl_buffer->presentation_feedback) + wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback); + + wl_egl_buffer->presentation_feedback = NULL; + + __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface, + TPL_FIRST, NULL); + + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); } -/* -- END -- tbm_surface_queue callback funstions */ +static const struct wp_presentation_feedback_listener feedback_listener = { + __cb_presentation_feedback_sync_output, /* 
sync_output feedback -*/ + __cb_presentation_feedback_presented, + __cb_presentation_feedback_discarded +}; -/* tdm_client vblank callback function */ -static void -__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error, - unsigned int sequence, unsigned int tv_sec, - unsigned int tv_usec, void *user_data) +static tpl_result_t +_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface) { - tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data; - tbm_surface_h tbm_surface = NULL; + tdm_error tdm_err = TDM_ERROR_NONE; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; - TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK"); + if (wl_egl_surface->vblank == NULL) { + wl_egl_surface->vblank = + _thread_create_tdm_client_vblank(wl_egl_display->tdm_client); + if (!wl_egl_surface->vblank) { + TPL_WARN("Failed to create vblank. wl_egl_surface(%p)", + wl_egl_surface); + return TPL_ERROR_OUT_OF_MEMORY; + } + } - if (error == TDM_ERROR_TIMEOUT) - TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)", - wl_egl_surface); + tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank, + wl_egl_surface->post_interval, + __cb_tdm_client_vblank, + (void *)wl_egl_surface); - wl_egl_surface->vblank_done = TPL_TRUE; + if (tdm_err == TDM_ERROR_NONE) { + wl_egl_surface->vblank_done = TPL_FALSE; + TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK"); + } else { + TPL_ERR("Failed to tdm_client_vblank_wait. 
tdm_err(%d)", tdm_err); + return TPL_ERROR_INVALID_OPERATION; + } - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - tbm_surface = (tbm_surface_h)__tpl_list_pop_front( - wl_egl_surface->vblank_waiting_buffers, - NULL); - _thread_wl_surface_commit(wl_egl_surface, tbm_surface); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + return TPL_ERROR_NONE; } static void -__cb_buffer_fenced_release(void *data, - struct zwp_linux_buffer_release_v1 *release, int32_t fence) +_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, + tbm_surface_h tbm_surface) { - tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; - tbm_surface_h tbm_surface = NULL; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + struct wl_surface *wl_surface = wl_egl_surface->wl_surface; + struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window; + uint32_t version; - if (wl_egl_buffer) - tbm_surface = wl_egl_buffer->tbm_surface; + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, + "wl_egl_buffer sould be not NULL"); + + if (wl_egl_buffer->wl_buffer == NULL) { + wl_egl_buffer->wl_buffer = + (struct wl_proxy *)wayland_tbm_client_create_buffer( + wl_egl_display->wl_tbm_client, tbm_surface); + } + TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL, + "[FATAL] Failed to create wl_buffer"); + + wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer, + &wl_buffer_release_listener, wl_egl_buffer); + + version = wl_proxy_get_version((struct wl_proxy *)wl_surface); + + tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex); + if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) { + wl_egl_buffer->presentation_feedback = + wp_presentation_feedback(wl_egl_display->presentation, + wl_surface); + wp_presentation_feedback_add_listener(wl_egl_buffer->presentation_feedback, + &feedback_listener, wl_egl_buffer); + 
__tpl_list_push_back(wl_egl_surface->presentation_feedbacks, tbm_surface); + TRACE_ASYNC_BEGIN(wl_egl_buffer->presentation_sync_fd, + "[PRESENTATION_SYNC] bo(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + } + tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); + + if (wl_egl_buffer->w_rotated == TPL_TRUE) { + wayland_tbm_client_set_buffer_transform( + wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer, + wl_egl_buffer->w_transform); + wl_egl_buffer->w_rotated = TPL_FALSE; + } - if (tbm_surface_internal_is_valid(tbm_surface)) { - if (wl_egl_buffer->need_to_release) { - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tbm_surface_queue_error_e tsq_err; + if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) { + wl_egl_surface->latest_transform = wl_egl_buffer->transform; + wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform); + } - if (wl_egl_surface->committed_buffers) { - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - __tpl_list_remove_data(wl_egl_surface->committed_buffers, - (void *)tbm_surface, - TPL_FIRST, NULL); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } + if (wl_egl_window) { + wl_egl_window->attached_width = wl_egl_buffer->width; + wl_egl_window->attached_height = wl_egl_buffer->height; + } - wl_egl_buffer->need_to_release = TPL_FALSE; + wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer, + wl_egl_buffer->dx, wl_egl_buffer->dy); - zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); - wl_egl_buffer->buffer_release = NULL; + if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) { + if (version < 4) { + wl_surface_damage(wl_surface, + wl_egl_buffer->dx, wl_egl_buffer->dy, + wl_egl_buffer->width, wl_egl_buffer->height); + } else { + wl_surface_damage_buffer(wl_surface, + 0, 0, + wl_egl_buffer->width, wl_egl_buffer->height); + } + } else { + int i; + for (i = 0; i < wl_egl_buffer->num_rects; i++) { + int inverted_y = + 
wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] + + wl_egl_buffer->rects[i * 4 + 3]); + if (version < 4) { + wl_surface_damage(wl_surface, + wl_egl_buffer->rects[i * 4 + 0], + inverted_y, + wl_egl_buffer->rects[i * 4 + 2], + wl_egl_buffer->rects[i * 4 + 3]); + } else { + wl_surface_damage_buffer(wl_surface, + wl_egl_buffer->rects[i * 4 + 0], + inverted_y, + wl_egl_buffer->rects[i * 4 + 2], + wl_egl_buffer->rects[i * 4 + 3]); + } + } + } - wl_egl_buffer->release_fence_fd = fence; + wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer, + wl_egl_buffer->serial); - TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", - _get_tbm_surface_bo_name(tbm_surface), - fence); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); + wl_egl_buffer->need_to_release = TPL_TRUE; - TPL_LOG_T("WL_EGL", - "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", - wl_egl_buffer->wl_buffer, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface), - fence); + if (wl_egl_display->use_explicit_sync && + wl_egl_surface->surface_sync) { - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync, + wl_egl_buffer->acquire_fence_fd); + TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)", + wl_egl_surface, tbm_surface, wl_egl_buffer->acquire_fence_fd); + close(wl_egl_buffer->acquire_fence_fd); + wl_egl_buffer->acquire_fence_fd = -1; - tbm_surface_internal_unref(tbm_surface); + wl_egl_buffer->buffer_release = + zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync); + if (!wl_egl_buffer->buffer_release) { + TPL_ERR("Failed to get buffer_release. 
wl_egl_surface(%p)", wl_egl_surface); + } else { + zwp_linux_buffer_release_v1_add_listener( + wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer); + TPL_DEBUG("add explicit_sync_release_listener."); } - } else { - TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); } -} -static void -__cb_buffer_immediate_release(void *data, - struct zwp_linux_buffer_release_v1 *release) -{ - tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; - tbm_surface_h tbm_surface = NULL; + wl_surface_commit(wl_surface); - if (wl_egl_buffer) - tbm_surface = wl_egl_buffer->tbm_surface; + wl_display_flush(wl_egl_display->wl_display); - if (tbm_surface_internal_is_valid(tbm_surface)) { - if (wl_egl_buffer->need_to_release) { - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tbm_surface_queue_error_e tsq_err; + TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + _get_tbm_surface_bo_name(tbm_surface)); - if (wl_egl_surface->committed_buffers) { - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - __tpl_list_remove_data(wl_egl_surface->committed_buffers, - (void *)tbm_surface, - TPL_FIRST, NULL); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } + wl_egl_buffer->need_to_commit = TPL_FALSE; - wl_egl_buffer->need_to_release = TPL_FALSE; + TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", + wl_egl_buffer->wl_buffer, tbm_surface, + _get_tbm_surface_bo_name(tbm_surface)); - zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); - wl_egl_buffer->buffer_release = NULL; + if (wl_egl_display->tdm_initialized && + _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE) + TPL_ERR("Failed to set wait vblank."); - wl_egl_buffer->release_fence_fd = -1; + if (wl_egl_surface->committed_buffers) { + __tpl_list_push_back(wl_egl_surface->committed_buffers, tbm_surface); + } - TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, 
"[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); + tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); - TPL_LOG_T("WL_EGL", - "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)", - wl_egl_buffer->wl_buffer, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); + if (wl_egl_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (ret == -1) { + TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd); + } - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)", + _get_tbm_surface_bo_name(tbm_surface)); + TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)", + wl_egl_surface, wl_egl_buffer->commit_sync_fd); - tbm_surface_internal_unref(tbm_surface); - } - } else { - TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); + close(wl_egl_buffer->commit_sync_fd); + wl_egl_buffer->commit_sync_fd = -1; } -} -static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = { - __cb_buffer_fenced_release, - __cb_buffer_immediate_release, -}; + tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex); +} -static void -__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) +static int +_write_to_eventfd(int eventfd) { - tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; - tbm_surface_h tbm_surface = NULL; + uint64_t value = 1; + int ret; - if (wl_egl_buffer) - tbm_surface = wl_egl_buffer->tbm_surface; + if (eventfd == -1) { + TPL_ERR("Invalid fd(-1)"); + return -1; + } - if (tbm_surface_internal_is_valid(tbm_surface)) { - if (wl_egl_buffer->need_to_release) { - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tbm_surface_queue_error_e tsq_err; + ret = write(eventfd, &value, sizeof(uint64_t)); 
+ if (ret == -1) { + TPL_ERR("failed to write to fd(%d)", eventfd); + return ret; + } - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, - tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); + return ret; +} - if (wl_egl_surface->committed_buffers) { - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - __tpl_list_remove_data(wl_egl_surface->committed_buffers, - (void *)tbm_surface, - TPL_FIRST, NULL); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } +void +__tpl_display_init_backend_wl_egl_thread2(tpl_display_backend_t *backend) +{ + TPL_ASSERT(backend); - wl_egl_buffer->need_to_release = TPL_FALSE; + backend->type = TPL_BACKEND_WAYLAND_THREAD; + backend->data = NULL; - TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); + backend->init = __tpl_wl_egl_display_init; + backend->fini = __tpl_wl_egl_display_fini; + backend->query_config = __tpl_wl_egl_display_query_config; + backend->filter_config = __tpl_wl_egl_display_filter_config; + backend->get_window_info = __tpl_wl_egl_display_get_window_info; + backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info; + backend->get_buffer_from_native_pixmap = + __tpl_wl_egl_display_get_buffer_from_native_pixmap; +} - TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", - wl_egl_buffer->wl_buffer, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); +void +__tpl_surface_init_backend_wl_egl_thread2(tpl_surface_backend_t *backend) +{ + TPL_ASSERT(backend); - tbm_surface_internal_unref(tbm_surface); - } - } else { - TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); - } + backend->type = TPL_BACKEND_WAYLAND_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_egl_surface_init; + backend->fini = __tpl_wl_egl_surface_fini; + backend->validate = __tpl_wl_egl_surface_validate; + 
backend->cancel_dequeued_buffer = + __tpl_wl_egl_surface_cancel_dequeued_buffer; + backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer; + backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer; + backend->set_rotation_capability = + __tpl_wl_egl_surface_set_rotation_capability; + backend->set_post_interval = + __tpl_wl_egl_surface_set_post_interval; + backend->get_size = + __tpl_wl_egl_surface_get_size; } -void +static void __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) { tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->disp_source; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); -- 2.7.4 From 874c7f35418c05993376894eeb939870cbc338ae Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Mon, 1 Feb 2021 12:25:02 +0900 Subject: [PATCH 15/16] Change to use buffers array instead of tpl_list. - There is no need to maintain the tpl_list which was divided according to usage and divided into several. - One wl_egl_surface->buffers is created, but each buffer has a buffer_status. Change-Id: I4d148fcbb2e13fab9d0904b2ab1b61604ee5895a Signed-off-by: Joonbum Ko --- src/tpl_wl_egl.c | 389 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 204 insertions(+), 185 deletions(-) diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 21f8073..320827d 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -32,6 +32,7 @@ static int wl_egl_buffer_key; /* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. 
*/ #define CLIENT_QUEUE_SIZE 3 +#define BUFFER_ARRAY_SIZE (CLIENT_QUEUE_SIZE * 2) typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t; typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t; @@ -88,10 +89,11 @@ struct _tpl_wl_egl_surface { tpl_wl_egl_display_t *wl_egl_display; tpl_surface_t *tpl_surface; - /* the lists for buffer tracing */ - tpl_list_t *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */ - tpl_list_t *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */ - tpl_list_t *fence_waiting_buffers; /* Trace buffers from ENQUEUE to fence signaled */ + /* wl_egl_buffer array for buffer tracing */ + tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE]; + int buffer_cnt; /* the number of using wl_egl_buffers */ + tpl_gmutex buffers_mutex; + tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */ tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */ @@ -118,6 +120,16 @@ struct _tpl_wl_egl_surface { tpl_bool_t set_serial_is_used; }; +typedef enum buffer_status { + RELEASED = 0, + DEQUEUED, + ENQUEUED, + ACQUIRED, + WAITING_SIGNALED, + WAITING_VBLANK, + COMMITTED, +} buffer_status_t; + struct _tpl_wl_egl_buffer { tbm_surface_h tbm_surface; @@ -125,6 +137,9 @@ struct _tpl_wl_egl_buffer { int dx, dy; /* position to attach to wl_surface */ int width, height; /* size to attach to wl_surface */ + buffer_status_t status; /* for tracing buffer status */ + int idx; /* position index in buffers array of wl_egl_surface */ + /* for damage region */ int num_rects; int *rects; @@ -142,9 +157,6 @@ struct _tpl_wl_egl_buffer { /* for checking need_to_commit (frontbuffer mode) */ tpl_bool_t need_to_commit; - /* for checking need to release */ - tpl_bool_t need_to_release; - /* for checking draw done */ tpl_bool_t draw_done; @@ -175,12 +187,12 @@ struct _tpl_wl_egl_buffer { tpl_gsource *waiting_source; + tpl_gmutex mutex; + tpl_gcond cond; + tpl_wl_egl_surface_t *wl_egl_surface; }; - -static void 
-__cb_buffer_remove_from_list(void *data); static int _get_tbm_surface_bo_name(tbm_surface_h tbm_surface); static void @@ -422,7 +434,7 @@ _thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display) } wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display); - if (wl_egl_display->ev_queue) { + if (!wl_egl_display->ev_queue) { TPL_ERR("Failed to create wl_queue wl_display(%p)", wl_egl_display->wl_display); result = TPL_ERROR_INVALID_OPERATION; @@ -1352,52 +1364,65 @@ _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface) tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex); - if (wl_egl_surface->in_use_buffers) { - __tpl_list_free(wl_egl_surface->in_use_buffers, - (tpl_free_func_t)__cb_buffer_remove_from_list); - wl_egl_surface->in_use_buffers = NULL; - } - if (wl_egl_surface->committed_buffers) { - while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) { - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tbm_surface_h tbm_surface = - __tpl_list_pop_front(wl_egl_surface->committed_buffers, - (tpl_free_func_t)__cb_buffer_remove_from_list); + { + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + int idx = 0; + tpl_bool_t need_to_release = TPL_FALSE; + tpl_bool_t need_to_cancel = TPL_FALSE; + + while (wl_egl_surface->buffer_cnt) { + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + wl_egl_buffer = wl_egl_surface->buffers[idx]; + if (wl_egl_buffer) { + TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%d)", + idx, wl_egl_buffer, + wl_egl_buffer->tbm_surface, wl_egl_buffer->status); + + wl_egl_surface->buffers[idx] = NULL; + wl_egl_surface->buffer_cnt--; + } else { + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + idx++; + continue; + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - tsq_err = 
tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", - tbm_surface, tsq_err); - } - __tpl_list_free(wl_egl_surface->committed_buffers, NULL); - wl_egl_surface->committed_buffers = NULL; - } + tpl_gmutex_lock(&wl_egl_buffer->mutex); - if (wl_egl_surface->vblank_waiting_buffers) { - while (!__tpl_list_is_empty(wl_egl_surface->vblank_waiting_buffers)) { - tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - tbm_surface_h tbm_surface = - __tpl_list_pop_front(wl_egl_surface->vblank_waiting_buffers, - (tpl_free_func_t)__cb_buffer_remove_from_list); + need_to_release = (wl_egl_buffer->status == ACQUIRED || + wl_egl_buffer->status == WAITING_SIGNALED || + wl_egl_buffer->status == WAITING_VBLANK || + wl_egl_buffer->status == COMMITTED); + + need_to_cancel = wl_egl_buffer->status == DEQUEUED; - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); + if (wl_egl_buffer->status == WAITING_SIGNALED) + tpl_gcond_wait(&wl_egl_buffer->cond, &wl_egl_buffer->mutex); + + if (need_to_release) { + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + wl_egl_buffer->tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) TPL_ERR("Failed to release. 
tbm_surface(%p) tsq_err(%d)", - tbm_surface, tsq_err); - } - __tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL); - wl_egl_surface->vblank_waiting_buffers = NULL; - } + wl_egl_buffer->tbm_surface, tsq_err); + } - if (wl_egl_surface->fence_waiting_buffers) { - while (!__tpl_list_is_empty(wl_egl_surface->fence_waiting_buffers)) { - tbm_surface_h tbm_surface = - __tpl_list_pop_front(wl_egl_surface->fence_waiting_buffers, - NULL); - /* TODO */ + if (need_to_cancel) { + tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue, + wl_egl_buffer->tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)", + wl_egl_buffer->tbm_surface, tsq_err); + } + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + if (need_to_release || need_to_cancel) + tbm_surface_internal_unref(wl_egl_buffer->tbm_surface); + + idx++; } } @@ -1559,6 +1584,13 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) wl_egl_surface->presentation_sync.fd = -1; { + int i = 0; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) + wl_egl_surface->buffers[i] = NULL; + wl_egl_surface->buffer_cnt = 0; + } + + { struct tizen_private *tizen_private = NULL; if (wl_egl_window->driver_private) @@ -1586,6 +1618,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface) tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex); tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex); + tpl_gmutex_init(&wl_egl_surface->buffers_mutex); + tpl_gmutex_init(&wl_egl_surface->surf_mutex); tpl_gcond_init(&wl_egl_surface->surf_cond); @@ -1785,9 +1819,6 @@ _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface) } } - wl_egl_surface->committed_buffers = __tpl_list_alloc(); - wl_egl_surface->in_use_buffers = __tpl_list_alloc(); - wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc(); wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc(); wl_egl_surface->presentation_feedbacks = __tpl_list_alloc(); } @@ -1952,21 +1983,24 @@ 
_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface) return TPL_ERROR_INVALID_OPERATION; } - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - if (wl_egl_surface->committed_buffers) { - while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) { - tbm_surface_h tbm_surface = - __tpl_list_pop_front(wl_egl_surface->committed_buffers, - (tpl_free_func_t)__cb_buffer_remove_from_list); - TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", - _get_tbm_surface_bo_name(tbm_surface)); - tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) - TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", - tbm_surface, tsq_err); + { + int i; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) { + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + wl_egl_buffer = wl_egl_surface->buffers[i]; + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + if (wl_egl_buffer && wl_egl_buffer->status == COMMITTED) { + wl_egl_buffer->status = RELEASED; + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, + wl_egl_buffer->tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. 
tbm_surface(%p) tsq_err(%d)", + wl_egl_buffer->tbm_surface, tsq_err); + tbm_surface_internal_unref(wl_egl_buffer->tbm_surface); + } } } - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); TPL_INFO("[FORCE_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)", @@ -2047,11 +2081,28 @@ _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface, wl_egl_buffer->tbm_surface = tbm_surface; wl_egl_buffer->wl_egl_surface = wl_egl_surface; + wl_egl_buffer->status = RELEASED; + wl_egl_buffer->dx = wl_egl_window->dx; wl_egl_buffer->dy = wl_egl_window->dy; wl_egl_buffer->width = tbm_surface_get_width(tbm_surface); wl_egl_buffer->height = tbm_surface_get_height(tbm_surface); + tpl_gmutex_init(&wl_egl_buffer->mutex); + tpl_gcond_init(&wl_egl_buffer->cond); + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + { + int i; + for (i = 0; i < BUFFER_ARRAY_SIZE; i++) + if (wl_egl_surface->buffers[i] == NULL) break; + + wl_egl_surface->buffer_cnt++; + wl_egl_surface->buffers[i] = wl_egl_buffer; + wl_egl_buffer->idx = i; + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + TPL_INFO("[WL_EGL_BUFFER_CREATE]", "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)", wl_egl_surface, wl_egl_buffer, tbm_surface, @@ -2175,6 +2226,9 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface); TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer."); + tpl_gmutex_lock(&wl_egl_buffer->mutex); + wl_egl_buffer->status = DEQUEUED; + /* If wl_egl_buffer->release_fence_fd is -1, * the tbm_surface can be used immediately. * If not, user(EGL) have to wait until signaled. */ @@ -2194,6 +2248,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns, TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)", wl_egl_buffer, tbm_surface, bo_name, release_fence ? 
*release_fence : -1); + tpl_gmutex_unlock(&wl_egl_buffer->mutex); tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex); return tbm_surface; @@ -2208,6 +2263,7 @@ __tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data; + tpl_wl_egl_buffer_t *wl_egl_buffer = NULL; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; if (!tbm_surface_internal_is_valid(tbm_surface)) { @@ -2215,11 +2271,12 @@ __tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, return TPL_ERROR_INVALID_PARAMETER; } - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - /* Stop tracking of this canceled tbm_surface */ - __tpl_list_remove_data(wl_egl_surface->in_use_buffers, - (void *)tbm_surface, TPL_FIRST, NULL); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); + wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + if (wl_egl_buffer) { + tpl_gmutex_lock(&wl_egl_buffer->mutex); + wl_egl_buffer->status = RELEASED; + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + } tbm_surface_internal_unref(tbm_surface); @@ -2270,6 +2327,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, wl_egl_surface, tbm_surface, bo_name, acquire_fence); wl_egl_buffer = _get_wl_egl_buffer(tbm_surface); + tpl_gmutex_lock(&wl_egl_buffer->mutex); /* If there are received region information, save it to wl_egl_buffer */ if (num_rects && rects) { @@ -2284,6 +2342,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (!wl_egl_buffer->rects) { TPL_ERR("Failed to allocate memory fo damage rects info."); + tpl_gmutex_unlock(&wl_egl_buffer->mutex); return TPL_ERROR_OUT_OF_MEMORY; } @@ -2295,6 +2354,7 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue", ((surface->frontbuffer == tbm_surface) ? 
"ON" : "OFF"), tbm_surface); TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name); + tpl_gmutex_unlock(&wl_egl_buffer->mutex); return TPL_ERROR_NONE; } @@ -2318,8 +2378,11 @@ __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface, if (wl_egl_buffer->acquire_fence_fd != -1) close(wl_egl_buffer->acquire_fence_fd); - + wl_egl_buffer->acquire_fence_fd = acquire_fence; + wl_egl_buffer->status = ENQUEUED; + + tpl_gmutex_unlock(&wl_egl_buffer->mutex); tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue, tbm_surface); @@ -2351,9 +2414,12 @@ __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message) TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)", wl_egl_buffer->acquire_fence_fd); + tpl_gmutex_lock(&wl_egl_buffer->mutex); + tpl_gcond_signal(&wl_egl_buffer->cond); + wl_egl_buffer->status = WAITING_VBLANK; + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - __tpl_list_remove_data(wl_egl_surface->fence_waiting_buffers, - (void *)tbm_surface, TPL_FIRST, NULL); if (wl_egl_surface->vblank_done) _thread_wl_surface_commit(wl_egl_surface, tbm_surface); @@ -2412,6 +2478,10 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL, "wl_egl_buffer sould be not NULL"); + tpl_gmutex_lock(&wl_egl_buffer->mutex); + + wl_egl_buffer->status = ACQUIRED; + if (wl_egl_buffer->wl_buffer == NULL) { tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; wl_egl_buffer->wl_buffer = @@ -2437,8 +2507,7 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer, wl_egl_buffer->acquire_fence_fd, &buffer_funcs, SOURCE_TYPE_DISPOSABLE); - - __tpl_list_push_back(wl_egl_surface->fence_waiting_buffers, tbm_surface); + wl_egl_buffer->status = WAITING_SIGNALED; TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)", 
wl_egl_buffer->acquire_fence_fd); @@ -2451,11 +2520,14 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface) if (wl_egl_surface->vblank_done) ready_to_commit = TPL_TRUE; else { + wl_egl_buffer->status = WAITING_VBLANK; __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, tbm_surface); ready_to_commit = TPL_FALSE; } } + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + if (ready_to_commit) _thread_wl_surface_commit(wl_egl_surface, tbm_surface); } @@ -2496,28 +2568,22 @@ __cb_buffer_fenced_release(void *data, tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; tbm_surface_h tbm_surface = NULL; - if (wl_egl_buffer) - tbm_surface = wl_egl_buffer->tbm_surface; + TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer); + + tbm_surface = wl_egl_buffer->tbm_surface; if (tbm_surface_internal_is_valid(tbm_surface)) { - if (wl_egl_buffer->need_to_release) { + if (wl_egl_buffer->status == COMMITTED) { tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; tbm_surface_queue_error_e tsq_err; - if (wl_egl_surface->committed_buffers) { - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - __tpl_list_remove_data(wl_egl_surface->committed_buffers, - (void *)tbm_surface, - TPL_FIRST, NULL); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } - - wl_egl_buffer->need_to_release = TPL_FALSE; + tpl_gmutex_lock(&wl_egl_buffer->mutex); zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); wl_egl_buffer->buffer_release = NULL; wl_egl_buffer->release_fence_fd = fence; + wl_egl_buffer->status = RELEASED; TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)", _get_tbm_surface_bo_name(tbm_surface), @@ -2531,6 +2597,8 @@ __cb_buffer_fenced_release(void *data, _get_tbm_surface_bo_name(tbm_surface), fence); + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -2550,28 +2618,22 @@ __cb_buffer_immediate_release(void *data, tpl_wl_egl_buffer_t 
*wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; tbm_surface_h tbm_surface = NULL; - if (wl_egl_buffer) - tbm_surface = wl_egl_buffer->tbm_surface; + TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer); + + tbm_surface = wl_egl_buffer->tbm_surface; if (tbm_surface_internal_is_valid(tbm_surface)) { - if (wl_egl_buffer->need_to_release) { + if (wl_egl_buffer->status == COMMITTED) { tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; tbm_surface_queue_error_e tsq_err; - if (wl_egl_surface->committed_buffers) { - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - __tpl_list_remove_data(wl_egl_surface->committed_buffers, - (void *)tbm_surface, - TPL_FIRST, NULL); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } - - wl_egl_buffer->need_to_release = TPL_FALSE; + tpl_gmutex_lock(&wl_egl_buffer->mutex); zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); wl_egl_buffer->buffer_release = NULL; wl_egl_buffer->release_fence_fd = -1; + wl_egl_buffer->status = RELEASED; TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); @@ -2583,6 +2645,8 @@ __cb_buffer_immediate_release(void *data, wl_egl_buffer->wl_buffer, tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) @@ -2606,28 +2670,24 @@ __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data; tbm_surface_h tbm_surface = NULL; - if (wl_egl_buffer) - tbm_surface = wl_egl_buffer->tbm_surface; + TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer) + + tbm_surface = wl_egl_buffer->tbm_surface; if (tbm_surface_internal_is_valid(tbm_surface)) { - if (wl_egl_buffer->need_to_release) { - tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; - tbm_surface_queue_error_e tsq_err; + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + 
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; + + tpl_gmutex_lock(&wl_egl_buffer->mutex); + + if (wl_egl_buffer->status == COMMITTED) { tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err); - if (wl_egl_surface->committed_buffers) { - tpl_gmutex_lock(&wl_egl_surface->surf_mutex); - __tpl_list_remove_data(wl_egl_surface->committed_buffers, - (void *)tbm_surface, - TPL_FIRST, NULL); - tpl_gmutex_unlock(&wl_egl_surface->surf_mutex); - } - - wl_egl_buffer->need_to_release = TPL_FALSE; + wl_egl_buffer->status = RELEASED; TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", @@ -2636,9 +2696,12 @@ __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer) TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", wl_egl_buffer->wl_buffer, tbm_surface, _get_tbm_surface_bo_name(tbm_surface)); + } + tpl_gmutex_unlock(&wl_egl_buffer->mutex); + + if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) tbm_surface_internal_unref(tbm_surface); - } } else { TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface); } @@ -2890,8 +2953,6 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, (void *)wl_egl_buffer->wl_buffer, wl_egl_buffer->serial); - wl_egl_buffer->need_to_release = TPL_TRUE; - if (wl_egl_display->use_explicit_sync && wl_egl_surface->surface_sync) { @@ -2920,7 +2981,8 @@ _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface)); - wl_egl_buffer->need_to_commit = TPL_FALSE; + wl_egl_buffer->need_to_commit = TPL_FALSE; + wl_egl_buffer->status = COMMITTED; TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", wl_egl_buffer->wl_buffer, tbm_surface, @@ -2930,10 +2992,6 @@ 
_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface, _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE) TPL_ERR("Failed to set wait vblank."); - if (wl_egl_surface->committed_buffers) { - __tpl_list_push_back(wl_egl_surface->committed_buffers, tbm_surface); - } - tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex); if (wl_egl_buffer->commit_sync_fd != -1) { @@ -3024,6 +3082,15 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + if (wl_egl_buffer->idx > 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) { + wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL; + wl_egl_surface->buffer_cnt--; + + wl_egl_buffer->idx = -1; + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + wl_display_flush(wl_egl_display->wl_display); if (wl_egl_buffer->wl_buffer) @@ -3063,15 +3130,6 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) free(wl_egl_buffer); } -static void -__cb_buffer_remove_from_list(void *data) -{ - tbm_surface_h tbm_surface = (tbm_surface_h)data; - - if (tbm_surface && tbm_surface_internal_is_valid(tbm_surface)) - tbm_surface_internal_unref(tbm_surface); -} - static int _get_tbm_surface_bo_name(tbm_surface_h tbm_surface) { @@ -3081,59 +3139,20 @@ _get_tbm_surface_bo_name(tbm_surface_h tbm_surface) static void _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) { - int count = 0; - int idx = 0; - tpl_list_node_t *node = NULL; - tbm_surface_h tbm_surface = NULL; - - /* vblank waiting list */ - count = __tpl_list_get_count(wl_egl_surface->vblank_waiting_buffers); - TPL_DEBUG("VBLANK WAITING BUFFERS | wl_egl_surface(%p) list(%p) count(%d)", - wl_egl_surface, wl_egl_surface->vblank_waiting_buffers, count); - - while ((!node && - (node = __tpl_list_get_front_node(wl_egl_surface->vblank_waiting_buffers))) || - (node && 
(node = __tpl_list_node_next(node)))) { - tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node); - TPL_DEBUG("VBLANK WAITING BUFFERS | %d | tbm_surface(%p) bo(%d)", - idx, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); - idx++; - } - - idx = 0; - node = NULL; - - /* in use buffers list */ - count = __tpl_list_get_count(wl_egl_surface->in_use_buffers); - TPL_DEBUG("DEQUEUED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)", - wl_egl_surface, wl_egl_surface->in_use_buffers, count); - - while ((!node && - (node = __tpl_list_get_front_node(wl_egl_surface->in_use_buffers))) || - (node && (node = __tpl_list_node_next(node)))) { - tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node); - TPL_DEBUG("DEQUEUED BUFFERS | %d | tbm_surface(%p) bo(%d)", - idx, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); - idx++; - } - - idx = 0; - node = NULL; - - /* committed buffers list */ - count = __tpl_list_get_count(wl_egl_surface->committed_buffers); - TPL_DEBUG("COMMITTED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)", - wl_egl_surface, wl_egl_surface->committed_buffers, count); - - while ((!node && - (node = __tpl_list_get_front_node(wl_egl_surface->committed_buffers))) || - (node && (node = __tpl_list_node_next(node)))) { - tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node); - TPL_DEBUG("COMMITTED BUFFERS | %d | tbm_surface(%p) bo(%d)", - idx, tbm_surface, - _get_tbm_surface_bo_name(tbm_surface)); - idx++; + int idx = 0; + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", + wl_egl_surface, wl_egl_surface->buffer_cnt); + for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { + tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; + if (wl_egl_buffer) { + TPL_INFO("[INFO]", + "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%d)", + idx, wl_egl_buffer, wl_egl_buffer->tbm_surface, + _get_tbm_surface_bo_name(wl_egl_buffer->tbm_surface), + wl_egl_buffer->status); + 
} } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); } -- 2.7.4 From 6bd1801e4eefd1a2ce533ed0aae189f3c1c176fa Mon Sep 17 00:00:00 2001 From: Joonbum Ko Date: Wed, 3 Feb 2021 10:27:03 +0900 Subject: [PATCH 16/16] Change the order of creation of tdm_source. Change-Id: I3bdfa3db71ed6a0dbacda4cf5a56d2ea81840d9e Signed-off-by: Joonbum Ko --- src/tpl_utils_gthread.c | 30 ++++++++++++++++-------- src/tpl_wl_egl.c | 62 ++++++++++++++++++++++++++++--------------------- 2 files changed, 55 insertions(+), 37 deletions(-) diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c index b1480c1..23e28d5 100644 --- a/src/tpl_utils_gthread.c +++ b/src/tpl_utils_gthread.c @@ -109,17 +109,19 @@ tpl_gthread_create(const char *thread_name, g_mutex_lock(&new_thread->thread_mutex); - new_thread->destroy_sig_source = - tpl_gsource_create(new_thread, new_thread, -1, - &thread_destroy_funcs, SOURCE_TYPE_FINALIZER); new_thread->loop = loop; + TPL_DEBUG("loop(%p)", loop); new_thread->init_func = init_func; new_thread->func_data = func_data; new_thread->thread = g_thread_new(thread_name, _tpl_gthread_init, new_thread); g_cond_wait(&new_thread->thread_cond, &new_thread->thread_mutex); + + new_thread->destroy_sig_source = + tpl_gsource_create(new_thread, new_thread, -1, + &thread_destroy_funcs, SOURCE_TYPE_FINALIZER); g_mutex_unlock(&new_thread->thread_mutex); return new_thread; @@ -207,16 +209,17 @@ _thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data) ret = gsource->gsource_funcs->dispatch(gsource, message); if (gsource->type == SOURCE_TYPE_FINALIZER) { - tpl_gsource *del_source = (tpl_gsource *)data; + tpl_gsource *del_source = (tpl_gsource *)gsource->data; if (!g_source_is_destroyed(&del_source->gsource)) { - g_mutex_lock(&del_source->thread->thread_mutex); + tpl_gthread *thread = del_source->thread; + g_mutex_lock(&thread->thread_mutex); g_source_remove_unix_fd(&del_source->gsource, del_source->tag); g_source_destroy(&del_source->gsource); 
g_source_unref(&del_source->gsource); - g_cond_signal(&del_source->thread->thread_cond); - g_mutex_unlock(&del_source->thread->thread_mutex); + g_cond_signal(&thread->thread_cond); + g_mutex_unlock(&thread->thread_mutex); } } } else { @@ -307,6 +310,9 @@ tpl_gsource_create(tpl_gthread *thread, void *data, int fd, g_source_attach(&new_gsource->gsource, g_main_loop_get_context(thread->loop)); + TPL_DEBUG("[GSOURCE_CREATE] tpl_gsource(%p) thread(%p) data(%p) fd(%d) type(%d)", + new_gsource, thread, data, new_gsource->fd, type); + return new_gsource; } @@ -319,14 +325,18 @@ tpl_gsource_destroy(tpl_gsource *source, tpl_bool_t destroy_in_thread) return; } + TPL_DEBUG("[GSOURCE_DESTROY] tpl_gsource(%p) type(%d)", + source, source->type); + if (destroy_in_thread) { + tpl_gthread *thread = source->thread; if (source->type == SOURCE_TYPE_NORMAL) { - g_mutex_lock(&source->thread->thread_mutex); + g_mutex_lock(&thread->thread_mutex); tpl_gsource_send_message(source->finalizer, 1); - g_cond_wait(&source->thread->thread_cond, &source->thread->thread_mutex); - g_mutex_unlock(&source->thread->thread_mutex); + g_cond_wait(&thread->thread_cond, &thread->thread_mutex); + g_mutex_unlock(&thread->thread_mutex); } } else { if (source->type == SOURCE_TYPE_NORMAL && diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c index 320827d..ba7cd6b 100644 --- a/src/tpl_wl_egl.c +++ b/src/tpl_wl_egl.c @@ -53,6 +53,7 @@ struct _tpl_wl_egl_display { tdm_client *tdm_client; tpl_gsource *tdm_source; + int tdm_display_fd; tpl_bool_t use_wait_vblank; tpl_bool_t use_explicit_sync; @@ -284,6 +285,7 @@ __thread_func_tdm_finalize(tpl_gsource *gsource) if (wl_egl_display->tdm_client) { tdm_client_destroy(wl_egl_display->tdm_client); wl_egl_display->tdm_client = NULL; + wl_egl_display->tdm_display_fd = -1; } wl_egl_display->tdm_initialized = TPL_FALSE; @@ -299,16 +301,10 @@ static tpl_gsource_functions tdm_funcs = { tpl_result_t _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display) { - tpl_gsource *tdm_source = 
NULL; tdm_client *tdm_client = NULL; int tdm_display_fd = -1; tdm_error tdm_err = TDM_ERROR_NONE; - if (!wl_egl_display->thread) { - TPL_ERR("thread should be created before init tdm_client."); - return TPL_ERROR_INVALID_OPERATION; - } - tdm_client = tdm_client_create(&tdm_err); if (!tdm_client || tdm_err != TDM_ERROR_NONE) { TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err); @@ -322,23 +318,14 @@ _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display) return TPL_ERROR_INVALID_OPERATION; } - tdm_source = tpl_gsource_create(wl_egl_display->thread, - (void *)wl_egl_display, tdm_display_fd, - &tdm_funcs, SOURCE_TYPE_NORMAL); - if (!tdm_source) { - TPL_ERR("Failed to create tdm_gsource\n"); - tdm_client_destroy(tdm_client); - return TPL_ERROR_INVALID_OPERATION; - } - - wl_egl_display->tdm_client = tdm_client; - wl_egl_display->tdm_source = tdm_source; - + wl_egl_display->tdm_display_fd = tdm_display_fd; + wl_egl_display->tdm_client = tdm_client; + wl_egl_display->tdm_source = NULL; wl_egl_display->tdm_initialized = TPL_TRUE; - TPL_LOG_T("WL_EGL", "TPL_WAIT_VBLANK:DEFAULT_ENABLED"); - TPL_LOG_T("WL_EGL", "wl_egl_display(%p) tdm_source(%p) tdm_client(%p)", - wl_egl_display, tdm_source, tdm_client); + TPL_INFO("[TDM_CLIENT_INIT]", + "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)", + wl_egl_display, tdm_client, tdm_display_fd); return TPL_ERROR_NONE; } @@ -498,6 +485,14 @@ _thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display) wl_egl_display->wl_initialized = TPL_TRUE; + TPL_INFO("[WAYLAND_INIT]", + "wl_egl_display(%p) wl_display(%p) event_queue(%p)", + wl_egl_display, wl_egl_display->wl_display, wl_egl_display->ev_queue); + TPL_INFO("[WAYLAND_INIT]", + "tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)", + wl_egl_display->tss, wl_egl_display->presentation, + wl_egl_display->explicit_sync); + fini: if (display_wrapper) wl_proxy_wrapper_destroy(display_wrapper); @@ -768,6 +763,15 @@ __tpl_wl_egl_display_init(tpl_display_t *display) goto 
free_display; } + wl_egl_display->tdm_source = tpl_gsource_create(wl_egl_display->thread, + (void *)wl_egl_display, + wl_egl_display->tdm_display_fd, + &tdm_funcs, SOURCE_TYPE_NORMAL); + if (!wl_egl_display->tdm_source) { + TPL_ERR("Failed to create tdm_gsource\n"); + goto free_display; + } + TPL_LOG_T("WL_EGL", "[INIT DISPLAY] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)", wl_egl_display, @@ -784,7 +788,11 @@ __tpl_wl_egl_display_init(tpl_display_t *display) free_display: if (wl_egl_display->thread) { - tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); + if (wl_egl_display->tdm_source) + tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); + if (wl_egl_display->disp_source) + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); + tpl_gthread_destroy(wl_egl_display->thread, _thread_fini); } @@ -810,16 +818,16 @@ __tpl_wl_egl_display_fini(tpl_display_t *display) wl_egl_display->thread, wl_egl_display->wl_display); - if (wl_egl_display->disp_source) { - tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); - wl_egl_display->disp_source = NULL; - } - if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) { tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE); wl_egl_display->tdm_source = NULL; } + if (wl_egl_display->disp_source) { + tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE); + wl_egl_display->disp_source = NULL; + } + if (wl_egl_display->thread) { tpl_gthread_destroy(wl_egl_display->thread, NULL); wl_egl_display->thread = NULL; -- 2.7.4