Implemented initial tpl_utils_gthread. 65/254765/1
authorJoonbum Ko <joonbum.ko@samsung.com>
Thu, 22 Oct 2020 07:55:12 +0000 (16:55 +0900)
committerJoonbum Ko <joonbum.ko@samsung.com>
Tue, 9 Mar 2021 08:44:09 +0000 (17:44 +0900)
 - Thread-related functions included in tpl_wayland_egl_thread
   have been separated out into a utility.

Change-Id: Ia75d1410e20241d8994e0bf55f7e6bc50016278c
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_utils_gthread.c [new file with mode: 0644]
src/tpl_utils_gthread.h [new file with mode: 0644]
src/tpl_wl_egl.c [new file with mode: 0644]

diff --git a/src/tpl_utils_gthread.c b/src/tpl_utils_gthread.c
new file mode 100644 (file)
index 0000000..181bf31
--- /dev/null
@@ -0,0 +1,334 @@
+#include "tpl_utils_gthread.h"
+
+struct _tpl_gthread {
+       GThread               *thread;
+       GMainLoop             *loop;
+
+       tpl_gsource           *destroy_sig_source;
+
+       GMutex                 thread_mutex;
+       GCond                  thread_cond;
+
+       tpl_gthread_func       init_func;
+       tpl_gthread_func       deinit_func;
+       void                  *func_data;
+};
+
+struct _tpl_gsource {
+       GSource                gsource;
+       gpointer               tag;
+
+       tpl_gthread           *thread;
+
+       int                    fd;
+       tpl_bool_t             is_eventfd;
+       tpl_gsource_functions *gsource_funcs;
+
+       tpl_bool_t             is_disposable;
+
+       void                  *data;
+};
+
+
+static gpointer
+_tpl_gthread_init(gpointer data)
+{
+       tpl_gthread *thread = data;
+
+       g_mutex_lock(&thread->thread_mutex);
+
+       if (thread->init_func)
+               thread->init_func(thread->func_data);
+
+       g_cond_signal(&thread->thread_cond);
+       g_mutex_unlock(&thread->thread_mutex);
+
+       g_main_loop_run(thread->loop);
+
+       return thread;
+}
+
+static tpl_bool_t
+_tpl_gthread_fini(tpl_gsource *source)
+{
+       tpl_gthread *thread = (tpl_gthread *)tpl_gsource_get_data(source);
+
+       g_mutex_lock(&thread->thread_mutex);
+
+       if (thread->deinit_func)
+               thread->deinit_func(thread->func_data);
+
+       g_cond_signal(&thread->thread_cond);
+       g_mutex_unlock(&thread->thread_mutex);
+
+       return TPL_GSOURCE_REMOVE;
+}
+
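+/* destroy_sig_source is an eventfd-backed tpl_gsource attached to the thread's
+ * main context. tpl_gthread_destroy() writes an event to it so that the
+ * deinit_func runs inside the thread before its main loop is quit. */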
+static tpl_gsource_functions thread_destroy_funcs = {
+       .prepare  = NULL,
+       .check    = NULL,
+       .dispatch = _tpl_gthread_fini,
+       .finalize = NULL,
+};
+
+tpl_gthread *
+tpl_gthread_create(const char *thread_name,
+                                  tpl_gthread_func init_func, void *func_data)
+{
+       GMainContext *context    = NULL;
+       GMainLoop    *loop       = NULL;
+       tpl_gthread  *new_thread = NULL;
+
+       context = g_main_context_new();
+       if (!context) {
+               TPL_ERR("Failed to create GMainContext");
+               return NULL;
+       }
+
+       loop = g_main_loop_new(context, FALSE);
+       if (!loop) {
+               TPL_ERR("Failed to create GMainLoop");
+               g_main_context_unref(context);
+               return NULL;
+       }
+
+       g_main_context_unref(context);
+
+       new_thread = calloc(1, sizeof(tpl_gthread));
+       if (!new_thread) {
+               TPL_ERR("Failed to allocate tpl_gthread");
+               g_main_loop_unref(loop);
+               return NULL;
+       }
+
+       g_mutex_init(&new_thread->thread_mutex);
+       g_cond_init(&new_thread->thread_cond);
+
+       g_mutex_lock(&new_thread->thread_mutex);
+
+       new_thread->loop      = loop;
+       new_thread->init_func = init_func;
+       new_thread->func_data = func_data;
+
+       new_thread->destroy_sig_source =
+               tpl_gsource_create(new_thread, new_thread, -1,
+                                                  &thread_destroy_funcs, TPL_TRUE);
+
+       new_thread->thread    = g_thread_new(thread_name,
+                                                                            _tpl_gthread_init, new_thread);
+       g_cond_wait(&new_thread->thread_cond,
+                               &new_thread->thread_mutex);
+       g_mutex_unlock(&new_thread->thread_mutex);
+
+       return new_thread;
+}
+
+void
+tpl_gthread_destroy(tpl_gthread *thread, tpl_gthread_func deinit_func)
+{
+       g_mutex_lock(&thread->thread_mutex);
+       thread->deinit_func = deinit_func;
+       tpl_gsource_send_event(thread->destroy_sig_source, 1);
+       g_cond_wait(&thread->thread_cond, &thread->thread_mutex);
+
+       g_main_loop_quit(thread->loop);
+       g_thread_join(thread->thread);
+       g_main_loop_unref(thread->loop);
+
+       g_mutex_unlock(&thread->thread_mutex);
+       g_mutex_clear(&thread->thread_mutex);
+       g_cond_clear(&thread->thread_cond);
+
+       thread->init_func = NULL;
+       thread->deinit_func = NULL;
+       thread->func_data = NULL;
+
+       free(thread);
+}
+
+static gboolean
+_thread_source_prepare(GSource *source, gint *time)
+{
+       tpl_gsource *gsource = (tpl_gsource *)source;
+       tpl_bool_t ret       = TPL_FALSE;
+
+       if (gsource->gsource_funcs->prepare)
+               ret = gsource->gsource_funcs->prepare(gsource);
+
+       *time = -1;
+
+       return ret;
+}
+
+static gboolean
+_thread_source_check(GSource *source)
+{
+       tpl_gsource *gsource = (tpl_gsource *)source;
+       tpl_bool_t ret       = TPL_FALSE;
+
+       if (gsource->gsource_funcs->check)
+               ret = gsource->gsource_funcs->check(gsource);
+
+       return ret;
+}
+
+static gboolean
+_thread_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
+{
+       tpl_gsource *gsource = (tpl_gsource *)source;
+       tpl_bool_t ret       = TPL_GSOURCE_CONTINUE;
+       GIOCondition cond    = g_source_query_unix_fd(source, gsource->tag);
+
+       TPL_IGNORE(cb);
+       TPL_IGNORE(data);
+
+       if (cond & G_IO_IN) {
+               if (gsource->gsource_funcs->dispatch)
+                       ret = gsource->gsource_funcs->dispatch(gsource);
+       } else {
+               /* When an io error occurs, it is not considered a critical error.
+                * There may be problems with the screen, but overall operation continues. */
+               TPL_WARN("Invalid GIOCondition occurred. tpl_gsource(%p) fd(%d) cond(%d)",
+                                gsource, gsource->fd, cond);
+       }
+
+       if (gsource->is_disposable)
+               ret = TPL_GSOURCE_REMOVE;
+
+       return ret;
+}
+
+static void
+_thread_source_finalize(GSource *source)
+{
+       tpl_gsource *gsource = (tpl_gsource *)source;
+
+       if (gsource->gsource_funcs->finalize)
+               gsource->gsource_funcs->finalize(gsource);
+
+       if (gsource->is_eventfd)
+               close(gsource->fd);
+
+       gsource->fd = -1;
+       gsource->thread = NULL;
+       gsource->gsource_funcs = NULL;
+       gsource->data = NULL;
+}
+
+static GSourceFuncs _thread_source_funcs = {
+       .prepare = _thread_source_prepare,
+       .check = _thread_source_check,
+       .dispatch = _thread_source_dispatch,
+       .finalize = _thread_source_finalize,
+};
+
+tpl_gsource *
+tpl_gsource_create(tpl_gthread *thread, void *data, int fd,
+                                  tpl_gsource_functions *funcs, tpl_bool_t is_disposable)
+{
+       tpl_gsource *new_gsource = NULL;
+
+       new_gsource = (tpl_gsource *)g_source_new(&_thread_source_funcs,
+                                 sizeof(tpl_gsource));
+       if (!new_gsource) {
+               TPL_ERR("Failed to create new tpl_gsource");
+               return NULL;
+       }
+
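+       /* A negative fd requests an internal eventfd. It is used only for
+        * cross-thread wakeup via tpl_gsource_send_event() and is closed
+        * when the source is finalized. */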
+       if (fd < 0) {
+               new_gsource->fd = eventfd(0, EFD_CLOEXEC);
+               if (new_gsource->fd < 0) {
+                       TPL_ERR("Failed to create eventfd. errno(%d)", errno);
+                       g_source_unref(&new_gsource->gsource);
+                       return NULL;
+               }
+
+               new_gsource->is_eventfd = TPL_TRUE;
+       } else {
+               new_gsource->fd = fd;
+       }
+
+       new_gsource->thread        = thread;
+       new_gsource->gsource_funcs = funcs;
+       new_gsource->data          = data;
+       new_gsource->is_disposable = is_disposable;
+
+       new_gsource->tag = g_source_add_unix_fd(&new_gsource->gsource,
+                                                                                       new_gsource->fd,
+                                                                                       G_IO_IN | G_IO_ERR);
+       g_source_attach(&new_gsource->gsource,
+                                       g_main_loop_get_context(thread->loop));
+
+       return new_gsource;
+}
+
+void
+tpl_gsource_destroy(tpl_gsource *source)
+{
+       g_source_remove_unix_fd(&source->gsource, source->tag);
+       g_source_destroy(&source->gsource);
+       g_source_unref(&source->gsource);
+}
+
+void
+tpl_gsource_send_event(tpl_gsource *source, uint64_t message)
+{
+       uint64_t value = message;
+       int ret;
+
+       ret = write(source->fd, &value, sizeof(uint64_t));
+       if (ret == -1) {
+               TPL_ERR("Failed to send event. tpl_gsource(%p)",
+                               source);
+       }
+}
+
+void *
+tpl_gsource_get_data(tpl_gsource *source)
+{
+       if (source && source->data)
+               return source->data;
+
+       return NULL;
+}
+
+void
+tpl_gmutex_init(tpl_gmutex *gmutex)
+{
+       g_mutex_init(gmutex);
+}
+
+void
+tpl_gmutex_clear(tpl_gmutex *gmutex)
+{
+       g_mutex_clear(gmutex);
+}
+
+void
+tpl_gmutex_lock(tpl_gmutex *gmutex)
+{
+       g_mutex_lock(gmutex);
+}
+
+void
+tpl_gmutex_unlock(tpl_gmutex *gmutex)
+{
+       g_mutex_unlock(gmutex);
+}
+
+void
+tpl_gcond_init(tpl_gcond *gcond)
+{
+       g_cond_init(gcond);
+}
+
+void
+tpl_gcond_clear(tpl_gcond *gcond)
+{
+       g_cond_clear(gcond);
+}
+
+void
+tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex)
+{
+       g_cond_wait(gcond, gmutex);
+}
+
+void
+tpl_gcond_signal(tpl_gcond *gcond)
+{
+       g_cond_signal(gcond);
+}
\ No newline at end of file
diff --git a/src/tpl_utils_gthread.h b/src/tpl_utils_gthread.h
new file mode 100644 (file)
index 0000000..1886609
--- /dev/null
@@ -0,0 +1,176 @@
+#ifndef TPL_UTILS_GTHREAD_H
+#define TPL_UTILS_GTHREAD_H
+
+#include <sys/eventfd.h>
+#include <glib.h>
+#include <glib-unix.h>
+
+#include <stdlib.h>
+
+#include "tpl_utils.h"
+
+typedef struct _tpl_gthread tpl_gthread;
+typedef struct _tpl_gsource tpl_gsource;
+typedef struct _tpl_gsource_functions tpl_gsource_functions;
+
+#define TPL_GSOURCE_CONTINUE TPL_TRUE
+#define TPL_GSOURCE_REMOVE   TPL_FALSE
+
+typedef void (*tpl_gthread_func) (void *user_data);
+
+typedef GMutex tpl_gmutex;
+typedef GCond tpl_gcond;
+
+struct _tpl_gsource_functions {
+       tpl_bool_t (*prepare)  (tpl_gsource *source);
+       tpl_bool_t (*check)    (tpl_gsource *source);
+       tpl_bool_t (*dispatch) (tpl_gsource *source);
+       void       (*finalize) (tpl_gsource *source);
+};
+
+/**
+ * Create a new tpl_gthread
+ *
+ * This creates a new GThread and waits until init_func has finished running
+ * in the new thread; the thread then enters g_main_loop_run().
+ *
+ * @param thread_name Name of the new thread; useful for identifying threads in a debugger.
+ * @param init_func Function pointer to be called first in the created thread.
+ * @param func_data Parameter passed when init_func is called.
+ * @return Pointer to the newly created tpl_gthread.
+ *
+ * The returned tpl_gthread must be destroyed with tpl_gthread_destroy() when the thread is terminated.
+ */
+tpl_gthread *
+tpl_gthread_create(const char *thread_name,
+                                  tpl_gthread_func init_func, void *func_data);
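+
+/*
+ * Example: a minimal usage sketch of the thread lifecycle. The callback names
+ * (example_init / example_deinit) and the ctx pointer are illustrative only.
+ *
+ *   static void example_init(void *data)   { ... runs first in the new thread ... }
+ *   static void example_deinit(void *data) { ... runs in the thread before it stops ... }
+ *
+ *   tpl_gthread *thread = tpl_gthread_create("example-thread",
+ *                                            example_init, ctx);
+ *   ...
+ *   tpl_gthread_destroy(thread, example_deinit);
+ */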
+
+/**
+ * Stop the thread and destroy the tpl_gthread
+ *
+ * After waiting for the ongoing operation in the thread to complete,
+ * all resources created in tpl_gthread_create are freed.
+ *
+ * @param thread Pointer to tpl_gthread created with tpl_gthread_create().
+ * @param deinit_func Function pointer to be called in the thread while it is being destroyed.
+ *
+ * @see tpl_gthread_create()
+ */
+void
+tpl_gthread_destroy(tpl_gthread *thread, tpl_gthread_func deinit_func);
+
+/**
+ * Create a new tpl_gsource
+ *
+ * This creates a new tpl_gsource to be attached to the thread's main loop.
+ *
+ * @param thread Pointer to tpl_gthread to attach new tpl_gsource.
+ * @param data Pointer to some handle used by its user.
+ * @param fd fd to poll. If the value is 0 or greater, the passed fd will be polled.
+ *  If it is -1, an eventfd is created inside this function.
+ * @param funcs Pointer to tpl_gsource_functions.
+ *  This structure corresponds to GSourceFuncs, and dispatch and finalize are required.
+ * @param is_disposable Pass TPL_TRUE for a single-use source, or TPL_FALSE to keep it.
+ *  In the disposable case, it is not necessary to call tpl_gsource_destroy().
+ * @return Pointer to newly created tpl_gsource.
+ *
+ * All created tpl_gsource resources will be freed in the thread.
+ * @see tpl_gsource_destroy
+ */
+tpl_gsource *
+tpl_gsource_create(tpl_gthread *thread, void *data, int fd,
+                                  tpl_gsource_functions *funcs, tpl_bool_t is_disposable);
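+
+/*
+ * Example: a minimal usage sketch. example_dispatch, example_funcs and obj are
+ * illustrative names only. Passing fd = -1 makes tpl_gsource_create() create an
+ * internal eventfd, so the source can be woken with tpl_gsource_send_event().
+ *
+ *   static tpl_bool_t
+ *   example_dispatch(tpl_gsource *source)
+ *   {
+ *       void *obj = tpl_gsource_get_data(source);
+ *       ... handle the event for obj ...
+ *       return TPL_GSOURCE_CONTINUE;
+ *   }
+ *
+ *   static tpl_gsource_functions example_funcs = {
+ *       .prepare  = NULL,
+ *       .check    = NULL,
+ *       .dispatch = example_dispatch,
+ *       .finalize = NULL,
+ *   };
+ *
+ *   tpl_gsource *source = tpl_gsource_create(thread, obj, -1,
+ *                                            &example_funcs, TPL_FALSE);
+ */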
+
+/**
+ * Detach the passed tpl_gsource from thread and destroy it.
+ *
+ * @param source Pointer to tpl_gsource to destroy.
+ */
+void
+tpl_gsource_destroy(tpl_gsource *source);
+
+/**
+ * Send an event to dispatch the gsource attached to the thread.
+ *
+ * @param source Pointer to tpl_gsource to send event.
+ * @param message Value to be read in the thread.
+ */
+void
+tpl_gsource_send_event(tpl_gsource *source, uint64_t message);
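+
+/*
+ * Example: a minimal sketch, assuming "source" was created with fd = -1 so
+ * that it owns an internal eventfd. The write wakes the owning thread, which
+ * then calls the dispatch callback of this tpl_gsource.
+ *
+ *   tpl_gsource_send_event(source, 1);
+ */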
+
+/**
+ * Get user data from passed tpl_gsource
+ *
+ * @param source Pointer to tpl_gsource to get its user data.
+ * @return Pointer to user data passed to tpl_gsource_create().
+ */
+void *
+tpl_gsource_get_data(tpl_gsource *source);
+
+/**
+ * wrapping g_mutex_init()
+ *
+ * @param gmutex Pointer to tpl_gmutex.
+ */
+void
+tpl_gmutex_init(tpl_gmutex *gmutex);
+
+/**
+ * wrapping g_mutex_clear()
+ *
+ * @param gmutex Pointer to tpl_gmutex.
+ */
+void
+tpl_gmutex_clear(tpl_gmutex *gmutex);
+
+/**
+ * wrapping g_mutex_lock()
+ *
+ * @param gmutex Pointer to tpl_gmutex.
+ */
+void
+tpl_gmutex_lock(tpl_gmutex *gmutex);
+
+/**
+ * wrapping g_mutex_unlock()
+ *
+ * @param gmutex Pointer to tpl_gmutex.
+ */
+void
+tpl_gmutex_unlock(tpl_gmutex *gmutex);
+
+/**
+ * wrapping g_cond_init()
+ *
+ * @param gcond Pointer to tpl_gcond.
+ */
+void
+tpl_gcond_init(tpl_gcond *gcond);
+
+/**
+ * wrapping g_cond_clear()
+ *
+ * @param gcond Pointer to tpl_gcond.
+ */
+void
+tpl_gcond_clear(tpl_gcond *gcond);
+
+/**
+ * wrapping g_cond_wait()
+ *
+ * @param gcond Pointer to tpl_gcond.
+ * @param gmutex Pointer to tpl_gmutex that is currently locked by the caller.
+ */
+void
+tpl_gcond_wait(tpl_gcond *gcond, tpl_gmutex *gmutex);
+
+/**
+ * wrapping g_cond_signal()
+ *
+ * @param gcond Pointer to tpl_gcond.
+ */
+void
+tpl_gcond_signal(tpl_gcond *gcond);
+
+#endif /* TPL_UTILS_GTHREAD_H */
diff --git a/src/tpl_wl_egl.c b/src/tpl_wl_egl.c
new file mode 100644 (file)
index 0000000..7b41f7e
--- /dev/null
@@ -0,0 +1,1436 @@
+
+#include "tpl_internal.h"
+
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <tbm_bufmgr.h>
+#include <tbm_surface.h>
+#include <tbm_surface_internal.h>
+#include <tbm_surface_queue.h>
+
+#include <wayland-client.h>
+#include <wayland-tbm-server.h>
+#include <wayland-tbm-client.h>
+#include <wayland-egl-backend.h>
+
+#include <tdm_client.h>
+
+#include "wayland-egl-tizen/wayland-egl-tizen.h"
+#include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
+
+#include <tizen-surface-client-protocol.h>
+#include <presentation-time-client-protocol.h>
+#include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
+
+#include "tpl_utils_gthread.h"
+
+static int buffer_info_key;
+#define KEY_BUFFER_INFO (unsigned long)(&buffer_info_key)
+
+/* In wayland, the application and the compositor each create their own drawing buffers. The recommended queue size is more than 2. */
+#define CLIENT_QUEUE_SIZE 3
+
+typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
+typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
+
+struct _tpl_wl_egl_display {
+       tpl_gsource                  *disp_source;
+       tpl_gthread                  *thread;
+       tpl_gmutex                    wl_event_mutex;
+
+       struct wl_display            *wl_display;
+       struct wl_event_queue        *ev_queue;
+       struct wayland_tbm_client    *wl_tbm_client;
+       int                           last_error; /* errno of the last wl_display error*/
+
+       tdm_client                   *tdm_client;
+       tpl_gsource                  *tdm_source;
+
+       tpl_bool_t                    use_wait_vblank;
+       tpl_bool_t                    use_explicit_sync;
+       tpl_bool_t                    prepared;
+
+       struct tizen_surface_shm     *tss; /* used for surface buffer_flush */
+       struct wp_presentation       *presentation;
+       struct zwp_linux_explicit_synchronization_v1 *explicit_sync;
+};
+
+struct _tpl_wl_egl_surface {
+       tpl_gsource                  *surf_source;
+
+       tbm_surface_queue_h           tbm_queue;
+
+       struct wl_surface            *surf;
+       struct wl_egl_window         *wl_egl_window;
+       struct zwp_linux_surface_synchronization_v1 *surface_sync;
+       struct tizen_surface_shm_flusher *tss_flusher;
+
+       /* surface information */
+       int                           latest_transform;
+       int                           rotation;
+       int                           format;
+       int                           render_done_cnt;
+       unsigned int                  serial;
+
+
+       tpl_wl_egl_display_t         *wl_egl_display;
+
+       /* the lists for buffer tracing */
+       tpl_list_t                   *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */
+       tpl_list_t                   *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */
+       tpl_list_t                   *fence_waiting_sources; /* Trace fence_wait_source from ENQUEUE to fence signaled */
+       tpl_list_t                   *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
+       tpl_list_t                   *render_done_fences; /* for attaching to twe_thread with fences passed by enqueue */
+
+       tdm_client_vblank            *vblank;
+
+       tbm_fd                        commit_sync_timeline;
+       int                           commit_sync_timestamp;
+       unsigned int                  commit_sync_fence_number;
+
+       tbm_fd                        presentation_sync_timeline;
+       int                           presentation_sync_timestamp;
+       int                           presentation_sync_ts_backup;
+       int                           presentation_sync_req_cnt;
+
+       tpl_gmutex                    pst_mutex;
+       tpl_gmutex                    surf_mutex;
+       tpl_gmutex                    free_queue_mutex;
+       tpl_gcond                     free_queue_cond;
+
+       /* for waiting draw done */
+       tpl_bool_t                    use_sync_fence;
+
+       /* to use zwp_linux_surface_synchronization */
+       tpl_bool_t                    use_surface_sync;
+
+       tpl_bool_t                    is_activated;
+       tpl_bool_t                    reset; /* TPL_TRUE if the queue was reset externally */
+       tpl_bool_t                    need_to_enqueue;
+       tpl_bool_t                    rotation_capability;
+       tpl_bool_t                    vblank_done;
+       tpl_bool_t                    is_destroying;
+       tpl_bool_t                    set_serial_is_used; /* Will be deprecated */
+
+       int post_interval;
+};
+
+struct _tpl_wl_egl_buffer {
+       tbm_surface_h tbm_surface;
+
+       struct wl_proxy              *wl_buffer;
+       int                           dx, dy;
+       int                           width, height;
+
+       tpl_wl_egl_surface_t         *wl_egl_surface;
+
+       /* for wayland_tbm_client_set_buffer_transform */
+       int                           w_transform;
+       tpl_bool_t                    w_rotated;
+
+       /* for wl_surface_set_buffer_transform */
+       int                           transform;
+
+       /* for damage region */
+       int                           num_rects;
+       int                          *rects;
+
+       unsigned int                  commit_sync_ts_backup;
+
+       /* for wayland_tbm_client_set_buffer_serial */
+       unsigned int                  serial;
+
+       /* for checking need_to_commit (frontbuffer mode) */
+       tpl_bool_t                    need_to_commit;
+
+       /* for checking need to release */
+       tpl_bool_t                    need_to_release;
+
+       /* for checking draw done */
+       tpl_bool_t                    draw_done;
+
+
+       /* to get release event via zwp_linux_buffer_release_v1 */
+       struct zwp_linux_buffer_release_v1 *buffer_release;
+
+       /* each buffer owns its release_fence_fd until it passes ownership
+        * to EGL */
+       int                           release_fence_fd;
+
+       /* each buffer owns its acquire_fence_fd until it passes ownership
+        * to the SERVER */
+       int                           acquire_fence_fd;
+};
+
+struct sync_info {
+       tbm_surface_h                 tbm_surface;
+       int                           sync_fd;
+};
+
+struct _twe_fence_wait_source {
+       tpl_gsource                  *fence_source;
+       tbm_fd                        fence_fd;
+       tbm_surface_h                 tbm_surface;
+       tpl_wl_egl_surface_t         *wl_egl_surface;
+};
+
+tpl_bool_t
+_check_native_handle_is_wl_display(tpl_handle_t display)
+{
+       struct wl_interface *wl_egl_native_dpy = *(void **) display;
+
+       if (!wl_egl_native_dpy) {
+               TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy);
+               return TPL_FALSE;
+       }
+
+       /* MAGIC CHECK: A native display handle is a wl_display if the dereferenced
+        * first value is the address of the wl_display_interface structure. */
+       if (wl_egl_native_dpy == &wl_display_interface)
+               return TPL_TRUE;
+
+       if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
+                               strlen(wl_display_interface.name)) == 0) {
+               return TPL_TRUE;
+       }
+
+       return TPL_FALSE;
+}
+
+static tpl_bool_t
+__thread_func_tdm_dispatch(tpl_gsource *gsource)
+{
+       tpl_wl_egl_display_t       *wl_egl_display = NULL;
+       tdm_error                   tdm_err = TDM_ERROR_NONE;
+
+       wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+       if (!wl_egl_display) {
+               TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource);
+               TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
+               return TPL_GSOURCE_REMOVE;
+       }
+
+       tdm_err = tdm_client_handle_events(wl_egl_display->tdm_client);
+
+       /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
+        * When tdm_source is no longer available due to an unexpected situation,
+        * twe_thread must remove it from the thread and destroy it.
+        * In that case, tdm_vblank can no longer be used for surfaces and displays
+        * that used this tdm_source. */
+       if (tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("Error occurred in tdm_client_handle_events. tdm_err(%d)",
+                               tdm_err);
+               TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
+
+               tpl_gsource_destroy(gsource);
+
+               wl_egl_display->tdm_source = NULL;
+
+               return TPL_GSOURCE_REMOVE;
+       }
+
+       return TPL_GSOURCE_CONTINUE;
+}
+
+static void
+__thread_func_tdm_finalize(tpl_gsource *gsource)
+{
+       tpl_wl_egl_display_t *wl_egl_display = NULL;
+
+       wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+
+       TPL_LOG_T(BACKEND, "tdm_destroy| tdm_source(%p) tdm_client(%p)",
+                         gsource, wl_egl_display->tdm_client);
+
+       if (wl_egl_display->tdm_client) {
+               tdm_client_destroy(wl_egl_display->tdm_client);
+               wl_egl_display->tdm_client = NULL;
+       }
+}
+
+static tpl_gsource_functions tdm_funcs = {
+       .prepare  = NULL,
+       .check    = NULL,
+       .dispatch = __thread_func_tdm_dispatch,
+       .finalize = __thread_func_tdm_finalize,
+};
+
+tpl_result_t
+_thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display)
+{
+       tpl_gsource      *tdm_source = NULL;
+       tdm_client       *client = NULL;
+       int               tdm_display_fd = -1;
+       tdm_error         tdm_err = TDM_ERROR_NONE;
+
+       if (!wl_egl_display->thread) {
+               TPL_ERR("thread should be created before init tdm_client.");
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       client = tdm_client_create(&tdm_err);
+       if (!client || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       tdm_err = tdm_client_get_fd(client, &tdm_display_fd);
+       if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
+               tdm_client_destroy(client);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       tdm_source = tpl_gsource_create(wl_egl_display->thread,
+                                                                       (void *)wl_egl_display,
+                                                                       tdm_display_fd,
+                                                                       &tdm_funcs, TPL_FALSE);
+       if (!tdm_source) {
+               TPL_ERR("Failed to create tdm_gsource\n");
+               tdm_client_destroy(client);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       wl_egl_display->tdm_client = client;
+       wl_egl_display->tdm_source = tdm_source;
+
+       TPL_LOG_T(BACKEND, "TPL_WAIT_VBLANK:DEFAULT_ENABLED");
+       TPL_LOG_T(BACKEND, "wl_egl_display(%p) tdm_source(%p) tdm_client(%p)",
+                         wl_egl_display, tdm_source, client);
+
+       return TPL_ERROR_NONE;
+}
+
+#define IMPL_TIZEN_SURFACE_SHM_VERSION 2
+
+void
+__cb_wl_registry_global_callback(void *data, struct wl_registry *wl_registry,
+                                                         uint32_t name, const char *interface,
+                                                         uint32_t version)
+{
+       tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
+
+       if (!strcmp(interface, "tizen_surface_shm")) {
+               wl_egl_display->tss = wl_registry_bind(wl_registry,
+                                                                                       name,
+                                                                                       &tizen_surface_shm_interface,
+                                                                                       ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
+                                                                                        version : IMPL_TIZEN_SURFACE_SHM_VERSION));
+       } else if (!strcmp(interface, wp_presentation_interface.name)) {
+               wl_egl_display->presentation =
+                                       wl_registry_bind(wl_registry,
+                                                                        name, &wp_presentation_interface, 1);
+               TPL_DEBUG("bind wp_presentation_interface");
+       } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
+               char *env = tpl_getenv("TPL_EFS");
+               if (env && atoi(env)) {
+                       wl_egl_display->explicit_sync =
+                                       wl_registry_bind(wl_registry, name,
+                                                                        &zwp_linux_explicit_synchronization_v1_interface, 1);
+                       wl_egl_display->use_explicit_sync = TPL_TRUE;
+                       TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
+               } else {
+                       wl_egl_display->use_explicit_sync = TPL_FALSE;
+               }
+       }
+}
+
+void
+__cb_wl_registry_global_remove_callback(void *data,
+                                                                        struct wl_registry *wl_registry,
+                                                                        uint32_t name)
+{
+}
+
+static const struct wl_registry_listener registry_listener = {
+       __cb_wl_registry_global_callback,
+       __cb_wl_registry_global_remove_callback
+};
+
+static void
+_wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display,
+                                         const char *func_name)
+{
+       int dpy_err;
+       char buf[1024];
+       strerror_r(errno, buf, sizeof(buf));
+
+       if (wl_egl_display->last_error == errno)
+               return;
+
+       TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
+
+       dpy_err = wl_display_get_error(wl_egl_display->wl_display);
+       if (dpy_err == EPROTO) {
+               const struct wl_interface *err_interface;
+               uint32_t err_proxy_id, err_code;
+               err_code = wl_display_get_protocol_error(wl_egl_display->wl_display,
+                                                                                                &err_interface,
+                                                                                                &err_proxy_id);
+               TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
+                               err_interface->name, err_code, err_proxy_id);
+       }
+
+       wl_egl_display->last_error = errno;
+}
+
+tpl_result_t
+_thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display)
+{
+       struct wl_registry *registry = NULL;
+       struct wl_event_queue *queue = NULL;
+       struct wl_display *display_wrapper = NULL;
+       int ret;
+       tpl_result_t result = TPL_ERROR_NONE;
+
+       queue = wl_display_create_queue(wl_egl_display->wl_display);
+       if (!queue) {
+               TPL_ERR("Failed to create wl_queue wl_display(%p)",
+                               wl_egl_display->wl_display);
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display);
+       if (!display_wrapper) {
+               TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
+                               wl_egl_display->wl_display);
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
+
+       registry = wl_display_get_registry(display_wrapper);
+       if (!registry) {
+               TPL_ERR("Failed to create wl_registry");
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       wl_proxy_wrapper_destroy(display_wrapper);
+       display_wrapper = NULL;
+
+       if (wl_registry_add_listener(registry, &registry_listener,
+                                                                wl_egl_display)) {
+               TPL_ERR("Failed to wl_registry_add_listener");
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue);
+       if (ret == -1) {
+               _wl_display_print_err(wl_egl_display, "roundtrip_queue");
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       /* set tizen_surface_shm's queue as client's private queue */
+       if (wl_egl_display->tss) {
+               wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss,
+                                                  wl_egl_display->ev_queue);
+               TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss);
+       }
+
+       if (wl_egl_display->presentation) {
+               wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation,
+                                                  wl_egl_display->ev_queue);
+               TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.",
+                                 wl_egl_display->presentation);
+       }
+
+       if (wl_egl_display->explicit_sync) {
+               wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync,
+                                                  wl_egl_display->ev_queue);
+               TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.",
+                                 wl_egl_display->explicit_sync);
+       }
+
+fini:
+       if (display_wrapper)
+               wl_proxy_wrapper_destroy(display_wrapper);
+       if (registry)
+               wl_registry_destroy(registry);
+       if (queue)
+               wl_event_queue_destroy(queue);
+
+       return result;
+}
+
+static void
+_thread_init(void *data)
+{
+       tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
+
+       if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) {
+               TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)",
+                               wl_egl_display, wl_egl_display->wl_display);
+       }
+
+       if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
+               TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VBLANK:DISABLED");
+       }
+
+}
+
+static tpl_bool_t
+__thread_func_disp_prepare(tpl_gsource *gsource)
+{
+       tpl_wl_egl_display_t *wl_egl_display =
+               (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+
+       /* If this wl_egl_display is already prepared,
+        * do nothing in this function. */
+       if (wl_egl_display->prepared)
+               return TPL_FALSE;
+
+       /* If there is a last_error, there is no need to poll,
+        * so skip directly to dispatch.
+        * prepare -> dispatch */
+       if (wl_egl_display->last_error)
+               return TPL_TRUE;
+
+       while (wl_display_prepare_read_queue(wl_egl_display->wl_display,
+                                                                                wl_egl_display->ev_queue) != 0) {
+               if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
+                                                                                         wl_egl_display->ev_queue) == -1) {
+                       _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
+               }
+       }
+
+       wl_egl_display->prepared = TPL_TRUE;
+
+       wl_display_flush(wl_egl_display->wl_display);
+
+       return TPL_FALSE;
+}
+
+static gboolean
+_twe_thread_wl_disp_check(GSource *source)
+{
+       twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
+       gboolean ret = FALSE;
+
+       if (!disp_source->prepared)
+               return ret;
+
+       /* If prepared, but last_error is set,
+        * cancel_read is executed and FALSE is returned.
+        * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
+        * and skipping disp_check from prepare to disp_dispatch.
+        * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
+       if (disp_source->prepared && disp_source->last_error) {
+               wl_display_cancel_read(disp_source->disp);
+               return ret;
+       }
+
+       if (disp_source->gfd.revents & G_IO_IN) {
+               if (wl_display_read_events(disp_source->disp) == -1)
+                       _wl_display_print_err(disp_source, "read_event.");
+               ret = TRUE;
+       } else {
+               wl_display_cancel_read(disp_source->disp);
+               ret = FALSE;
+       }
+
+       disp_source->prepared = TPL_FALSE;
+
+       return ret;
+}
+
+static gboolean
+_twe_thread_wl_disp_dispatch(GSource *source, GSourceFunc cb, gpointer data)
+{
+       twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
+
+       /* If there is last_error, G_SOURCE_REMOVE should be returned
+        * to remove the gsource from the main loop.
+        * This is because disp_source is not valid since last_error was set.*/
+       if (disp_source->last_error) {
+               return G_SOURCE_REMOVE;
+       }
+
+       g_mutex_lock(&disp_source->wl_event_mutex);
+       if (disp_source->gfd.revents & G_IO_IN) {
+               if (wl_display_dispatch_queue_pending(disp_source->disp,
+                                                                                         disp_source->ev_queue) == -1) {
+                       _wl_display_print_err(disp_source, "dispatch_queue_pending");
+               }
+       }
+
+       wl_display_flush(disp_source->disp);
+       g_mutex_unlock(&disp_source->wl_event_mutex);
+
+       return G_SOURCE_CONTINUE;
+}
+
+static void
+_twe_thread_wl_disp_finalize(GSource *source)
+{
+       TPL_LOG_T(BACKEND, "finalize| disp_source(%p)", source);
+
+       return;
+}
+
+static tpl_gsource_functions disp_funcs = {
+       .prepare  = __thread_func_disp_prepare,
+       .check    = __thread_func_disp_check,
+       .dispatch = __thread_func_disp_dispatch,
+       .finalize = __thread_func_disp_finalize,
+};
+
+static tpl_result_t
+__tpl_wl_egl_display_init(tpl_display_t *display)
+{
+       tpl_wl_egl_display_t *wl_egl_display = NULL;
+
+       TPL_ASSERT(display);
+
+       /* Do not allow default display in wayland. */
+       if (!display->native_handle) {
+               TPL_ERR("Invalid native handle for display.");
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!_check_native_handle_is_wl_display(display->native_handle)) {
+               TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       wl_egl_display = (tpl_wl_egl_display_t *) calloc(1,
+                                                 sizeof(tpl_wl_egl_display_t));
+       if (!wl_egl_display) {
+               TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t.");
+               return TPL_ERROR_OUT_OF_MEMORY;
+       }
+
+       display->backend.data             = wl_egl_display;
+       display->bufmgr_fd                = -1;
+
+       wl_egl_display->wl_display        = (struct wl_display *)display->native_handle;
+       wl_egl_display->last_error        = 0;
+       wl_egl_display->use_explicit_sync = TPL_FALSE;   // default disabled
+       wl_egl_display->prepared          = TPL_FALSE;
+
+       /* Wayland Interfaces */
+       wl_egl_display->tss               = NULL;
+       wl_egl_display->presentation      = NULL;
+       wl_egl_display->explicit_sync     = NULL;
+
+       wl_egl_display->use_wait_vblank   = TPL_TRUE;    // default enabled
+       env = tpl_getenv("TPL_WAIT_VBLANK");
+       if (env && !atoi(env)) {
+               wl_egl_display->use_wait_vblank = TPL_FALSE;
+       }
+
+       /* Create gthread */
+       wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
+                                                                                               _thread_init, (void *)wl_egl_display);
+       if (!wl_egl_display->thread) {
+               TPL_ERR("Failed to create wl_egl_thread");
+               goto free_display;
+       }
+
+       wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread,
+                                                                                                        (void *)wl_egl_display,
+                                                                                                        wl_display_get_fd(wl_egl_display->wl_display),
+                                                                                                        &disp_funcs, TPL_FALSE);
+       if (!wl_egl_display->disp_source) {
+               TPL_ERR("Failed to add native_display(%p) to thread(%p)",
+                               display->native_handle,
+                               wl_egl_display->thread);
+               goto free_display;
+       }
+
+       TPL_LOG_T("WL_EGL",
+                         "[INIT DISPLAY] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
+                         wl_egl_display,
+                         wl_egl_display->thread,
+                         wl_egl_display->wl_display);
+
+       TPL_LOG_T("WL_EGL",
+                         "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)",
+                         wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
+                         wl_egl_display->tss ? "TRUE" : "FALSE",
+                         wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");
+
+       return TPL_ERROR_NONE;
+
+free_display:
+       if (wl_egl_display->thread)
+               tpl_gthread_destroy(wl_egl_display->thread, NULL);
+
+       wl_egl_display->thread = NULL;
+       free(wl_egl_display);
+
+       display->backend.data = NULL;
+       return TPL_ERROR_INVALID_OPERATION;
+}
+
+static void
+__tpl_wl_egl_display_fini(tpl_display_t *display)
+{
+       tpl_wl_egl_display_t *wl_egl_display;
+
+       TPL_ASSERT(display);
+
+       wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data;
+       if (wl_egl_display) {
+
+               TPL_LOG_T("WL_EGL",
+                                 "[FINI] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
+                                 wl_egl_display,
+                                 wl_egl_display->thread,
+                                 wl_egl_display->wl_display);
+
+               if (wl_egl_display->disp_source) {
+                       tpl_gsource_destroy(wl_egl_display->disp_source);
+                       wl_egl_display->disp_source = NULL;
+               }
+
+               if (wl_egl_display->thread) {
+                       tpl_gthread_destroy(wl_egl_display->thread, NULL);
+                       wl_egl_display->thread = NULL;
+               }
+
+               free(wl_egl_display);
+       }
+
+       display->backend.data = NULL;
+}
+
+static tpl_result_t
+__tpl_wl_egl_display_query_config(tpl_display_t *display,
+                                                                          tpl_surface_type_t surface_type,
+                                                                          int red_size, int green_size,
+                                                                          int blue_size, int alpha_size,
+                                                                          int color_depth, int *native_visual_id,
+                                                                          tpl_bool_t *is_slow)
+{
+       TPL_ASSERT(display);
+
+       if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
+                       green_size == 8 && blue_size == 8 &&
+                       (color_depth == 32 || color_depth == 24)) {
+
+               if (alpha_size == 8) {
+                       if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
+                       if (is_slow) *is_slow = TPL_FALSE;
+                       return TPL_ERROR_NONE;
+               }
+               if (alpha_size == 0) {
+                       if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
+                       if (is_slow) *is_slow = TPL_FALSE;
+                       return TPL_ERROR_NONE;
+               }
+       }
+
+       return TPL_ERROR_INVALID_PARAMETER;
+}
+
+static tpl_result_t
+__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
+                                                                               int alpha_size)
+{
+       TPL_IGNORE(display);
+       TPL_IGNORE(visual_id);
+       TPL_IGNORE(alpha_size);
+       return TPL_ERROR_NONE;
+}
+
+static tpl_result_t
+__tpl_wl_egl_display_get_window_info(tpl_display_t *display,
+               tpl_handle_t window, int *width,
+               int *height, tbm_format *format,
+               int depth, int a_size)
+{
+       tpl_result_t ret = TPL_ERROR_NONE;
+
+       TPL_ASSERT(display);
+       TPL_ASSERT(window);
+
+       if ((ret = twe_get_native_window_info(window, width, height, format, a_size))
+                       != TPL_ERROR_NONE) {
+               TPL_ERR("Failed to get size info of native_window(%p)", window);
+       }
+
+       return ret;
+}
+
+static tpl_result_t
+__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
+               tpl_handle_t pixmap, int *width,
+               int *height, tbm_format *format)
+{
+       tbm_surface_h   tbm_surface = NULL;
+
+       tbm_surface = twe_get_native_buffer_from_pixmap(pixmap);
+       if (!tbm_surface) {
+               TPL_ERR("Failed to get tbm_surface_h from native pixmap.");
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       if (width) *width = tbm_surface_get_width(tbm_surface);
+       if (height) *height = tbm_surface_get_height(tbm_surface);
+       if (format) *format = tbm_surface_get_format(tbm_surface);
+
+       return TPL_ERROR_NONE;
+}
+
+static tbm_surface_h
+__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
+{
+       tbm_surface_h tbm_surface = NULL;
+
+       TPL_ASSERT(pixmap);
+
+       tbm_surface = twe_get_native_buffer_from_pixmap(pixmap);
+       if (!tbm_surface) {
+               TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
+               return NULL;
+       }
+
+       return tbm_surface;
+}
+
+static void
+__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue,
+                                                                         void *data)
+{
+       tpl_surface_t *surface = NULL;
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+       tpl_bool_t is_activated = TPL_FALSE;
+       int width, height;
+
+       surface = (tpl_surface_t *)data;
+       TPL_CHECK_ON_NULL_RETURN(surface);
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+       /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
+        * the changed window size at the next frame. */
+       width = tbm_surface_queue_get_width(surface_queue);
+       height = tbm_surface_queue_get_height(surface_queue);
+       if (surface->width != width || surface->height != height) {
+               TPL_LOG_T("WL_EGL",
+                                 "[QUEUE_RESIZE_CB] wl_egl_surface(%p) tbm_queue(%p) (%dx%d)",
+                                 wl_egl_surface, surface_queue, width, height);
+       }
+
+       /* When queue_reset_callback is called, if is_activated is different from
+        * its previous state change the reset flag to TPL_TRUE to get a new buffer
+        * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
+       is_activated = twe_surface_check_activated(wl_egl_surface->twe_surface);
+       if (wl_egl_surface->is_activated != is_activated) {
+               if (is_activated) {
+                       TPL_LOG_T("WL_EGL",
+                                         "[ACTIVATED_CB] wl_egl_surface(%p) tbm_queue(%p)",
+                                         wl_egl_surface, surface_queue);
+               } else {
+                       TPL_LOG_T("WL_EGL",
+                                         "[DEACTIVATED_CB] wl_egl_surface(%p) tbm_queue(%p)",
+                                         wl_egl_surface, surface_queue);
+               }
+       }
+
+       wl_egl_surface->reset = TPL_TRUE;
+
+       if (surface->reset_cb)
+               surface->reset_cb(surface->reset_data);
+}
+
+void __cb_window_rotate_callback(void *data)
+{
+       tpl_surface_t *surface = (tpl_surface_t *)data;
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+       int rotation;
+
+       if (!surface) {
+               TPL_ERR("Invalid parameter. surface is NULL.");
+               return;
+       }
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid parameter. surface->backend.data is NULL");
+               return;
+       }
+
+       rotation = twe_surface_get_rotation(wl_egl_surface->twe_surface);
+
+       surface->rotation = rotation;
+}
+
+static tpl_result_t
+__tpl_wl_egl_surface_init(tpl_surface_t *surface)
+{
+       tpl_wl_egl_display_t *wl_egl_display = NULL;
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+       tbm_surface_queue_h tbm_queue = NULL;
+       twe_surface_h twe_surface = NULL;
+       tpl_result_t ret = TPL_ERROR_NONE;
+
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->display);
+       TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
+       TPL_ASSERT(surface->native_handle);
+
+       wl_egl_display =
+               (tpl_wl_egl_display_t *)surface->display->backend.data;
+       if (!wl_egl_display) {
+               TPL_ERR("Invalid parameter. wl_egl_display(%p)",
+                               wl_egl_display);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
+                                                 sizeof(tpl_wl_egl_surface_t));
+       if (!wl_egl_surface) {
+               TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
+               return TPL_ERROR_OUT_OF_MEMORY;
+       }
+
+       surface->backend.data = (void *)wl_egl_surface;
+
+       if (__tpl_object_init(&wl_egl_surface->base,
+                                                 TPL_OBJECT_SURFACE,
+                                                 NULL) != TPL_ERROR_NONE) {
+               TPL_ERR("Failed to initialize backend surface's base object!");
+               goto object_init_fail;
+       }
+
+       twe_surface = twe_surface_add(wl_egl_display->wl_egl_thread,
+                                                                 wl_egl_display->twe_display,
+                                                                 surface->native_handle,
+                                                                 surface->format, surface->num_buffers);
+       if (!twe_surface) {
+               TPL_ERR("Failed to add native_window(%p) to thread(%p)",
+                               surface->native_handle, wl_egl_display->wl_egl_thread);
+               goto create_twe_surface_fail;
+       }
+
+       tbm_queue = twe_surface_get_tbm_queue(twe_surface);
+       if (!tbm_queue) {
+               TPL_ERR("Failed to get tbm_queue from twe_surface(%p)", twe_surface);
+               goto queue_create_fail;
+       }
+
+       /* Set reset_callback to tbm_queue */
+       if (tbm_surface_queue_add_reset_cb(tbm_queue,
+                                  __cb_tbm_surface_queue_reset_callback,
+                                  (void *)surface)) {
+               TPL_ERR("TBM surface queue add reset cb failed!");
+               goto add_reset_cb_fail;
+       }
+
+       wl_egl_surface->reset = TPL_FALSE;
+       wl_egl_surface->twe_surface = twe_surface;
+       wl_egl_surface->tbm_queue = tbm_queue;
+       wl_egl_surface->is_activated = TPL_FALSE;
+       wl_egl_surface->need_to_enqueue = TPL_TRUE;
+
+       surface->width = tbm_surface_queue_get_width(tbm_queue);
+       surface->height = tbm_surface_queue_get_height(tbm_queue);
+       surface->rotation = twe_surface_get_rotation(twe_surface);
+
+       ret = twe_surface_set_rotate_callback(twe_surface, (void *)surface,
+                                               (tpl_surface_cb_func_t)__cb_window_rotate_callback);
+       if (ret != TPL_ERROR_NONE) {
+               TPL_WARN("Failed to register rotate callback.");
+       }
+
+       TPL_LOG_T("WL_EGL",
+                         "[INIT1/2]tpl_surface(%p) tpl_wl_egl_surface(%p) twe_surface(%p)",
+                         surface, wl_egl_surface, twe_surface);
+       TPL_LOG_T("WL_EGL",
+                         "[INIT2/2]size(%dx%d)rot(%d)|tbm_queue(%p)|native_window(%p)",
+                         surface->width, surface->height, surface->rotation,
+                         tbm_queue, surface->native_handle);
+
+       return TPL_ERROR_NONE;
+
+add_reset_cb_fail:
+queue_create_fail:
+       twe_surface_del(twe_surface);
+create_twe_surface_fail:
+object_init_fail:
+       free(wl_egl_surface);
+       surface->backend.data = NULL;
+       return TPL_ERROR_INVALID_OPERATION;
+}
+
+static void
+__tpl_wl_egl_surface_fini(tpl_surface_t *surface)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+       tpl_wl_egl_display_t *wl_egl_display = NULL;
+
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->display);
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+       TPL_OBJECT_LOCK(wl_egl_surface);
+
+       wl_egl_display = (tpl_wl_egl_display_t *)
+                                                 surface->display->backend.data;
+
+       if (wl_egl_display == NULL) {
+               TPL_ERR("check failed: wl_egl_display == NULL");
+               TPL_OBJECT_UNLOCK(wl_egl_surface);
+               return;
+       }
+
+       if (surface->type == TPL_SURFACE_TYPE_WINDOW) {
+               TPL_LOG_T("WL_EGL",
+                                 "[FINI] wl_egl_surface(%p) native_window(%p) twe_surface(%p)",
+                                 wl_egl_surface, surface->native_handle,
+                                 wl_egl_surface->twe_surface);
+
+               if (twe_surface_del(wl_egl_surface->twe_surface)
+                               != TPL_ERROR_NONE) {
+                       TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)",
+                                       wl_egl_surface->twe_surface,
+                                       wl_egl_display->wl_egl_thread);
+               }
+
+               wl_egl_surface->twe_surface = NULL;
+               wl_egl_surface->tbm_queue = NULL;
+       }
+
+       TPL_OBJECT_UNLOCK(wl_egl_surface);
+       __tpl_object_fini(&wl_egl_surface->base);
+       free(wl_egl_surface);
+       surface->backend.data = NULL;
+}
+
+static tpl_result_t
+__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
+                                                                                        tpl_bool_t set)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+
+       if (!surface) {
+               TPL_ERR("Invalid parameter. tpl_surface(%p)", surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)",
+                               surface, wl_egl_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!wl_egl_surface->twe_surface) {
+               TPL_ERR("Invalid parameter. wl_egl_surface(%p) twe_surface(%p)",
+                               wl_egl_surface, wl_egl_surface->twe_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       twe_surface_set_rotation_capablity(wl_egl_surface->twe_surface,
+                                                                                        set);
+
+       return TPL_ERROR_NONE;
+}
+
+static tpl_result_t
+__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
+                                                                          int post_interval)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+
+       if (!surface) {
+               TPL_ERR("Invalid parameter. tpl_surface(%p)", surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)",
+                               surface, wl_egl_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!wl_egl_surface->twe_surface) {
+               TPL_ERR("Invalid parameter. wl_egl_surface(%p) twe_surface(%p)",
+                               wl_egl_surface, wl_egl_surface->twe_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       twe_surface_set_post_interval(wl_egl_surface->twe_surface,
+                                                                 post_interval);
+
+       return TPL_ERROR_NONE;
+}
+
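+/* Descriptive note: enqueues a rendered tbm_surface to the tbm_surface_queue
+ * so that the wayland-egl thread can acquire and commit it. Optional damage
+ * rects and a sync fence fd are attached to the buffer before enqueueing, and
+ * frontbuffer mode may skip the enqueue entirely. */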
+static tpl_result_t
+__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
+               tbm_surface_h tbm_surface,
+               int num_rects, const int *rects, tbm_fd sync_fence)
+{
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->display);
+       TPL_ASSERT(tbm_surface);
+       TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
+
+       tpl_wl_egl_surface_t *wl_egl_surface =
+               (tpl_wl_egl_surface_t *) surface->backend.data;
+       tbm_surface_queue_error_e tsq_err;
+       tpl_result_t ret = TPL_ERROR_NONE;
+       int bo_name = 0;
+
+       bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)",
+                               surface, wl_egl_surface);
+               TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       TPL_OBJECT_LOCK(wl_egl_surface);
+
+       if (!tbm_surface_internal_is_valid(tbm_surface)) {
+               TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
+                               tbm_surface);
+               TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+               TPL_OBJECT_UNLOCK(wl_egl_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
+
+       TPL_LOG_T("WL_EGL",
+                         "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)",
+                         wl_egl_surface, tbm_surface, bo_name, sync_fence);
+
+       /* If region information was received, save it to buf_info in the
+        * tbm_surface user_data using the API below. */
+       if (num_rects && rects) {
+               ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects);
+               if (ret != TPL_ERROR_NONE) {
+                       TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)",
+                                        num_rects, rects);
+               }
+       }
+
+       if (!wl_egl_surface->need_to_enqueue ||
+               !twe_surface_check_commit_needed(wl_egl_surface->twe_surface,
+                                                                                tbm_surface)) {
+               TPL_LOG_T("WL_EGL",
+                                 "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
+                                 ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
+               TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+               TPL_OBJECT_UNLOCK(wl_egl_surface);
+               return TPL_ERROR_NONE;
+       }
+
+       /* In frontbuffer mode, tbm_surface_queue_enqueue, acquire, and commit
+        * are skipped when the tbm_surface the client wants to enqueue is the
+        * same as the already-set surface->frontbuffer.
+        */
+       if (surface->is_frontbuffer_mode) {
+               /* The first buffer to be activated in frontbuffer mode must be
+                * committed. Subsequent frames do not need to be committed because
+                * the buffer is already being displayed.
+                */
+               if (surface->frontbuffer == tbm_surface)
+                       wl_egl_surface->need_to_enqueue = TPL_FALSE;
+
+               if (sync_fence != -1) {
+                       close(sync_fence);
+                       sync_fence = -1;
+               }
+       }
+
+       if (sync_fence != -1) {
+               ret = twe_surface_set_sync_fd(wl_egl_surface->twe_surface,
+                                                                         tbm_surface, sync_fence);
+               if (ret != TPL_ERROR_NONE) {
+                       TPL_WARN("Failed to set sync fd (%d). But it will continue.",
+                                        sync_fence);
+               }
+       }
+
+       tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
+                                                                               tbm_surface);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               tbm_surface_internal_unref(tbm_surface);
+               TPL_ERR("Failed to enqueue tbm_surface(%p). tpl_surface(%p) tsq_err=%d",
+                               tbm_surface, surface, tsq_err);
+               TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+               TPL_OBJECT_UNLOCK(wl_egl_surface);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       tbm_surface_internal_unref(tbm_surface);
+
+       TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+       TPL_OBJECT_UNLOCK(wl_egl_surface);
+
+       return TPL_ERROR_NONE;
+}
+
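+/* Descriptive note: reports whether the surface is still usable; returns
+ * TPL_FALSE when the wl_egl_surface has been flagged as reset. */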
+static tpl_bool_t
+__tpl_wl_egl_surface_validate(tpl_surface_t *surface)
+{
+       tpl_bool_t retval = TPL_TRUE;
+
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->backend.data);
+
+       tpl_wl_egl_surface_t *wl_egl_surface =
+               (tpl_wl_egl_surface_t *)surface->backend.data;
+
+       retval = !(wl_egl_surface->reset);
+
+       return retval;
+}
+
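+/* Descriptive note: returns a dequeued but unused tbm_surface back to the
+ * tbm_surface_queue via tbm_surface_queue_cancel_dequeue and drops the
+ * reference taken at dequeue time. */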
+static tpl_result_t
+__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+                                                                                       tbm_surface_h tbm_surface)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid backend surface. surface(%p) wl_egl_surface(%p)",
+                               surface, wl_egl_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!tbm_surface_internal_is_valid(tbm_surface)) {
+               TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       tbm_surface_internal_unref(tbm_surface);
+
+       tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+                                                                                          tbm_surface);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
+                               tbm_surface, surface);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
+                         surface, tbm_surface);
+
+       return TPL_ERROR_NONE;
+}
+
+#define CAN_DEQUEUE_TIMEOUT_MS 10000
+
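+/* Descriptive note: dequeues a renderable tbm_surface from the
+ * tbm_surface_queue. Waits up to CAN_DEQUEUE_TIMEOUT_MS for a dequeueable
+ * slot (forcing a queue flush on timeout), updates the activation state and
+ * surface size, handles frontbuffer reuse, and returns an optional
+ * buffer-release fence fd through sync_fence. */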
+static tbm_surface_h
+__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
+                                                                       tbm_fd *sync_fence)
+{
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->backend.data);
+       TPL_ASSERT(surface->display);
+       TPL_ASSERT(surface->display->backend.data);
+       TPL_OBJECT_CHECK_RETURN(surface, NULL);
+
+       tbm_surface_h tbm_surface = NULL;
+       tpl_wl_egl_surface_t *wl_egl_surface =
+               (tpl_wl_egl_surface_t *)surface->backend.data;
+       tpl_wl_egl_display_t *wl_egl_display =
+               (tpl_wl_egl_display_t *)surface->display->backend.data;
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+       int is_activated = 0;
+       int bo_name = 0;
+       tpl_result_t lock_ret = TPL_ERROR_INVALID_OPERATION;
+
+       TPL_OBJECT_UNLOCK(surface);
+       tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
+                               wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
+       TPL_OBJECT_LOCK(surface);
+
+       /* Once can_dequeue has succeeded, call twe_display_lock to prevent other
+        * events from being processed in the wayland_egl_thread during the
+        * dequeue procedure below. */
+       lock_ret = twe_display_lock(wl_egl_display->twe_display);
+
+       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
+               TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
+                               wl_egl_surface->tbm_queue, surface);
+               if (twe_surface_queue_force_flush(wl_egl_surface->twe_surface)
+                       != TPL_ERROR_NONE) {
+                       TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
+                                       wl_egl_surface->tbm_queue, surface);
+                       if (lock_ret == TPL_ERROR_NONE)
+                               twe_display_unlock(wl_egl_display->twe_display);
+                       return NULL;
+               } else {
+                       tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+               }
+       }
+
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
+                               wl_egl_surface->tbm_queue, surface);
+               if (lock_ret == TPL_ERROR_NONE)
+                       twe_display_unlock(wl_egl_display->twe_display);
+               return NULL;
+       }
+
+       /* A wayland client can check its state (ACTIVATED or DEACTIVATED) with
+        * the function below [wayland_tbm_client_queue_check_activate()].
+        * It has to be called before tbm_surface_queue_dequeue() in order to
+        * know in which state the next buffer will be dequeued.
+        *
+        * ACTIVATED means non-composite mode: the client gets buffers that can
+        * be displayed directly (without compositing).
+        * DEACTIVATED means composite mode: the client's buffer will be displayed
+        * by the compositor (E20) with compositing.
+        */
+       is_activated = twe_surface_check_activated(wl_egl_surface->twe_surface);
+       wl_egl_surface->is_activated = is_activated;
+
+       surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
+       surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
+
+       if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
+               /* If surface->frontbuffer is already set in frontbuffer mode,
+                * return that frontbuffer while the surface is still activated;
+                * otherwise dequeue a new buffer after resetting
+                * surface->frontbuffer to NULL. */
+               if (is_activated && !wl_egl_surface->reset) {
+                       TPL_LOG_T("WL_EGL",
+                                         "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
+                                         surface->frontbuffer,
+                                         tbm_bo_export(tbm_surface_internal_get_bo(
+                                                                       surface->frontbuffer, 0)));
+                       TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
+                                                         "[DEQ]~[ENQ] BO_NAME:%d",
+                                                         tbm_bo_export(tbm_surface_internal_get_bo(
+                                                                                               surface->frontbuffer, 0)));
+                       if (lock_ret == TPL_ERROR_NONE)
+                               twe_display_unlock(wl_egl_display->twe_display);
+                       return surface->frontbuffer;
+               } else {
+                       surface->frontbuffer = NULL;
+                       wl_egl_surface->need_to_enqueue = TPL_TRUE;
+               }
+       } else {
+               surface->frontbuffer = NULL;
+       }
+
+       tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
+                                                                               &tbm_surface);
+       if (!tbm_surface) {
+               TPL_ERR("Failed to dequeue from tbm_queue(%p) surface(%p)| tsq_err = %d",
+                               wl_egl_surface->tbm_queue, surface, tsq_err);
+               if (lock_ret == TPL_ERROR_NONE)
+                       twe_display_unlock(wl_egl_display->twe_display);
+               return NULL;
+       }
+
+       tbm_surface_internal_ref(tbm_surface);
+
+       /* If twe_surface_get_buffer_release_fence_fd() returns -1,
+        * the tbm_surface can be used immediately.
+        * Otherwise, the user (EGL) has to wait until the fence is signaled. */
+       if (sync_fence) {
+               *sync_fence = twe_surface_get_buffer_release_fence_fd(
+                                                       wl_egl_surface->twe_surface, tbm_surface);
+       }
+
+       bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+
+       if (surface->is_frontbuffer_mode && is_activated)
+               surface->frontbuffer = tbm_surface;
+
+       wl_egl_surface->reset = TPL_FALSE;
+
+       TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name);
+       TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+       TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)",
+                         tbm_surface, bo_name, sync_fence ? *sync_fence : -1);
+
+       if (lock_ret == TPL_ERROR_NONE)
+               twe_display_unlock(wl_egl_display->twe_display);
+
+       return tbm_surface;
+}
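+
+/* Illustrative sketch (not part of this backend): a typical frame cycle as
+ * seen by a caller going through tpl_surface_backend_t. The real call sites
+ * live outside this file, and the local variable names below (fence, buf,
+ * num_rects, rects) are hypothetical:
+ *
+ *   tbm_fd fence = -1;
+ *   tbm_surface_h buf =
+ *           surface->backend.dequeue_buffer(surface, UINT64_MAX, &fence);
+ *   if (fence != -1) {
+ *           // wait on (or hand off) the release fence before rendering
+ *   }
+ *   // ... render into buf ...
+ *   surface->backend.enqueue_buffer(surface, buf, num_rects, rects, fence);
+ *   // or, if the frame is abandoned:
+ *   // surface->backend.cancel_dequeued_buffer(surface, buf);
+ */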
+
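+/* Descriptive note: queries the current width/height of the tbm_surface_queue
+ * backing the given tpl_surface. Either out parameter may be NULL. */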
+void
+__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface =
+               (tpl_wl_egl_surface_t *)surface->backend.data;
+
+       if (width)
+               *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
+       if (height)
+               *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
+}
+
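+/* Descriptive note: reports whether the given native display handle is a
+ * wl_display, i.e. whether this backend can drive it. */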
+tpl_bool_t
+__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
+{
+       if (!native_dpy) return TPL_FALSE;
+
+       if (twe_check_native_handle_is_wl_display(native_dpy))
+               return TPL_TRUE;
+
+       return TPL_FALSE;
+}
+
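+/* Descriptive note: registers the wayland-egl-thread display backend by
+ * setting the backend type and wiring up the display entry points implemented
+ * in this file. */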
+void
+__tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend)
+{
+       TPL_ASSERT(backend);
+
+       backend->type = TPL_BACKEND_WAYLAND_THREAD;
+       backend->data = NULL;
+
+       backend->init = __tpl_wl_egl_display_init;
+       backend->fini = __tpl_wl_egl_display_fini;
+       backend->query_config = __tpl_wl_egl_display_query_config;
+       backend->filter_config = __tpl_wl_egl_display_filter_config;
+       backend->get_window_info = __tpl_wl_egl_display_get_window_info;
+       backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
+       backend->get_buffer_from_native_pixmap =
+               __tpl_wl_egl_display_get_buffer_from_native_pixmap;
+}
+
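+/* Descriptive note: registers the wayland-egl-thread surface backend by
+ * setting the backend type and wiring up the surface entry points implemented
+ * in this file. */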
+void
+__tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
+{
+       TPL_ASSERT(backend);
+
+       backend->type = TPL_BACKEND_WAYLAND_THREAD;
+       backend->data = NULL;
+
+       backend->init = __tpl_wl_egl_surface_init;
+       backend->fini = __tpl_wl_egl_surface_fini;
+       backend->validate = __tpl_wl_egl_surface_validate;
+       backend->cancel_dequeued_buffer =
+               __tpl_wl_egl_surface_cancel_dequeued_buffer;
+       backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
+       backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
+       backend->set_rotation_capability =
+               __tpl_wl_egl_surface_set_rotation_capability;
+       backend->set_post_interval =
+               __tpl_wl_egl_surface_set_post_interval;
+       backend->get_size =
+               __tpl_wl_egl_surface_get_size;
+}
+