wl_egl: Add defense code for fake signal in buffer_clear
[platform/core/uifw/libtpl-egl.git] / src / tpl_wl_egl_thread.c
old mode 100644 (file)
new mode 100755 (executable)
index f819b45..67c14a1
-#define inline __inline__
-
-#undef inline
 
 #include "tpl_internal.h"
 
 #include <string.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include <sys/eventfd.h>
 
 #include <tbm_bufmgr.h>
 #include <tbm_surface.h>
 #include <tbm_surface_internal.h>
 #include <tbm_surface_queue.h>
 
-#include "tpl_wayland_egl_thread.h"
+#include <wayland-client.h>
+#include <wayland-tbm-server.h>
+#include <wayland-tbm-client.h>
+#include <wayland-egl-backend.h>
 
-/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
-#define CLIENT_QUEUE_SIZE 3
+#include <tdm_client.h>
+
+#include "wayland-egl-tizen/wayland-egl-tizen.h"
+#include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
+
+#ifndef TIZEN_FEATURE_ENABLE
+#define TIZEN_FEATURE_ENABLE 1
+#endif
 
-typedef struct _tpl_wayland_egl_display tpl_wayland_egl_display_t;
-typedef struct _tpl_wayland_egl_surface tpl_wayland_egl_surface_t;
+#if TIZEN_FEATURE_ENABLE
+#include <tizen-surface-client-protocol.h>
+#include <presentation-time-client-protocol.h>
+#include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
+#endif
 
-struct _tpl_wayland_egl_display {
-       twe_thread *wl_egl_thread;
-       twe_display_h twe_display;
+#include "tpl_utils_gthread.h"
+
+static int wl_egl_buffer_key;
+#define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key)
+
+/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
+#define BUFFER_ARRAY_SIZE 9
+
+typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
+typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
+typedef struct _tpl_wl_egl_buffer  tpl_wl_egl_buffer_t;
+typedef struct _surface_vblank     tpl_surface_vblank_t;
+
+struct _tpl_wl_egl_display {
+       tpl_gsource                  *disp_source;
+       tpl_gthread                  *thread;
+       tpl_gmutex                    wl_event_mutex;
+
+       struct wl_display            *wl_display;
+       struct wl_event_queue        *ev_queue;
+       struct wayland_tbm_client    *wl_tbm_client;
+       int                           last_error; /* errno of the last wl_display error*/
+
+       tpl_bool_t                    wl_initialized;
+
+       tpl_bool_t                    use_wait_vblank;
+       tpl_bool_t                    use_explicit_sync;
+       tpl_bool_t                    use_tss;
+       tpl_bool_t                    prepared;
+       /* To make sure that tpl_gsource has been successfully finalized. */
+       tpl_bool_t                    gsource_finalized;
+       tpl_gmutex                    disp_mutex;
+       tpl_gcond                     disp_cond;
+       struct {
+               tdm_client               *tdm_client;
+               tpl_gsource              *tdm_source;
+               int                       tdm_display_fd;
+               tpl_bool_t                tdm_initialized;
+               tpl_list_t               *surface_vblanks;
+
+               /* To make sure that tpl_gsource has been successfully finalized. */
+               tpl_bool_t                gsource_finalized;
+               tpl_gmutex                tdm_mutex;
+               tpl_gcond                 tdm_cond;
+       } tdm;
+
+#if TIZEN_FEATURE_ENABLE
+       struct tizen_surface_shm     *tss; /* used for surface buffer_flush */
+       struct wp_presentation       *presentation; /* for presentation feedback */
+       struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
+#endif
 };
 
-struct _tpl_wayland_egl_surface {
-       tpl_object_t base;
-       twe_surface_h twe_surface;
-       tbm_surface_queue_h tbm_queue;
-       tpl_bool_t is_activated;
-       tpl_bool_t reset; /* TRUE if queue reseted by external  */
+typedef enum surf_message {
+       NONE_MESSAGE = 0,
+       INIT_SURFACE,
+       ACQUIRABLE,
+} surf_message;
+
+struct _tpl_wl_egl_surface {
+       tpl_gsource                  *surf_source;
+
+       tbm_surface_queue_h           tbm_queue;
+       int                           num_buffers;
+
+       struct wl_egl_window         *wl_egl_window;
+       struct wl_surface            *wl_surface;
+
+#if TIZEN_FEATURE_ENABLE
+       struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
+       struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */
+#endif
+
+       tpl_surface_vblank_t         *vblank;
+
+       /* surface information */
+       int                           render_done_cnt;
+       unsigned int                  serial;
+
+       int                           width;
+       int                           height;
+       int                           format;
+       int                           latest_transform;
+       int                           rotation;
+       int                           post_interval;
+
+       tpl_wl_egl_display_t         *wl_egl_display;
+       tpl_surface_t                *tpl_surface;
+
+       /* wl_egl_buffer array for buffer tracing */
+       tpl_wl_egl_buffer_t          *buffers[BUFFER_ARRAY_SIZE];
+       int                           buffer_cnt; /* the number of using wl_egl_buffers */
+       tpl_gmutex                    buffers_mutex;
+       tbm_surface_h                 last_enq_buffer;
+
+       tpl_list_t                   *presentation_feedbacks; /* for tracing presentation feedbacks */
+
+       struct {
+               tpl_gmutex                mutex;
+               int                       fd;
+       } commit_sync;
+
+       struct {
+               tpl_gmutex                mutex;
+               int                       fd;
+       } presentation_sync;
+
+       tpl_gmutex                    surf_mutex;
+       tpl_gcond                     surf_cond;
+
+       surf_message                  sent_message;
+
+       /* for waiting draw done */
+       tpl_bool_t                    use_render_done_fence;
+       tpl_bool_t                    is_activated;
+       tpl_bool_t                    reset; /* TRUE if queue reseted by external  */
+       tpl_bool_t                    need_to_enqueue;
+       tpl_bool_t                    prerotation_capability;
+       tpl_bool_t                    vblank_done;
+       tpl_bool_t                    set_serial_is_used;
+       tpl_bool_t                    initialized_in_thread;
+
+       /* To make sure that tpl_gsource has been successfully finalized. */
+       tpl_bool_t                    gsource_finalized;
 };
 
-static tpl_result_t
-__tpl_wl_egl_display_init(tpl_display_t *display)
-{
-       tpl_wayland_egl_display_t *wayland_egl_display = NULL;
+struct _surface_vblank {
+       tdm_client_vblank            *tdm_vblank;
+       tpl_wl_egl_surface_t         *wl_egl_surface;
+       tpl_list_t                   *waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
+       tpl_gmutex                    mutex;
+};
 
-       TPL_ASSERT(display);
+typedef enum buffer_status {
+       RELEASED = 0,             // 0
+       DEQUEUED,                 // 1
+       ENQUEUED,                 // 2
+       ACQUIRED,                 // 3
+       WAITING_SIGNALED,         // 4
+       WAITING_VBLANK,           // 5
+       COMMITTED,                // 6
+} buffer_status_t;
+
+static const char *status_to_string[7] = {
+       "RELEASED",                 // 0
+       "DEQUEUED",                 // 1
+       "ENQUEUED",                 // 2
+       "ACQUIRED",                 // 3
+       "WAITING_SIGNALED",         // 4
+       "WAITING_VBLANK",           // 5
+       "COMMITTED",                // 6
+};
 
-       /* Do not allow default display in wayland. */
-       if (!display->native_handle) {
-               TPL_ERR("Invalid native handle for display.");
-               return TPL_ERROR_INVALID_PARAMETER;
-       }
+struct _tpl_wl_egl_buffer {
+       tbm_surface_h                 tbm_surface;
+       int                           bo_name;
 
-       wayland_egl_display = (tpl_wayland_egl_display_t *) calloc(1,
-                                                 sizeof(tpl_wayland_egl_display_t));
-       if (!wayland_egl_display) {
-               TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_display_t.");
-               return TPL_ERROR_OUT_OF_MEMORY;
-       }
+       struct wl_proxy              *wl_buffer;
+       int                           dx, dy; /* position to attach to wl_surface */
+       int                           width, height; /* size to attach to wl_surface */
 
-       display->backend.data = wayland_egl_display;
-       display->bufmgr_fd = -1;
+       buffer_status_t               status; /* for tracing buffer status */
+       int                           idx; /* position index in buffers array of wl_egl_surface */
 
-       if (twe_check_native_handle_is_wl_display(display->native_handle)) {
-               wayland_egl_display->wl_egl_thread = twe_thread_create();
-               if (!wayland_egl_display->wl_egl_thread) {
-                       TPL_ERR("Failed to create twe_thread.");
-                       goto free_display;
-               }
+       /* for damage region */
+       int                           num_rects;
+       int                          *rects;
 
-               wayland_egl_display->twe_display =
-                       twe_display_add(wayland_egl_display->wl_egl_thread,
-                                                       display->native_handle,
-                                                       display->backend.type);
-               if (!wayland_egl_display->twe_display) {
-                       TPL_ERR("Failed to add native_display(%p) to thread(%p)",
-                                       display->native_handle,
-                                       wayland_egl_display->wl_egl_thread);
-                       goto free_display;
-               }
+       /* for wayland_tbm_client_set_buffer_transform */
+       int                           w_transform;
+       tpl_bool_t                    w_rotated;
 
-       } else {
-               TPL_ERR("Invalid native handle for display.");
-               goto free_display;
-       }
+       /* for wl_surface_set_buffer_transform */
+       int                           transform;
 
-       TPL_LOG_T("WL_EGL",
-                         "[INIT DISPLAY] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)",
-                         wayland_egl_display,
-                         wayland_egl_display->wl_egl_thread,
-                         wayland_egl_display->twe_display);
+       /* for wayland_tbm_client_set_buffer_serial */
+       unsigned int                  serial;
 
-       return TPL_ERROR_NONE;
+       /* for checking need_to_commit (frontbuffer mode) */
+       tpl_bool_t                    need_to_commit;
 
-free_display:
-       if (wayland_egl_display->twe_display)
-               twe_display_del(wayland_egl_display->twe_display);
-       if (wayland_egl_display->wl_egl_thread)
-               twe_thread_destroy(wayland_egl_display->wl_egl_thread);
-       wayland_egl_display->wl_egl_thread = NULL;
-       wayland_egl_display->twe_display = NULL;
-
-       free(wayland_egl_display);
-       display->backend.data = NULL;
-       return TPL_ERROR_INVALID_OPERATION;
-}
+       /* for checking draw done */
+       tpl_bool_t                    draw_done;
 
-static void
-__tpl_wl_egl_display_fini(tpl_display_t *display)
-{
-       tpl_wayland_egl_display_t *wayland_egl_display;
+#if TIZEN_FEATURE_ENABLE
+       /* to get release event via zwp_linux_buffer_release_v1 */
+       struct zwp_linux_buffer_release_v1 *buffer_release;
+#endif
+       /* Each buffer owns its release_fence_fd until it passes
+        * ownership to EGL */
+       int32_t                       release_fence_fd;
 
-       TPL_ASSERT(display);
+       /* Each buffer owns its acquire_fence_fd.
+        * If it use zwp_linux_buffer_release_v1 the ownership of this fd
+        * will be passed to display server
+        * Otherwise it will be used as a fence waiting for render done
+        * on tpl thread */
+       int32_t                       acquire_fence_fd;
 
-       wayland_egl_display = (tpl_wayland_egl_display_t *)display->backend.data;
-       if (wayland_egl_display) {
+       /* Fd to send a signal when wl_surface_commit with this buffer */
+       int32_t                       commit_sync_fd;
 
-               TPL_LOG_T("WL_EGL",
-                                 "[FINI] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)",
-                                 wayland_egl_display,
-                                 wayland_egl_display->wl_egl_thread,
-                                 wayland_egl_display->twe_display);
+       /* Fd to send a signal when the presentation feedback
+        * is received from the display server */
+       int32_t                       presentation_sync_fd;
 
-               if (wayland_egl_display->twe_display) {
-                       tpl_result_t ret = TPL_ERROR_NONE;
-                       ret = twe_display_del(wayland_egl_display->twe_display);
-                       if (ret != TPL_ERROR_NONE)
-                               TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)",
-                                               wayland_egl_display->twe_display,
-                                               wayland_egl_display->wl_egl_thread);
-                       wayland_egl_display->twe_display = NULL;
-               }
+       tpl_gsource                  *waiting_source;
 
-               if (wayland_egl_display->wl_egl_thread) {
-                       twe_thread_destroy(wayland_egl_display->wl_egl_thread);
-                       wayland_egl_display->wl_egl_thread = NULL;
-               }
+       tpl_gmutex                    mutex;
+       tpl_gcond                     cond;
 
-               free(wayland_egl_display);
-       }
+       tpl_wl_egl_surface_t         *wl_egl_surface;
+};
 
-       display->backend.data = NULL;
-}
+#if TIZEN_FEATURE_ENABLE
+struct pst_feedback {
+       /* to get presentation feedback from display server */
+       struct wp_presentation_feedback *presentation_feedback;
 
-static tpl_result_t
-__tpl_wl_egl_display_query_config(tpl_display_t *display,
-                                                                          tpl_surface_type_t surface_type,
-                                                                          int red_size, int green_size,
-                                                                          int blue_size, int alpha_size,
-                                                                          int color_depth, int *native_visual_id,
-                                                                          tpl_bool_t *is_slow)
-{
-       TPL_ASSERT(display);
+       int32_t                          pst_sync_fd;
 
-       if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
-                       green_size == 8 && blue_size == 8 &&
-                       (color_depth == 32 || color_depth == 24)) {
+       int                              bo_name;
+       tpl_wl_egl_surface_t            *wl_egl_surface;
 
-               if (alpha_size == 8) {
-                       if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
-                       if (is_slow) *is_slow = TPL_FALSE;
-                       return TPL_ERROR_NONE;
-               }
-               if (alpha_size == 0) {
-                       if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
-                       if (is_slow) *is_slow = TPL_FALSE;
-                       return TPL_ERROR_NONE;
-               }
-       }
+};
+#endif
 
-       return TPL_ERROR_INVALID_PARAMETER;
-}
+static const struct wl_buffer_listener wl_buffer_release_listener;
 
+static int
+_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
+static void
+_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
+static tpl_bool_t
+_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface);
+static void
+__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
+static tpl_wl_egl_buffer_t *
+_get_wl_egl_buffer(tbm_surface_h tbm_surface);
+static int
+_write_to_eventfd(int eventfd);
+static void
+_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface);
 static tpl_result_t
-__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
-                                                                               int alpha_size)
+_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
+static void
+_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
+                                                 tpl_wl_egl_buffer_t *wl_egl_buffer);
+static void
+__cb_surface_vblank_free(void *data);
+
+static struct tizen_private *
+tizen_private_create()
 {
-       TPL_IGNORE(display);
-       TPL_IGNORE(visual_id);
-       TPL_IGNORE(alpha_size);
-       return TPL_ERROR_NONE;
+       struct tizen_private *private = NULL;
+       private = (struct tizen_private *)calloc(1, sizeof(struct tizen_private));
+       if (private) {
+               private->magic = WL_EGL_TIZEN_MAGIC;
+               private->rotation = 0;
+               private->frontbuffer_mode = 0;
+               private->transform = 0;
+               private->window_transform = 0;
+               private->serial = 0;
+
+               private->data = NULL;
+               private->rotate_callback = NULL;
+               private->get_rotation_capability = NULL;
+               private->set_window_serial_callback = NULL;
+               private->set_frontbuffer_callback = NULL;
+               private->create_commit_sync_fd = NULL;
+               private->create_presentation_sync_fd = NULL;
+               private->merge_sync_fds = NULL;
+       }
+
+       return private;
 }
 
-static tpl_result_t
-__tpl_wl_egl_display_get_window_info(tpl_display_t *display,
-               tpl_handle_t window, int *width,
-               int *height, tbm_format *format,
-               int depth, int a_size)
+static tpl_bool_t
+_check_native_handle_is_wl_display(tpl_handle_t display)
 {
-       tpl_result_t ret = TPL_ERROR_NONE;
-
-       TPL_ASSERT(display);
-       TPL_ASSERT(window);
+       struct wl_interface *wl_egl_native_dpy = *(void **) display;
 
-       if (format) {
-               if (a_size == 8)
-                       *format = TBM_FORMAT_ARGB8888;
-               else
-                       *format = TBM_FORMAT_XRGB8888;
+       if (!wl_egl_native_dpy) {
+               TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy);
+               return TPL_FALSE;
        }
 
-       if ((ret = twe_get_native_window_info(window, width, height))
-                       != TPL_ERROR_NONE) {
-               TPL_ERR("Failed to get size info of native_window(%p)", window);
+       /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
+          is a memory address pointing to the structure of wl_display_interface. */
+       if (wl_egl_native_dpy == &wl_display_interface)
+               return TPL_TRUE;
+
+       if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
+                               strlen(wl_display_interface.name)) == 0) {
+               return TPL_TRUE;
        }
 
-       return ret;
+       return TPL_FALSE;
 }
 
-static tpl_result_t
-__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
-               tpl_handle_t pixmap, int *width,
-               int *height, tbm_format *format)
+static tpl_bool_t
+__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
 {
-       tbm_surface_h   tbm_surface = NULL;
+       tpl_wl_egl_display_t       *wl_egl_display = NULL;
+       tdm_error                   tdm_err = TDM_ERROR_NONE;
 
-       tbm_surface = twe_get_native_buffer_from_pixmap(pixmap);
-       if (!tbm_surface) {
-               TPL_ERR("Failed to get tbm_surface_h from native pixmap.");
-               return TPL_ERROR_INVALID_OPERATION;
+       TPL_IGNORE(message);
+
+       wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+       if (!wl_egl_display) {
+               TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource);
+               TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
+               return TPL_FALSE;
        }
 
-       if (width) *width = tbm_surface_get_width(tbm_surface);
-       if (height) *height = tbm_surface_get_height(tbm_surface);
-       if (format) *format = tbm_surface_get_format(tbm_surface);
+       tdm_err = tdm_client_handle_events(wl_egl_display->tdm.tdm_client);
 
-       return TPL_ERROR_NONE;
-}
+       /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
+        * When tdm_source is no longer available due to an unexpected situation,
+        * wl_egl_thread must remove it from the thread and destroy it.
+        * In that case, tdm_vblank can no longer be used for surfaces and displays
+        * that used this tdm_source. */
+       if (tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
+                               tdm_err);
+               TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
 
-static tbm_surface_h
-__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
-{
-       tbm_surface_h tbm_surface = NULL;
+               tpl_gsource_destroy(gsource, TPL_FALSE);
 
-       TPL_ASSERT(pixmap);
+               wl_egl_display->tdm.tdm_source = NULL;
 
-       tbm_surface = twe_get_native_buffer_from_pixmap(pixmap);
-       if (!tbm_surface) {
-               TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
-               return NULL;
+               return TPL_FALSE;
        }
 
-       return tbm_surface;
+       return TPL_TRUE;
 }
 
 static void
-__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue,
-                                                                         void *data)
+__thread_func_tdm_finalize(tpl_gsource *gsource)
 {
-       tpl_surface_t *surface = NULL;
-       tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
-       tpl_bool_t is_activated = TPL_FALSE;
-       int width, height;
+       tpl_wl_egl_display_t *wl_egl_display = NULL;
 
-       surface = (tpl_surface_t *)data;
-       TPL_CHECK_ON_NULL_RETURN(surface);
+       wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
 
-       wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
-       TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface);
+       tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
 
-       /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
-        * the changed window size at the next frame. */
-       width = tbm_surface_queue_get_width(surface_queue);
-       height = tbm_surface_queue_get_height(surface_queue);
-       if (surface->width != width || surface->height != height) {
-               TPL_LOG_T("WL_EGL",
-                                 "[QUEUE_RESIZE_CB] wayland_egl_surface(%p) tbm_queue(%p) (%dx%d)",
-                                 wayland_egl_surface, surface_queue, width, height);
-               wayland_egl_surface->reset = TPL_TRUE;
-       }
+       TPL_INFO("[TDM_CLIENT_FINI]",
+                        "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
+                        wl_egl_display, wl_egl_display->tdm.tdm_client,
+                        wl_egl_display->tdm.tdm_display_fd);
 
-       /* When queue_reset_callback is called, if is_activated is different from
-        * its previous state change the reset flag to TPL_TRUE to get a new buffer
-        * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
-       is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface);
-       if (wayland_egl_surface->is_activated != is_activated) {
-               if (is_activated) {
-                       TPL_LOG_T("WL_EGL",
-                                         "[ACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)",
-                                         wayland_egl_surface, surface_queue);
-               } else {
-                       TPL_LOG_T("WL_EGL",
-                                         "[DEACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)",
-                                         wayland_egl_surface, surface_queue);
+       if (wl_egl_display->tdm.tdm_client) {
+
+               if (wl_egl_display->tdm.surface_vblanks) {
+                       __tpl_list_free(wl_egl_display->tdm.surface_vblanks,
+                                   __cb_surface_vblank_free);
+                       wl_egl_display->tdm.surface_vblanks = NULL;
                }
-               wayland_egl_surface->reset = TPL_TRUE;
+
+               tdm_client_destroy(wl_egl_display->tdm.tdm_client);
+               wl_egl_display->tdm.tdm_client = NULL;
+               wl_egl_display->tdm.tdm_display_fd = -1;
+               wl_egl_display->tdm.tdm_source = NULL;
        }
 
-       if (surface->reset_cb)
-               surface->reset_cb(surface->reset_data);
+       wl_egl_display->use_wait_vblank = TPL_FALSE;
+       wl_egl_display->tdm.tdm_initialized = TPL_FALSE;
+       wl_egl_display->tdm.gsource_finalized = TPL_TRUE;
+
+       tpl_gcond_signal(&wl_egl_display->tdm.tdm_cond);
+       tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
 }
 
-void __cb_window_rotate_callback(void *data)
+static tpl_gsource_functions tdm_funcs = {
+       .prepare  = NULL,
+       .check    = NULL,
+       .dispatch = __thread_func_tdm_dispatch,
+       .finalize = __thread_func_tdm_finalize,
+};
+
+tpl_result_t
+_thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display)
 {
-       tpl_surface_t *surface = (tpl_surface_t *)data;
-       tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
-       int rotation;
+       tdm_client       *tdm_client = NULL;
+       int               tdm_display_fd = -1;
+       tdm_error         tdm_err = TDM_ERROR_NONE;
 
-       if (!surface) {
-               TPL_ERR("Inavlid parameter. surface is NULL.");
-               return;
+       tdm_client = tdm_client_create(&tdm_err);
+       if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
+               return TPL_ERROR_INVALID_OPERATION;
        }
 
-       wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
-       if (!wayland_egl_surface) {
-               TPL_ERR("Invalid parameter. surface->backend.data is NULL");
-               return;
+       tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
+       if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
+               tdm_client_destroy(tdm_client);
+               return TPL_ERROR_INVALID_OPERATION;
        }
 
-       rotation = twe_surface_get_rotation(wayland_egl_surface->twe_surface);
+       wl_egl_display->tdm.tdm_display_fd  = tdm_display_fd;
+       wl_egl_display->tdm.tdm_client      = tdm_client;
+       wl_egl_display->tdm.tdm_source      = NULL;
+       wl_egl_display->tdm.tdm_initialized = TPL_TRUE;
+       wl_egl_display->tdm.surface_vblanks = __tpl_list_alloc();
 
-       surface->rotation = rotation;
-}
+       TPL_INFO("[TDM_CLIENT_INIT]",
+                        "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
+                        wl_egl_display, tdm_client, tdm_display_fd);
 
-static tpl_result_t
-__tpl_wl_egl_surface_init(tpl_surface_t *surface)
-{
-       tpl_wayland_egl_display_t *wayland_egl_display = NULL;
-       tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
-       tbm_surface_queue_h tbm_queue = NULL;
-       twe_surface_h twe_surface = NULL;
-       tpl_result_t ret = TPL_ERROR_NONE;
+       return TPL_ERROR_NONE;
+}
 
-       TPL_ASSERT(surface);
-       TPL_ASSERT(surface->display);
-       TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
-       TPL_ASSERT(surface->native_handle);
+#define IMPL_TIZEN_SURFACE_SHM_VERSION 2
 
-       wayland_egl_display =
-               (tpl_wayland_egl_display_t *)surface->display->backend.data;
-       if (!wayland_egl_display) {
-               TPL_ERR("Invalid parameter. wayland_egl_display(%p)",
-                               wayland_egl_display);
-               return TPL_ERROR_INVALID_PARAMETER;
-       }
 
-       wayland_egl_surface = (tpl_wayland_egl_surface_t *) calloc(1,
-                                                 sizeof(tpl_wayland_egl_surface_t));
-       if (!wayland_egl_surface) {
-               TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_surface_t.");
-               return TPL_ERROR_OUT_OF_MEMORY;
+static void
+__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
+                                                         uint32_t name, const char *interface,
+                                                         uint32_t version)
+{
+#if TIZEN_FEATURE_ENABLE
+       tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
+
+       if (!strcmp(interface, "tizen_surface_shm")) {
+               wl_egl_display->tss =
+                       wl_registry_bind(wl_registry,
+                                                        name,
+                                                        &tizen_surface_shm_interface,
+                                                        ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
+                                                        version : IMPL_TIZEN_SURFACE_SHM_VERSION));
+               wl_egl_display->use_tss = TPL_TRUE;
+       } else if (!strcmp(interface, wp_presentation_interface.name)) {
+               wl_egl_display->presentation =
+                                       wl_registry_bind(wl_registry,
+                                                                        name, &wp_presentation_interface, 1);
+               TPL_DEBUG("bind wp_presentation_interface");
+       } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
+               char *env = tpl_getenv("TPL_EFS");
+               if (env && !atoi(env)) {
+                       wl_egl_display->use_explicit_sync = TPL_FALSE;
+               } else {
+                       wl_egl_display->explicit_sync =
+                                       wl_registry_bind(wl_registry, name,
+                                                                        &zwp_linux_explicit_synchronization_v1_interface, 1);
+                       wl_egl_display->use_explicit_sync = TPL_TRUE;
+                       TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
+               }
        }
+#endif
+}
 
-       surface->backend.data = (void *)wayland_egl_surface;
+static void
+__cb_wl_resistry_global_remove_callback(void *data,
+                                                                        struct wl_registry *wl_registry,
+                                                                        uint32_t name)
+{
+}
 
-       if (__tpl_object_init(&wayland_egl_surface->base,
-                                                 TPL_OBJECT_SURFACE,
-                                                 NULL) != TPL_ERROR_NONE) {
-               TPL_ERR("Failed to initialize backend surface's base object!");
-               goto object_init_fail;
-       }
+static const struct wl_registry_listener registry_listener = {
+       __cb_wl_resistry_global_callback,
+       __cb_wl_resistry_global_remove_callback
+};
 
-       twe_surface = twe_surface_add(wayland_egl_display->wl_egl_thread,
-                                                                 wayland_egl_display->twe_display,
-                                                                 surface->native_handle,
-                                                                 surface->format, surface->num_buffers);
-       if (!twe_surface) {
-               TPL_ERR("Failed to add native_window(%p) to thread(%p)",
-                               surface->native_handle, wayland_egl_display->wl_egl_thread);
-               goto create_twe_surface_fail;
-       }
+static void
+_wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display,
+                                         const char *func_name)
+{
+       int dpy_err;
+       char buf[1024];
+       strerror_r(errno, buf, sizeof(buf));
 
-       tbm_queue = twe_surface_get_tbm_queue(twe_surface);
-       if (!tbm_queue) {
-               TPL_ERR("Failed to get tbm_queue from twe_surface(%p)", twe_surface);
-               goto queue_create_fail;
-       }
+       if (wl_egl_display->last_error == errno)
+               return;
 
-       /* Set reset_callback to tbm_queue */
-       if (tbm_surface_queue_add_reset_cb(tbm_queue,
-                                  __cb_tbm_surface_queue_reset_callback,
-                                  (void *)surface)) {
-               TPL_ERR("TBM surface queue add reset cb failed!");
-               goto add_reset_cb_fail;
+       TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
+
+       dpy_err = wl_display_get_error(wl_egl_display->wl_display);
+       if (dpy_err == EPROTO) {
+               const struct wl_interface *err_interface;
+               uint32_t err_proxy_id, err_code;
+               err_code = wl_display_get_protocol_error(wl_egl_display->wl_display,
+                                                                                                &err_interface,
+                                                                                                &err_proxy_id);
+               TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
+                               err_interface->name, err_code, err_proxy_id);
        }
 
-       wayland_egl_surface->reset = TPL_FALSE;
-       wayland_egl_surface->twe_surface = twe_surface;
-       wayland_egl_surface->tbm_queue = tbm_queue;
-       wayland_egl_surface->is_activated = TPL_FALSE;
-
-       surface->width = tbm_surface_queue_get_width(tbm_queue);
-       surface->height = tbm_surface_queue_get_height(tbm_queue);
-       surface->rotation = twe_surface_get_rotation(twe_surface);
+       wl_egl_display->last_error = errno;
+}
 
-       ret = twe_surface_set_rotate_callback(twe_surface, (void *)surface,
-                                               (tpl_surface_cb_func_t)__cb_window_rotate_callback);
-       if (ret != TPL_ERROR_NONE) {
-               TPL_WARN("Failed to register rotate callback.");
+tpl_result_t
+_thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display)
+{
+       struct wl_registry *registry                = NULL;
+       struct wl_event_queue *queue                = NULL;
+       struct wl_display *display_wrapper          = NULL;
+       struct wl_proxy *wl_tbm                     = NULL;
+       struct wayland_tbm_client *wl_tbm_client    = NULL;
+       int ret;
+       tpl_result_t result = TPL_ERROR_NONE;
+
+       queue = wl_display_create_queue(wl_egl_display->wl_display);
+       if (!queue) {
+               TPL_ERR("Failed to create wl_queue wl_display(%p)",
+                               wl_egl_display->wl_display);
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
        }
 
-       TPL_LOG_T("WL_EGL",
-                         "[INIT1/2]tpl_surface(%p) tpl_wayland_egl_surface(%p) twe_surface(%p)",
-                         surface, wayland_egl_surface, twe_surface);
-       TPL_LOG_T("WL_EGL",
-                         "[INIT2/2]size(%dx%d)rot(%d)|tbm_queue(%p)|native_window(%p)",
-                         surface->width, surface->height, surface->rotation,
-                         tbm_queue, surface->native_handle);
+       wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display);
+       if (!wl_egl_display->ev_queue) {
+               TPL_ERR("Failed to create wl_queue wl_display(%p)",
+                               wl_egl_display->wl_display);
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
 
-       return TPL_ERROR_NONE;
+       display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display);
+       if (!display_wrapper) {
+               TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
+                               wl_egl_display->wl_display);
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
 
-add_reset_cb_fail:
-queue_create_fail:
-       twe_surface_del(twe_surface);
-create_twe_surface_fail:
-object_init_fail:
-       free(wayland_egl_surface);
-       surface->backend.data = NULL;
-       return TPL_ERROR_INVALID_OPERATION;
-}
+       wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
 
-static void
-__tpl_wl_egl_surface_fini(tpl_surface_t *surface)
-{
-       tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
-       tpl_wayland_egl_display_t *wayland_egl_display = NULL;
+       registry = wl_display_get_registry(display_wrapper);
+       if (!registry) {
+               TPL_ERR("Failed to create wl_registry");
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
 
-       TPL_ASSERT(surface);
-       TPL_ASSERT(surface->display);
+       wl_proxy_wrapper_destroy(display_wrapper);
+       display_wrapper = NULL;
 
-       wayland_egl_surface = (tpl_wayland_egl_surface_t *) surface->backend.data;
-       TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface);
+       wl_tbm_client = wayland_tbm_client_init(wl_egl_display->wl_display);
+       if (!wl_tbm_client) {
+               TPL_ERR("Failed to initialize wl_tbm_client.");
+               result = TPL_ERROR_INVALID_CONNECTION;
+               goto fini;
+       }
 
-       TPL_OBJECT_LOCK(wayland_egl_surface);
+       wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
+       if (!wl_tbm) {
+               TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
+               result = TPL_ERROR_INVALID_CONNECTION;
+               goto fini;
+       }
 
-       wayland_egl_display = (tpl_wayland_egl_display_t *)
-                                                 surface->display->backend.data;
+       wl_proxy_set_queue(wl_tbm, wl_egl_display->ev_queue);
+       wl_egl_display->wl_tbm_client = wl_tbm_client;
 
-       if (wayland_egl_display == NULL) {
-               TPL_ERR("check failed: wayland_egl_display == NULL");
-               TPL_OBJECT_UNLOCK(wayland_egl_surface);
-               return;
+       if (wl_registry_add_listener(registry, &registry_listener,
+                                                                wl_egl_display)) {
+               TPL_ERR("Failed to wl_registry_add_listener");
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
        }
 
-       if (surface->type == TPL_SURFACE_TYPE_WINDOW) {
-               TPL_LOG_T("WL_EGL",
-                                 "[FINI] wayland_egl_surface(%p) native_window(%p) twe_surface(%p)",
-                                 wayland_egl_surface, surface->native_handle,
-                                 wayland_egl_surface->twe_surface);
+       ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue);
+       if (ret == -1) {
+               _wl_display_print_err(wl_egl_display, "roundtrip_queue");
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
 
-               if (twe_surface_del(wayland_egl_surface->twe_surface)
-                               != TPL_ERROR_NONE) {
-                       TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)",
-                                       wayland_egl_surface->twe_surface,
-                                       wayland_egl_display->wl_egl_thread);
-               }
+#if TIZEN_FEATURE_ENABLE
+       /* set tizen_surface_shm's queue as client's private queue */
+       if (wl_egl_display->tss) {
+               wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss,
+                                                  wl_egl_display->ev_queue);
+               TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss);
+       }
 
-               wayland_egl_surface->twe_surface = NULL;
-               wayland_egl_surface->tbm_queue = NULL;
+       if (wl_egl_display->presentation) {
+               wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation,
+                                                  wl_egl_display->ev_queue);
+               TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.",
+                                 wl_egl_display->presentation);
        }
 
-       TPL_OBJECT_UNLOCK(wayland_egl_surface);
-       __tpl_object_fini(&wayland_egl_surface->base);
-       free(wayland_egl_surface);
-       surface->backend.data = NULL;
+       if (wl_egl_display->explicit_sync) {
+               wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync,
+                                                  wl_egl_display->ev_queue);
+               TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.",
+                                 wl_egl_display->explicit_sync);
+       }
+#endif
+       wl_egl_display->wl_initialized = TPL_TRUE;
+
+       TPL_INFO("[WAYLAND_INIT]",
+                        "wl_egl_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
+                        wl_egl_display, wl_egl_display->wl_display,
+                        wl_egl_display->wl_tbm_client, wl_egl_display->ev_queue);
+#if TIZEN_FEATURE_ENABLE
+       TPL_INFO("[WAYLAND_INIT]",
+                        "tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)",
+                        wl_egl_display->tss, wl_egl_display->presentation,
+                        wl_egl_display->explicit_sync);
+#endif
+fini:
+       if (display_wrapper)
+               wl_proxy_wrapper_destroy(display_wrapper);
+       if (registry)
+               wl_registry_destroy(registry);
+       if (queue)
+               wl_event_queue_destroy(queue);
+
+       return result;
 }
 
-static tpl_result_t
-__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
-                                                                                                 tpl_bool_t set)
+void
+_thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display)
 {
-       tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+       /* If wl_egl_display is in prepared state, cancel it */
+       if (wl_egl_display->prepared) {
+               wl_display_cancel_read(wl_egl_display->wl_display);
+               wl_egl_display->prepared = TPL_FALSE;
+       }
 
-       if (!surface) {
-               TPL_ERR("Invalid parameter. tpl_surface(%p)", surface);
-               return TPL_ERROR_INVALID_PARAMETER;
+       if (wl_display_roundtrip_queue(wl_egl_display->wl_display,
+                                                                  wl_egl_display->ev_queue) == -1) {
+               _wl_display_print_err(wl_egl_display, "roundtrip_queue");
        }
 
-       wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
-       if (!wayland_egl_surface) {
-               TPL_ERR("Invalid parameter. wayland_egl_surface(%p)",
-                               wayland_egl_surface);
-               return TPL_ERROR_INVALID_PARAMETER;
+#if TIZEN_FEATURE_ENABLE
+       if (wl_egl_display->tss) {
+               TPL_INFO("[TIZEN_SURFACE_SHM_DESTROY]",
+                                "wl_egl_display(%p) tizen_surface_shm(%p) fini.",
+                                wl_egl_display, wl_egl_display->tss);
+               tizen_surface_shm_destroy(wl_egl_display->tss);
+               wl_egl_display->tss = NULL;
        }
 
-       if (!wayland_egl_surface->twe_surface) {
-               TPL_ERR("Invalid parameter. wayland_egl_surface(%p) twe_surface(%p)",
-                               wayland_egl_surface, wayland_egl_surface->twe_surface);
-               return TPL_ERROR_INVALID_PARAMETER;
+       if (wl_egl_display->presentation) {
+               TPL_INFO("[WP_PRESENTATION_DESTROY]",
+                                "wl_egl_display(%p) wp_presentation(%p) fini.",
+                                wl_egl_display, wl_egl_display->presentation);
+               wp_presentation_destroy(wl_egl_display->presentation);
+               wl_egl_display->presentation = NULL;
        }
 
-       twe_surface_set_rotation_capablity(wayland_egl_surface->twe_surface,
-                                                                                        set);
+       if (wl_egl_display->explicit_sync) {
+               TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
+                                "wl_egl_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
+                                wl_egl_display, wl_egl_display->explicit_sync);
+               zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync);
+               wl_egl_display->explicit_sync = NULL;
+       }
+#endif
+       if (wl_egl_display->wl_tbm_client) {
+               struct wl_proxy *wl_tbm = NULL;
+
+               wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
+                                                                               wl_egl_display->wl_tbm_client);
+               if (wl_tbm) {
+                       wl_proxy_set_queue(wl_tbm, NULL);
+               }
 
-       return TPL_ERROR_NONE;
-}
+               TPL_INFO("[WL_TBM_DEINIT]",
+                                "wl_egl_display(%p) wl_tbm_client(%p)",
+                                wl_egl_display, wl_egl_display->wl_tbm_client);
+               wayland_tbm_client_deinit(wl_egl_display->wl_tbm_client);
+               wl_egl_display->wl_tbm_client = NULL;
+       }
 
-static tpl_result_t
-__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
-               tbm_surface_h tbm_surface,
-               int num_rects, const int *rects, tbm_fd sync_fence)
-{
-       TPL_ASSERT(surface);
-       TPL_ASSERT(surface->display);
-       TPL_ASSERT(tbm_surface);
-       TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
+       wl_event_queue_destroy(wl_egl_display->ev_queue);
 
-       tpl_wayland_egl_surface_t *wayland_egl_surface =
-               (tpl_wayland_egl_surface_t *) surface->backend.data;
-       tbm_surface_queue_error_e tsq_err;
-       tpl_result_t ret = TPL_ERROR_NONE;
-       int bo_name = 0;
+       wl_egl_display->ev_queue = NULL;
+       wl_egl_display->wl_initialized = TPL_FALSE;
 
-       TPL_OBJECT_LOCK(wayland_egl_surface);
+       TPL_INFO("[DISPLAY_FINI]", "wl_egl_display(%p) wl_display(%p)",
+                        wl_egl_display, wl_egl_display->wl_display);
+}
 
-       bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+static void*
+_thread_init(void *data)
+{
+       tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
 
-       if (!wayland_egl_surface) {
-               TPL_ERR("Invalid parameter. wayland_egl_surface(%p)",
-                               wayland_egl_surface);
-               TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
-               TPL_OBJECT_UNLOCK(wayland_egl_surface);
-               return TPL_ERROR_INVALID_PARAMETER;
+       if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) {
+               TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)",
+                               wl_egl_display, wl_egl_display->wl_display);
        }
 
-       if (!tbm_surface_internal_is_valid(tbm_surface)) {
-               TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
-                               tbm_surface);
-               TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
-               TPL_OBJECT_UNLOCK(wayland_egl_surface);
-               return TPL_ERROR_INVALID_PARAMETER;
+       if (wl_egl_display->use_wait_vblank &&
+               _thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
+               TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
        }
 
-       TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
+       return wl_egl_display;
+}
 
-       TPL_LOG_T("WL_EGL",
-                         "[ENQ] wayland_egl_surface(%p) tbm_surface(%p) bo(%d)",
-                         wayland_egl_surface, tbm_surface, bo_name);
+static tpl_bool_t
+__thread_func_disp_prepare(tpl_gsource *gsource)
+{
+       tpl_wl_egl_display_t *wl_egl_display =
+               (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+
+       /* If this wl_egl_display is already prepared,
+        * do nothing in this function. */
+       if (wl_egl_display->prepared)
+               return TPL_FALSE;
+
+       /* If there is a last_error, there is no need to poll,
+        * so skip directly to dispatch.
+        * prepare -> dispatch */
+       if (wl_egl_display->last_error)
+               return TPL_TRUE;
 
-       /* If there are received region information,
-        * save it to buf_info in tbm_surface user_data using below API. */
-       if (num_rects && rects) {
-               ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects);
-               if (ret != TPL_ERROR_NONE) {
-                       TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)",
-                                        num_rects, rects);
+       while (wl_display_prepare_read_queue(wl_egl_display->wl_display,
+                                                                                wl_egl_display->ev_queue) != 0) {
+               if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
+                                                                                         wl_egl_display->ev_queue) == -1) {
+                       _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
                }
        }
 
-       /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
-        * commit if surface->frontbuffer that is already set and the tbm_surface
-        * client want to enqueue are the same.
-        */
-       if (surface->is_frontbuffer_mode && surface->frontbuffer == tbm_surface) {
-               TPL_LOG_T("WL_EGL",
-                                 "[ENQ_SKIP][F] Client already uses frontbuffer(%p)",
-                                 surface->frontbuffer);
+       wl_egl_display->prepared = TPL_TRUE;
 
-               /* The first buffer to be activated in frontbuffer mode muse be
-                * committed. Subsequence frames do not need to be committed because
-                * the buffer is already displayed.
-                */
-               if (!twe_surface_check_commit_needed(wayland_egl_surface->twe_surface,
-                                                                                        surface->frontbuffer)) {
-                       TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
-                       TPL_OBJECT_UNLOCK(wayland_egl_surface);
-                       return TPL_ERROR_NONE;
-               }
-       }
+       wl_display_flush(wl_egl_display->wl_display);
+
+       return TPL_FALSE;
+}
 
-       if (sync_fence != -1) {
-               tbm_sync_fence_wait(sync_fence, -1);
-               close(sync_fence);
+static tpl_bool_t
+__thread_func_disp_check(tpl_gsource *gsource)
+{
+       tpl_wl_egl_display_t *wl_egl_display =
+               (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+       tpl_bool_t ret = TPL_FALSE;
+
+       if (!wl_egl_display->prepared)
+               return ret;
+
+       /* If prepared, but last_error is set,
+        * cancel_read is executed and FALSE is returned.
+        * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
+        * and skipping disp_check from prepare to disp_dispatch.
+        * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
+       if (wl_egl_display->prepared && wl_egl_display->last_error) {
+               wl_display_cancel_read(wl_egl_display->wl_display);
+               return ret;
        }
 
-       tsq_err = tbm_surface_queue_enqueue(wayland_egl_surface->tbm_queue,
-                                                                               tbm_surface);
-       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) {
-               /*
-                * If tbm_surface_queue has not been reset, tbm_surface_queue_enqueue
-                * will return ERROR_NONE. Otherwise, queue has been reset
-                * this tbm_surface may have only one ref_count. So we need to
-                * unreference this tbm_surface after getting ERROR_NONE result from
-                * tbm_surface_queue_enqueue in order to prevent destruction.
-                */
-               tbm_surface_internal_unref(tbm_surface);
+       if (tpl_gsource_check_io_condition(gsource)) {
+               if (wl_display_read_events(wl_egl_display->wl_display) == -1)
+                       _wl_display_print_err(wl_egl_display, "read_event");
+               ret = TPL_TRUE;
        } else {
-               TPL_ERR("Failed to enqueue tbm_surface(%p). tsq_err=%d",
-                               tbm_surface, tsq_err);
-               TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
-               TPL_OBJECT_UNLOCK(wayland_egl_surface);
-               return TPL_ERROR_INVALID_OPERATION;
+               wl_display_cancel_read(wl_egl_display->wl_display);
+               ret = TPL_FALSE;
        }
 
-       TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
-       TPL_OBJECT_UNLOCK(wayland_egl_surface);
+       wl_egl_display->prepared = TPL_FALSE;
 
-       return TPL_ERROR_NONE;
+       return ret;
 }
 
 static tpl_bool_t
-__tpl_wl_egl_surface_validate(tpl_surface_t *surface)
+__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
 {
-       tpl_bool_t retval = TPL_TRUE;
+       tpl_wl_egl_display_t *wl_egl_display =
+               (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
 
-       TPL_ASSERT(surface);
-       TPL_ASSERT(surface->backend.data);
+       TPL_IGNORE(message);
 
-       tpl_wayland_egl_surface_t *wayland_egl_surface =
-               (tpl_wayland_egl_surface_t *)surface->backend.data;
+       /* If there is last_error, SOURCE_REMOVE should be returned
+        * to remove the gsource from the main loop.
+        * This is because wl_egl_display is not valid since last_error was set.*/
+       if (wl_egl_display->last_error) {
+               return TPL_FALSE;
+       }
+
+       tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+       if (tpl_gsource_check_io_condition(gsource)) {
+               if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
+                                                                                         wl_egl_display->ev_queue) == -1) {
+                       _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
+               }
+       }
+
+       wl_display_flush(wl_egl_display->wl_display);
+       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+
+       return TPL_TRUE;
+}
+
+static void
+__thread_func_disp_finalize(tpl_gsource *gsource)
+{
+       tpl_wl_egl_display_t *wl_egl_display =
+               (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+
+       tpl_gmutex_lock(&wl_egl_display->disp_mutex);
+       TPL_DEBUG("[FINALIZE] wl_egl_display(%p) tpl_gsource(%p)",
+                         wl_egl_display, gsource);
+
+       if (wl_egl_display->wl_initialized)
+               _thread_wl_display_fini(wl_egl_display);
+
+       wl_egl_display->gsource_finalized = TPL_TRUE;
+
+       tpl_gcond_signal(&wl_egl_display->disp_cond);
+       tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
+
+       return;
+}
+
+
/* GSource callback table that drives the wl_display fd on the
 * wl_egl_thread (prepare/check/dispatch/finalize lifecycle). */
static tpl_gsource_functions disp_funcs = {
	.prepare  = __thread_func_disp_prepare,
	.check    = __thread_func_disp_check,
	.dispatch = __thread_func_disp_dispatch,
	.finalize = __thread_func_disp_finalize,
};
+
+static tpl_result_t
+__tpl_wl_egl_display_init(tpl_display_t *display)
+{
+       tpl_wl_egl_display_t *wl_egl_display    = NULL;
+
+       TPL_ASSERT(display);
+
+       /* Do not allow default display in wayland. */
+       if (!display->native_handle) {
+               TPL_ERR("Invalid native handle for display.");
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!_check_native_handle_is_wl_display(display->native_handle)) {
+               TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       wl_egl_display = (tpl_wl_egl_display_t *) calloc(1,
+                                                 sizeof(tpl_wl_egl_display_t));
+       if (!wl_egl_display) {
+               TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t.");
+               return TPL_ERROR_OUT_OF_MEMORY;
+       }
+
+       display->backend.data             = wl_egl_display;
+       display->bufmgr_fd                = -1;
+
+       wl_egl_display->tdm.tdm_initialized   = TPL_FALSE;
+       wl_egl_display->tdm.tdm_client        = NULL;
+       wl_egl_display->tdm.tdm_display_fd    = -1;
+       wl_egl_display->tdm.tdm_source        = NULL;
+
+       wl_egl_display->wl_initialized    = TPL_FALSE;
+
+       wl_egl_display->ev_queue          = NULL;
+       wl_egl_display->wl_display        = (struct wl_display *)display->native_handle;
+       wl_egl_display->last_error        = 0;
+       wl_egl_display->use_tss           = TPL_FALSE;
+       wl_egl_display->use_explicit_sync = TPL_FALSE;   // default disabled
+       wl_egl_display->prepared          = TPL_FALSE;
+       wl_egl_display->gsource_finalized = TPL_FALSE;
+
+#if TIZEN_FEATURE_ENABLE
+       /* Wayland Interfaces */
+       wl_egl_display->tss               = NULL;
+       wl_egl_display->presentation      = NULL;
+       wl_egl_display->explicit_sync     = NULL;
+#endif
+       wl_egl_display->wl_tbm_client     = NULL;
+
+       wl_egl_display->use_wait_vblank   = TPL_TRUE;   // default enabled
+       {
+               char *env = tpl_getenv("TPL_WAIT_VBLANK");
+               if (env && !atoi(env)) {
+                       wl_egl_display->use_wait_vblank = TPL_FALSE;
+               }
+       }
+
+       tpl_gmutex_init(&wl_egl_display->wl_event_mutex);
+
+       tpl_gmutex_init(&wl_egl_display->disp_mutex);
+       tpl_gcond_init(&wl_egl_display->disp_cond);
+
+       /* Create gthread */
+       wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
+                                                                                               (tpl_gthread_func)_thread_init,
+                                                                                               (void *)wl_egl_display);
+       if (!wl_egl_display->thread) {
+               TPL_ERR("Failed to create wl_egl_thread");
+               goto free_display;
+       }
+
+       wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread,
+                                                                                                        (void *)wl_egl_display,
+                                                                                                        wl_display_get_fd(wl_egl_display->wl_display),
+                                                                                                        &disp_funcs, SOURCE_TYPE_NORMAL);
+       if (!wl_egl_display->disp_source) {
+               TPL_ERR("Failed to add native_display(%p) to thread(%p)",
+                               display->native_handle,
+                               wl_egl_display->thread);
+               goto free_display;
+       }
+
+       if (wl_egl_display->use_wait_vblank &&
+               wl_egl_display->tdm.tdm_initialized) {
+               tpl_gmutex_init(&wl_egl_display->tdm.tdm_mutex);
+               tpl_gcond_init(&wl_egl_display->tdm.tdm_cond);
+               wl_egl_display->tdm.tdm_source = tpl_gsource_create(wl_egl_display->thread,
+                                                                                                               (void *)wl_egl_display,
+                                                                                                               wl_egl_display->tdm.tdm_display_fd,
+                                                                                                               &tdm_funcs, SOURCE_TYPE_NORMAL);
+               wl_egl_display->tdm.gsource_finalized = TPL_FALSE;
+               if (!wl_egl_display->tdm.tdm_source) {
+                       TPL_ERR("Failed to create tdm_gsource\n");
+                       goto free_display;
+               }
+       }
+
+       wl_egl_display->use_wait_vblank = (wl_egl_display->tdm.tdm_initialized &&
+                                                                          (wl_egl_display->tdm.tdm_source != NULL));
+
+       TPL_INFO("[DISPLAY_INIT]",
+                        "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
+                        wl_egl_display,
+                        wl_egl_display->thread,
+                        wl_egl_display->wl_display);
+
+       TPL_INFO("[DISPLAY_INIT]",
+                        "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)",
+                        wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
+                        wl_egl_display->use_tss ? "TRUE" : "FALSE",
+                        wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");
+
+       return TPL_ERROR_NONE;
+
+free_display:
+       if (wl_egl_display->tdm.tdm_source) {
+               tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
+               // Send destroy mesage to thread
+               tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
+               while (!wl_egl_display->tdm.gsource_finalized) {
+                       tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
+               }
+               tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
+       }
+
+       if (wl_egl_display->disp_source) {
+               tpl_gmutex_lock(&wl_egl_display->disp_mutex);
+               // Send destroy mesage to thread
+               tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
+               while (!wl_egl_display->gsource_finalized) {
+                       tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
+               }
+               tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
+       }
+
+       if (wl_egl_display->thread) {
+               tpl_gthread_destroy(wl_egl_display->thread);
+       }
+
+       tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
+       tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
+       tpl_gcond_clear(&wl_egl_display->disp_cond);
+       tpl_gmutex_clear(&wl_egl_display->disp_mutex);
+
+       wl_egl_display->thread = NULL;
+       free(wl_egl_display);
+
+       display->backend.data = NULL;
+       return TPL_ERROR_INVALID_OPERATION;
+}
+
+/* Finalize and free the wl_egl backend data attached to |display|.
+ *
+ * Teardown order matters: first the tdm gsource, then the display gsource,
+ * then the worker thread itself, and only afterwards the cond/mutex pairs
+ * are cleared and wl_egl_display is freed. Each gsource destroy is done by
+ * sending a destroy message to the worker thread and waiting in a loop on
+ * the corresponding "finalized" flag, which guards against spurious
+ * (fake) condition-variable wakeups. Always clears display->backend.data.
+ */
+static void
+__tpl_wl_egl_display_fini(tpl_display_t *display)
+{
+       tpl_wl_egl_display_t *wl_egl_display;
+
+       TPL_ASSERT(display);
+
+       wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data;
+       if (wl_egl_display) {
+               TPL_INFO("[DISPLAY_FINI]",
+                                 "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
+                                 wl_egl_display,
+                                 wl_egl_display->thread,
+                                 wl_egl_display->wl_display);
+
+               /* Only tear down the tdm source if tdm init actually completed. */
+               if (wl_egl_display->tdm.tdm_source && wl_egl_display->tdm.tdm_initialized) {
+                       /* This is a protection to prevent problems that arise in unexpected situations
+                        * that g_cond_wait cannot work normally.
+                        * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
+                        * caller should use tpl_gcond_wait() in the loop with checking finalized flag
+                        * */
+                       tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
+                       // Send destroy message to thread
+                       tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
+                       while (!wl_egl_display->tdm.gsource_finalized) {
+                               tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
+                       }
+                       wl_egl_display->tdm.tdm_source = NULL;
+                       tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
+               }
+
+               if (wl_egl_display->disp_source) {
+                       tpl_gmutex_lock(&wl_egl_display->disp_mutex);
+                       // Send destroy message to thread
+                       tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
+                       /* This is a protection to prevent problems that arise in unexpected situations
+                        * that g_cond_wait cannot work normally.
+                        * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
+                        * caller should use tpl_gcond_wait() in the loop with checking finalized flag
+                        * */
+                       while (!wl_egl_display->gsource_finalized) {
+                               tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
+                       }
+                       wl_egl_display->disp_source = NULL;
+                       tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
+               }
+
+               /* Both gsources are gone; the worker thread can now be joined. */
+               if (wl_egl_display->thread) {
+                       tpl_gthread_destroy(wl_egl_display->thread);
+                       wl_egl_display->thread = NULL;
+               }
+
+               /* Clear sync primitives only after no thread can touch them. */
+               tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
+               tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
+               tpl_gcond_clear(&wl_egl_display->disp_cond);
+               tpl_gmutex_clear(&wl_egl_display->disp_mutex);
+
+               tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);
+
+               free(wl_egl_display);
+       }
+
+       display->backend.data = NULL;
+}
+
+/* Check whether a requested EGL config is supported by this backend.
+ *
+ * Only window surfaces with 8-bit R/G/B channels and a color depth of
+ * 24 or 32 are accepted: alpha_size 8 maps to TBM_FORMAT_ARGB8888 and
+ * alpha_size 0 to TBM_FORMAT_XRGB8888 (reported via native_visual_id).
+ * is_slow, when requested, is always TPL_FALSE.
+ *
+ * Returns TPL_ERROR_NONE on a supported config,
+ * TPL_ERROR_INVALID_PARAMETER otherwise.
+ */
+static tpl_result_t
+__tpl_wl_egl_display_query_config(tpl_display_t *display,
+                                                                 tpl_surface_type_t surface_type,
+                                                                 int red_size, int green_size,
+                                                                 int blue_size, int alpha_size,
+                                                                 int color_depth, int *native_visual_id,
+                                                                 tpl_bool_t *is_slow)
+{
+       TPL_ASSERT(display);
+
+       if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
+                       green_size == 8 && blue_size == 8 &&
+                       (color_depth == 32 || color_depth == 24)) {
+
+               if (alpha_size == 8) {
+                       if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
+                       if (is_slow) *is_slow = TPL_FALSE;
+                       return TPL_ERROR_NONE;
+               }
+               if (alpha_size == 0) {
+                       if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
+                       if (is_slow) *is_slow = TPL_FALSE;
+                       return TPL_ERROR_NONE;
+               }
+       }
+
+       return TPL_ERROR_INVALID_PARAMETER;
+}
+
+/* Config filtering hook: this backend performs no filtering, so every
+ * visual_id/alpha_size combination is accepted unchanged. */
+static tpl_result_t
+__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
+                                                                  int alpha_size)
+{
+       TPL_IGNORE(display);
+       TPL_IGNORE(visual_id);
+       TPL_IGNORE(alpha_size);
+       return TPL_ERROR_NONE;
+}
+
+/* Query size and pixel format of a native wl_egl_window.
+ *
+ * width/height come straight from the wl_egl_window. For the format:
+ * if the window already carries a tizen_private whose data points at a
+ * tpl_wl_egl_surface_t, the surface's current format wins; otherwise the
+ * format is derived from a_size (8 -> ARGB8888, else XRGB8888).
+ * All output pointers are optional. Returns TPL_ERROR_NONE on success.
+ */
+static tpl_result_t
+__tpl_wl_egl_display_get_window_info(tpl_display_t *display,
+                                                                        tpl_handle_t window, int *width,
+                                                                        int *height, tbm_format *format,
+                                                                        int depth, int a_size)
+{
+       tpl_result_t ret = TPL_ERROR_NONE;
+       struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window;
+
+       TPL_ASSERT(display);
+       TPL_ASSERT(window);
+
+       if (!wl_egl_window) {
+               TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (width) *width = wl_egl_window->width;
+       if (height) *height = wl_egl_window->height;
+       if (format) {
+               struct tizen_private *tizen_private =
+                               (struct tizen_private *)wl_egl_window->driver_private;
+               if (tizen_private && tizen_private->data) {
+                       /* Window is already bound to a tpl surface; report its format. */
+                       tpl_wl_egl_surface_t *wl_egl_surface =
+                               (tpl_wl_egl_surface_t *)tizen_private->data;
+                       *format = wl_egl_surface->format;
+               } else {
+                       if (a_size == 8)
+                               *format = TBM_FORMAT_ARGB8888;
+                       else
+                               *format = TBM_FORMAT_XRGB8888;
+               }
+       }
+
+       return ret;
+}
+
+/* Query size and format of a native pixmap (a wl_resource wrapping a
+ * tbm_surface registered with wayland-tbm-server).
+ *
+ * All output pointers are optional. Returns TPL_ERROR_INVALID_PARAMETER
+ * when the pixmap is NULL or no tbm_surface can be resolved from it.
+ */
+static tpl_result_t
+__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
+                                                                        tpl_handle_t pixmap, int *width,
+                                                                        int *height, tbm_format *format)
+{
+       tbm_surface_h   tbm_surface = NULL;
+
+       if (!pixmap) {
+               TPL_ERR("Invalid parameter. tpl_handle_t(%p)", pixmap);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       tbm_surface = wayland_tbm_server_get_surface(NULL,
+                                                                                                (struct wl_resource *)pixmap);
+       if (!tbm_surface) {
+               TPL_ERR("Failed to get tbm_surface from wayland_tbm.");
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (width) *width = tbm_surface_get_width(tbm_surface);
+       if (height) *height = tbm_surface_get_height(tbm_surface);
+       if (format) *format = tbm_surface_get_format(tbm_surface);
+
+       return TPL_ERROR_NONE;
+}
+
+/* Resolve the tbm_surface_h backing a native pixmap wl_resource.
+ * Returns NULL (after logging) when wayland-tbm-server has no surface
+ * registered for the resource. The returned handle is not referenced here;
+ * ownership/refcounting is left to the caller.
+ */
+static tbm_surface_h
+__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
+{
+       tbm_surface_h tbm_surface = NULL;
+
+       TPL_ASSERT(pixmap);
+
+       tbm_surface = wayland_tbm_server_get_surface(NULL,
+                                                                                                (struct wl_resource *)pixmap);
+       if (!tbm_surface) {
+               TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
+               return NULL;
+       }
+
+       return tbm_surface;
+}
+
+/* Decide whether a native display handle belongs to this backend.
+ *
+ * Two checks: (1) the de-referenced first pointer of the handle equals
+ * &wl_display_interface (the classic wl_display magic check), or (2) the
+ * pointed-to interface's name string matches "wl_display" by prefix.
+ * Returns TPL_TRUE when either check passes.
+ */
+tpl_bool_t
+__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
+{
+       struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
+
+       /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
+          is a memory address pointing the structure of wl_display_interface. */
+       if (wl_egl_native_dpy == &wl_display_interface)
+               return TPL_TRUE;
+
+       /* Fallback: compare interface names (handles copies of the interface). */
+       if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
+                               strlen(wl_display_interface.name)) == 0) {
+               return TPL_TRUE;
+       }
+
+       return TPL_FALSE;
+}
+
+/* -- BEGIN -- wl_egl_window callback functions */
+/* wl_egl_window destroy hook.
+ *
+ * Normally the native window outlives the EGL surface; if this fires while
+ * a tpl_wl_egl_surface_t is still attached, the app destroyed the window
+ * before eglDestroySurface (abnormal). In that case every cross-pointer
+ * between the window, its tizen_private, and the surface is severed under
+ * surf_mutex, and the tizen_private is freed here so later surface
+ * teardown does not touch the dead window.
+ */
+static void
+__cb_destroy_callback(void *private)
+{
+       struct tizen_private *tizen_private  = (struct tizen_private *)private;
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+
+       if (!tizen_private) {
+               TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface");
+               return;
+       }
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+       if (wl_egl_surface) {
+               TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.",
+                                wl_egl_surface->wl_egl_window);
+               TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface.");
+
+               tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+               /* Detach the surface from the dying window... */
+               wl_egl_surface->wl_egl_window->destroy_window_callback = NULL;
+               wl_egl_surface->wl_egl_window->resize_callback = NULL;
+               wl_egl_surface->wl_egl_window->driver_private = NULL;
+               wl_egl_surface->wl_egl_window = NULL;
+               wl_egl_surface->wl_surface = NULL;
+
+               /* ...and neutralize every tizen_private callback before freeing it. */
+               tizen_private->set_window_serial_callback = NULL;
+               tizen_private->rotate_callback = NULL;
+               tizen_private->get_rotation_capability = NULL;
+               tizen_private->set_frontbuffer_callback = NULL;
+               tizen_private->create_commit_sync_fd = NULL;
+               tizen_private->create_presentation_sync_fd = NULL;
+               tizen_private->data = NULL;
+
+               free(tizen_private);
+               tizen_private = NULL;
+               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+       }
+}
+
+/* wl_egl_window resize hook.
+ *
+ * Reads the requested size from the window and resets the surface's
+ * tbm_surface_queue to that size (keeping the current format) so the
+ * next dequeued buffer matches the new window dimensions. The surface's
+ * own width/height fields are updated by the queue-reset path, not here.
+ */
+static void
+__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
+{
+       TPL_ASSERT(private);
+       TPL_ASSERT(wl_egl_window);
+
+       struct tizen_private *tizen_private  = (struct tizen_private *)private;
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+       int cur_w, cur_h, req_w, req_h, format;
+
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+                               wl_egl_window);
+               return;
+       }
+
+       format = wl_egl_surface->format;
+       cur_w = wl_egl_surface->width;
+       cur_h = wl_egl_surface->height;
+       req_w = wl_egl_window->width;
+       req_h = wl_egl_window->height;
+
+       TPL_INFO("[WINDOW_RESIZE]",
+                        "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
+                        wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
+
+       if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
+                       != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
+               return;
+       }
+}
+/* -- END -- wl_egl_window callback functions */
+
+/* -- BEGIN -- wl_egl_window tizen private callback functions */
+
+/* There is no usecase for using prerotation callback below */
+/* wl_egl_window rotate hook: copies the rotation value stored in
+ * tizen_private into the surface so later commits can apply it. */
+static void
+__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
+{
+       TPL_ASSERT(private);
+       TPL_ASSERT(wl_egl_window);
+
+       struct tizen_private *tizen_private  = (struct tizen_private *)private;
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+       int rotation = tizen_private->rotation;
+
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+                               wl_egl_window);
+               return;
+       }
+
+       TPL_INFO("[WINDOW_ROTATE]",
+                        "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)",
+                        wl_egl_surface, wl_egl_window,
+                        wl_egl_surface->rotation, rotation);
+
+       wl_egl_surface->rotation = rotation;
+}
+
+/* There is no usecase for using prerotation callback below */
+/* wl_egl_window rotation-capability hook: translates the surface's
+ * prerotation_capability flag into the WL_EGL_WINDOW_TIZEN_CAPABILITY_*
+ * value the wayland-egl-tizen frontend expects. Returns CAPABILITY_NONE
+ * when the window is not attached to a surface. */
+static int
+__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
+                                                        void *private)
+{
+       TPL_ASSERT(private);
+       TPL_ASSERT(wl_egl_window);
+
+       int rotation_capability              = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
+       struct tizen_private *tizen_private  = (struct tizen_private *)private;
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+                               wl_egl_window);
+               return rotation_capability;
+       }
+
+       if (wl_egl_surface->prerotation_capability == TPL_TRUE)
+               rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
+       else
+               rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
+
+
+       return rotation_capability;
+}
+
+/* wl_egl_window set-serial hook: records the app-provided serial on the
+ * surface and marks set_serial_is_used so the commit path uses it instead
+ * of an internally generated serial. */
+static void
+__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
+                                                               void *private, unsigned int serial)
+{
+       TPL_ASSERT(private);
+       TPL_ASSERT(wl_egl_window);
+
+       struct tizen_private *tizen_private  = (struct tizen_private *)private;
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+                               wl_egl_window);
+               return;
+       }
+
+       wl_egl_surface->set_serial_is_used = TPL_TRUE;
+       wl_egl_surface->serial = serial;
+}
+
+/* wl_egl_window commit-sync hook.
+ *
+ * Lazily creates the surface's commit_sync eventfd (EFD_CLOEXEC) on first
+ * call, then returns a dup() of it; subsequent calls only dup the existing
+ * fd. The caller owns the returned fd and must close it. Returns -1 on
+ * failure. All fd state is guarded by commit_sync.mutex.
+ *
+ * NOTE(review): the dup() result is returned unchecked — it can be -1 on
+ * fd exhaustion, which callers already treat as failure, but no error is
+ * logged in that case.
+ */
+static int
+__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
+{
+       TPL_ASSERT(private);
+       TPL_ASSERT(wl_egl_window);
+
+       int commit_sync_fd = -1;
+
+       struct tizen_private *tizen_private  = (struct tizen_private *)private;
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface);
+               return -1;
+       }
+
+       tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+
+       /* Fast path: fd already exists, just hand out a duplicate. */
+       if (wl_egl_surface->commit_sync.fd != -1) {
+               commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
+               TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
+                                  wl_egl_surface->commit_sync.fd, commit_sync_fd);
+               TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)",
+                                 wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd);
+               tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+               return commit_sync_fd;
+       }
+
+       wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
+       if (wl_egl_surface->commit_sync.fd == -1) {
+               TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)",
+                               wl_egl_surface);
+               tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+               return -1;
+       }
+
+       commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
+
+       TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
+                          wl_egl_surface->commit_sync.fd, commit_sync_fd);
+       TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)",
+                         wl_egl_surface, commit_sync_fd);
+
+       tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+
+       return commit_sync_fd;
+}
+
+#if TIZEN_FEATURE_ENABLE
+/* wl_egl_window presentation-sync hook (TIZEN_FEATURE_ENABLE only).
+ *
+ * Mirrors __cb_create_commit_sync_fd but for the presentation_sync
+ * eventfd: create lazily (EFD_CLOEXEC), return a dup() the caller must
+ * close, -1 on failure. Guarded by presentation_sync.mutex.
+ */
+static int
+__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
+{
+       TPL_ASSERT(private);
+       TPL_ASSERT(wl_egl_window);
+
+       int presentation_sync_fd = -1;
+
+       struct tizen_private *tizen_private  = (struct tizen_private *)private;
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+       if (!wl_egl_surface) {
+               TPL_ERR("Invalid parameter. wl_egl_surface is NULL");
+               return -1;
+       }
+
+       tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+       /* Fast path: fd already exists, just hand out a duplicate. */
+       if (wl_egl_surface->presentation_sync.fd != -1) {
+               presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
+               TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
+                                  wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+               TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
+                                 wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+               tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+               return presentation_sync_fd;
+       }
+
+       wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
+       if (wl_egl_surface->presentation_sync.fd == -1) {
+               TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)",
+                               wl_egl_surface);
+               tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+               return -1;
+       }
+
+       presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
+       TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
+                          wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+       TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
+                         wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+
+       tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+
+       return presentation_sync_fd;
+}
+/* -- END -- wl_egl_window tizen private callback functions */
+
+/* -- BEGIN -- tizen_surface_shm_flusher_listener */
+/* tizen_surface_shm_flusher "flush" event: the compositor asks the client
+ * to drop its buffers; flush the whole tbm_surface_queue. */
+static void __cb_tss_flusher_flush_callback(void *data,
+               struct tizen_surface_shm_flusher *tss_flusher)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
+                        wl_egl_surface, wl_egl_surface->tbm_queue);
+
+       _print_buffer_lists(wl_egl_surface);
+
+       tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
+               return;
+       }
+}
+
+/* tizen_surface_shm_flusher "free_flush" event: drop only the free
+ * (unused) buffers of the tbm_surface_queue, keeping in-flight ones. */
+static void __cb_tss_flusher_free_flush_callback(void *data,
+               struct tizen_surface_shm_flusher *tss_flusher)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+       tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
+                        wl_egl_surface, wl_egl_surface->tbm_queue);
+
+       _print_buffer_lists(wl_egl_surface);
+
+       tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
+               return;
+       }
+}
+
+/* Listener vtable for tizen_surface_shm_flusher events: order must match
+ * the protocol (flush, then free_flush). */
+static const struct tizen_surface_shm_flusher_listener
+tss_flusher_listener = {
+       __cb_tss_flusher_flush_callback,
+       __cb_tss_flusher_free_flush_callback
+};
+/* -- END -- tizen_surface_shm_flusher_listener */
+#endif
+
+/* -- BEGIN -- tbm_surface_queue callback functions */
+/* tbm_surface_queue reset callback.
+ *
+ * Fired when the queue is reset (resize or activation-state change from
+ * the compositor). Logs a size change, logs an ACTIVATED/DEACTIVATED
+ * transition detected via wayland_tbm_client_queue_check_activate(),
+ * sets wl_egl_surface->reset so the next frame picks up the new state,
+ * and forwards the event to the app's reset_cb if one is registered.
+ */
+static void
+__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
+                                                                         void *data)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+       tpl_wl_egl_display_t *wl_egl_display = NULL;
+       tpl_surface_t *surface = NULL;
+       tpl_bool_t is_activated = TPL_FALSE;
+       int width, height;
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+       wl_egl_display = wl_egl_surface->wl_egl_display;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
+
+       surface = wl_egl_surface->tpl_surface;
+       TPL_CHECK_ON_NULL_RETURN(surface);
+
+       /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
+        * the changed window size at the next frame. */
+       width = tbm_surface_queue_get_width(tbm_queue);
+       height = tbm_surface_queue_get_height(tbm_queue);
+       if (surface->width != width || surface->height != height) {
+               TPL_INFO("[QUEUE_RESIZE]",
+                                "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
+                                wl_egl_surface, tbm_queue,
+                                surface->width, surface->height, width, height);
+       }
+
+       /* When queue_reset_callback is called, if is_activated is different from
+        * its previous state change the reset flag to TPL_TRUE to get a new buffer
+        * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
+       is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client,
+                                                                                                                  wl_egl_surface->tbm_queue);
+       if (wl_egl_surface->is_activated != is_activated) {
+               if (is_activated) {
+                       TPL_INFO("[ACTIVATED]",
+                                         "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
+                                         wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
+               } else {
+                       TPL_LOG_T("[DEACTIVATED]",
+                                         " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
+                                         wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
+               }
+       }
+
+       wl_egl_surface->reset = TPL_TRUE;
+
+       if (surface->reset_cb)
+               surface->reset_cb(surface->reset_data);
+}
+
+/* tbm_surface_queue acquirable callback.
+ *
+ * A buffer became acquirable; notify the surface's worker gsource with an
+ * ACQUIRABLE message. The sent_message guard (under surf_mutex) ensures
+ * only one message is in flight at a time, avoiding duplicate wakeups.
+ */
+static void
+__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
+                                                                  void *data)
+{
+       TPL_IGNORE(tbm_queue);
+
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+       if (wl_egl_surface->sent_message == NONE_MESSAGE) {
+               wl_egl_surface->sent_message = ACQUIRABLE;
+               tpl_gsource_send_message(wl_egl_surface->surf_source,
+                                                        wl_egl_surface->sent_message);
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+}
+/* -- END -- tbm_surface_queue callback functions */
+
+/* Per-surface teardown, executed on the worker thread (from the surf
+ * gsource finalize path).
+ *
+ * Cleans up, in order: pending presentation feedbacks (signalling and
+ * closing their eventfds), the presentation_sync fd, the explicit-sync
+ * surface_sync object, the shm flusher, the tbm_surface_queue, and the
+ * surface's vblank bookkeeping (waiting_buffers list and the entry in the
+ * display's surface_vblanks list).
+ */
+static void
+_thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+
+       TPL_INFO("[SURFACE_FINI]",
+                         "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
+                         wl_egl_surface, wl_egl_surface->wl_egl_window,
+                         wl_egl_surface->wl_surface);
+#if TIZEN_FEATURE_ENABLE
+       tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+
+       /* Drain pending presentation feedbacks: wake any waiter on the
+        * per-feedback eventfd, close it, and destroy the protocol object. */
+       if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) {
+               while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) {
+                       struct pst_feedback *pst_feedback =
+                               (struct pst_feedback *)__tpl_list_pop_front(
+                                               wl_egl_surface->presentation_feedbacks, NULL);
+                       if (pst_feedback) {
+                               _write_to_eventfd(pst_feedback->pst_sync_fd);
+                               close(pst_feedback->pst_sync_fd);
+                               pst_feedback->pst_sync_fd = -1;
+
+                               wp_presentation_feedback_destroy(pst_feedback->presentation_feedback);
+                               pst_feedback->presentation_feedback = NULL;
+
+                               free(pst_feedback);
+                       }
+               }
+
+               __tpl_list_free(wl_egl_surface->presentation_feedbacks, NULL);
+               wl_egl_surface->presentation_feedbacks = NULL;
+       }
+
+       /* Signal and close the surface-level presentation sync fd so no
+        * consumer blocks on it forever. */
+       if (wl_egl_surface->presentation_sync.fd != -1) {
+               _write_to_eventfd(wl_egl_surface->presentation_sync.fd);
+               close(wl_egl_surface->presentation_sync.fd);
+               wl_egl_surface->presentation_sync.fd = -1;
+       }
+
+       tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+
+       if (wl_egl_surface->surface_sync) {
+               TPL_INFO("[SURFACE_SYNC_DESTROY]",
+                                "wl_egl_surface(%p) surface_sync(%p)",
+                                 wl_egl_surface, wl_egl_surface->surface_sync);
+               zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync);
+               wl_egl_surface->surface_sync = NULL;
+       }
+
+       if (wl_egl_surface->tss_flusher) {
+               TPL_INFO("[FLUSHER_DESTROY]",
+                                 "wl_egl_surface(%p) tss_flusher(%p)",
+                                 wl_egl_surface, wl_egl_surface->tss_flusher);
+               tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher);
+               wl_egl_surface->tss_flusher = NULL;
+       }
+#endif
+
+       if (wl_egl_surface->tbm_queue) {
+               TPL_INFO("[TBM_QUEUE_DESTROY]",
+                                "wl_egl_surface(%p) tbm_queue(%p)",
+                                wl_egl_surface, wl_egl_surface->tbm_queue);
+               tbm_surface_queue_destroy(wl_egl_surface->tbm_queue);
+               wl_egl_surface->tbm_queue = NULL;
+       }
+
+       /* waiting_buffers holds references only; entries are not freed here. */
+       if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
+               tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+               __tpl_list_free(wl_egl_surface->vblank->waiting_buffers, NULL);
+               wl_egl_surface->vblank->waiting_buffers = NULL;
+               tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+       }
+
+       /* Remove (and free via __cb_surface_vblank_free) this surface's entry
+        * from the display-wide vblank list. */
+       if (wl_egl_surface->vblank) {
+               __tpl_list_remove_data(wl_egl_display->tdm.surface_vblanks,
+                                                          (void *)wl_egl_surface->vblank,
+                                                          TPL_FIRST,
+                                                          __cb_surface_vblank_free);
+               wl_egl_surface->vblank = NULL;
+       }
+}
+
+/* Worker-thread dispatch for the per-surface gsource.
+ *
+ * Handles INIT_SURFACE (run in-thread init, then signal the waiting
+ * caller via surf_cond) and ACQUIRABLE (acquire ready buffers from the
+ * tbm queue). sent_message is cleared under surf_mutex once handled so
+ * __cb_tbm_queue_acquirable_callback may send the next message.
+ * Always returns TPL_TRUE to keep the source attached.
+ */
+static tpl_bool_t
+__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
+
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+       if (message == INIT_SURFACE) { /* Initialize surface */
+               TPL_DEBUG("wl_egl_surface(%p) initialize message received!",
+                                 wl_egl_surface);
+               _thread_wl_egl_surface_init(wl_egl_surface);
+               wl_egl_surface->initialized_in_thread = TPL_TRUE;
+               tpl_gcond_signal(&wl_egl_surface->surf_cond);
+       } else if (message == ACQUIRABLE) { /* Acquirable */
+               TPL_DEBUG("wl_egl_surface(%p) acquirable message received!",
+                                 wl_egl_surface);
+               _thread_surface_queue_acquire(wl_egl_surface);
+       }
+
+       wl_egl_surface->sent_message = NONE_MESSAGE;
+
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+       return TPL_TRUE;
+}
+
+/* Worker-thread finalize for the per-surface gsource.
+ *
+ * Runs the in-thread surface teardown, then sets gsource_finalized and
+ * signals surf_cond — the thread that requested destruction waits on
+ * that flag in a loop (the fake-wakeup defense pattern used throughout
+ * this file).
+ */
+static void
+__thread_func_surf_finalize(tpl_gsource *gsource)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+       TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)",
+                         wl_egl_surface, gsource);
+
+       _thread_wl_egl_surface_fini(wl_egl_surface);
+
+       wl_egl_surface->gsource_finalized = TPL_TRUE;
+
+       tpl_gcond_signal(&wl_egl_surface->surf_cond);
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+}
+
+/* gsource vtable for the per-surface worker source: only dispatch and
+ * finalize are needed; prepare/check stay NULL. */
+static tpl_gsource_functions surf_funcs = {
+       .prepare = NULL,
+       .check = NULL,
+       .dispatch = __thread_func_surf_dispatch,
+       .finalize = __thread_func_surf_finalize,
+};
+
+static tpl_result_t
+__tpl_wl_egl_surface_init(tpl_surface_t *surface)
+{
+       tpl_wl_egl_display_t *wl_egl_display    = NULL;
+       tpl_wl_egl_surface_t *wl_egl_surface    = NULL;
+       tpl_gsource *surf_source                = NULL;
+
+       struct wl_egl_window *wl_egl_window =
+               (struct wl_egl_window *)surface->native_handle;
+
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->display);
+       TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
+       TPL_ASSERT(surface->native_handle);
+
+       wl_egl_display =
+               (tpl_wl_egl_display_t *)surface->display->backend.data;
+       if (!wl_egl_display) {
+               TPL_ERR("Invalid parameter. wl_egl_display(%p)",
+                               wl_egl_display);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
+                                                 sizeof(tpl_wl_egl_surface_t));
+       if (!wl_egl_surface) {
+               TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
+               return TPL_ERROR_OUT_OF_MEMORY;
+       }
+
+       surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface,
+                                                                        -1, &surf_funcs, SOURCE_TYPE_NORMAL);
+       if (!surf_source) {
+               TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)",
+                               wl_egl_surface);
+               goto surf_source_create_fail;
+       }
+
+       surface->backend.data = (void *)wl_egl_surface;
+       surface->width        = wl_egl_window->width;
+       surface->height       = wl_egl_window->height;
+       surface->rotation     = 0;
+
+       wl_egl_surface->tpl_surface            = surface;
+       wl_egl_surface->width                  = wl_egl_window->width;
+       wl_egl_surface->height                 = wl_egl_window->height;
+       wl_egl_surface->format                 = surface->format;
+       wl_egl_surface->num_buffers            = surface->num_buffers;
+
+       wl_egl_surface->surf_source            = surf_source;
+       wl_egl_surface->wl_egl_window          = wl_egl_window;
+       wl_egl_surface->wl_surface             = wl_egl_window->surface;
+
+       wl_egl_surface->wl_egl_display         = wl_egl_display;
+
+       wl_egl_surface->reset                  = TPL_FALSE;
+       wl_egl_surface->is_activated           = TPL_FALSE;
+       wl_egl_surface->need_to_enqueue        = TPL_TRUE;
+       wl_egl_surface->prerotation_capability = TPL_FALSE;
+       wl_egl_surface->vblank_done            = TPL_TRUE;
+       wl_egl_surface->use_render_done_fence  = TPL_FALSE;
+       wl_egl_surface->set_serial_is_used     = TPL_FALSE;
+       wl_egl_surface->gsource_finalized      = TPL_FALSE;
+       wl_egl_surface->initialized_in_thread  = TPL_FALSE;
+
+       wl_egl_surface->latest_transform       = -1;
+       wl_egl_surface->render_done_cnt        = 0;
+       wl_egl_surface->serial                 = 0;
+
+       wl_egl_surface->vblank                 = NULL;
+#if TIZEN_FEATURE_ENABLE
+       wl_egl_surface->tss_flusher            = NULL;
+       wl_egl_surface->surface_sync           = NULL;
+#endif
+
+       wl_egl_surface->post_interval          = surface->post_interval;
+
+       wl_egl_surface->commit_sync.fd         = -1;
+       wl_egl_surface->presentation_sync.fd   = -1;
+
+       wl_egl_surface->sent_message           = NONE_MESSAGE;
+
+       {
+               int i = 0;
+               for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+                       wl_egl_surface->buffers[i]     = NULL;
+               wl_egl_surface->buffer_cnt         = 0;
+       }
+
+       wl_egl_surface->last_enq_buffer        = NULL;
+
+       {
+               struct tizen_private *tizen_private = NULL;
+
+               if (wl_egl_window->driver_private)
+                       tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
+               else {
+                       tizen_private = tizen_private_create();
+                       wl_egl_window->driver_private = (void *)tizen_private;
+               }
+
+               if (tizen_private) {
+                       tizen_private->data = (void *)wl_egl_surface;
+                       tizen_private->rotate_callback = (void *)__cb_rotate_callback;
+                       tizen_private->get_rotation_capability = (void *)
+                               __cb_get_rotation_capability;
+                       tizen_private->set_window_serial_callback = (void *)
+                               __cb_set_window_serial_callback;
+                       tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
+#if TIZEN_FEATURE_ENABLE
+                       tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
+#else
+                       tizen_private->create_presentation_sync_fd = NULL;
+#endif
+
+                       wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
+                       wl_egl_window->resize_callback = (void *)__cb_resize_callback;
+               }
+       }
+
+       tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
+       tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
+
+       tpl_gmutex_init(&wl_egl_surface->buffers_mutex);
+
+       tpl_gmutex_init(&wl_egl_surface->surf_mutex);
+       tpl_gcond_init(&wl_egl_surface->surf_cond);
+
+       /* Initialize in thread */
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+       wl_egl_surface->sent_message = INIT_SURFACE;
+       tpl_gsource_send_message(wl_egl_surface->surf_source,
+                                                        wl_egl_surface->sent_message);
+       while (!wl_egl_surface->initialized_in_thread)
+               tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+       TPL_ASSERT(wl_egl_surface->tbm_queue);
+
+       TPL_INFO("[SURFACE_INIT]",
+                         "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)",
+                         surface, wl_egl_surface, wl_egl_surface->surf_source);
+
+       return TPL_ERROR_NONE;
+
+surf_source_create_fail:
+       free(wl_egl_surface);
+       surface->backend.data = NULL;
+       return TPL_ERROR_INVALID_OPERATION;
+}
+
+/* Create a tbm_surface_queue for |wl_egl_surface| via wayland-tbm.
+ *
+ * Runs in the wl-egl thread during surface initialization.
+ * A temporary tbm_bufmgr is opened only to query capabilities; when
+ * TBM_BUFMGR_CAPABILITY_TILED_MEMORY is reported the tiled queue
+ * variant is used. The queue is set to GUARANTEE_CYCLE mode and the
+ * reset / acquirable callbacks are registered with |wl_egl_surface|
+ * as user data.
+ *
+ * @param wl_egl_surface  surface whose width/height/format size the queue
+ * @param wl_tbm_client   wayland-tbm client of the owning display
+ * @param num_buffers     number of buffers the queue should manage
+ * @return new tbm_surface_queue_h, or NULL on any failure
+ */
+static tbm_surface_queue_h
+_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface,
+                                                struct wayland_tbm_client *wl_tbm_client,
+                                                int num_buffers)
+{
+       tbm_surface_queue_h tbm_queue = NULL;
+       tbm_bufmgr bufmgr             = NULL;
+       unsigned int capability;
+
+       struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
+       int width = wl_egl_surface->width;
+       int height = wl_egl_surface->height;
+       int format = wl_egl_surface->format;
+
+       if (!wl_tbm_client || !wl_surface) {
+               TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)",
+                               wl_tbm_client, wl_surface);
+               return NULL;
+       }
+
+       /* Query the buffer manager capabilities. Fail early instead of
+        * passing a NULL bufmgr to tbm_bufmgr_get_capability(). */
+       bufmgr = tbm_bufmgr_init(-1);
+       if (!bufmgr) {
+               TPL_ERR("Failed to initialize tbm_bufmgr.");
+               return NULL;
+       }
+       capability = tbm_bufmgr_get_capability(bufmgr);
+       tbm_bufmgr_deinit(bufmgr);
+
+       if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
+               tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
+                                               wl_tbm_client,
+                                               wl_surface,
+                                               num_buffers,
+                                               width,
+                                               height,
+                                               format);
+       } else {
+               tbm_queue = wayland_tbm_client_create_surface_queue(
+                                               wl_tbm_client,
+                                               wl_surface,
+                                               num_buffers,
+                                               width,
+                                               height,
+                                               format);
+       }
+
+       if (!tbm_queue) {
+               TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)",
+                               wl_tbm_client);
+               return NULL;
+       }
+
+       /* GUARANTEE_CYCLE keeps dequeue order stable across the queue. */
+       if (tbm_surface_queue_set_modes(
+                       tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
+                               TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
+                               tbm_queue);
+               tbm_surface_queue_destroy(tbm_queue);
+               return NULL;
+       }
+
+       if (tbm_surface_queue_add_reset_cb(
+                       tbm_queue,
+                       __cb_tbm_queue_reset_callback,
+                       (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
+                               tbm_queue);
+               tbm_surface_queue_destroy(tbm_queue);
+               return NULL;
+       }
+
+       if (tbm_surface_queue_add_acquirable_cb(
+                       tbm_queue,
+                       __cb_tbm_queue_acquirable_callback,
+                       (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
+                               tbm_queue);
+               tbm_surface_queue_destroy(tbm_queue);
+               return NULL;
+       }
+
+       return tbm_queue;
+}
+
+/* Create a tdm_client_vblank object on the "primary" output of
+ * |tdm_client|.
+ *
+ * Fake vblank is enabled so that waits still complete when the output
+ * cannot deliver a real vblank; sync mode is disabled so events are
+ * delivered asynchronously through the tdm fd.
+ *
+ * @return new tdm_client_vblank, or NULL on any tdm error
+ */
+static tdm_client_vblank*
+_thread_create_tdm_client_vblank(tdm_client *tdm_client)
+{
+       tdm_client_vblank *tdm_vblank = NULL;
+       tdm_client_output *tdm_output = NULL;
+       tdm_error tdm_err = TDM_ERROR_NONE;
+
+       if (!tdm_client) {
+               TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
+               return NULL;
+       }
+
+       tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
+       if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
+               return NULL;
+       }
+
+       tdm_vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
+       if (!tdm_vblank || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("Failed to create tdm_vblank. tdm_err(%d)", tdm_err);
+               return NULL;
+       }
+
+       /* Best-effort configuration; return values intentionally ignored. */
+       tdm_client_vblank_set_enable_fake(tdm_vblank, 1);
+       tdm_client_vblank_set_sync(tdm_vblank, 0);
+
+       return tdm_vblank;
+}
+
+/* Free callback for a tpl_surface_vblank_t stored in the display's
+ * tdm.surface_vblanks list.
+ *
+ * Destroys the tdm_vblank, clears the per-vblank mutex, frees the
+ * vblank object itself and finally detaches it from the owning
+ * wl_egl_surface (the surface pointer is saved before free).
+ *
+ * NOTE(review): vblank->waiting_buffers (allocated in
+ * _thread_wl_egl_surface_init) is not freed here — presumably it is
+ * drained/freed elsewhere before this callback runs; verify to rule
+ * out a list leak.
+ */
+static void
+__cb_surface_vblank_free(void *data)
+{
+       TPL_CHECK_ON_NULL_RETURN(data);
+
+       tpl_surface_vblank_t *vblank = (tpl_surface_vblank_t *)data;
+       tpl_wl_egl_surface_t *wl_egl_surface = vblank->wl_egl_surface;
+
+       TPL_INFO("[VBLANK_DESTROY]",
+                        "wl_egl_surface(%p) surface_vblank(%p) tdm_vblank(%p)",
+                        wl_egl_surface, vblank,
+                        vblank->tdm_vblank);
+
+       tdm_client_vblank_destroy(vblank->tdm_vblank);
+       vblank->tdm_vblank = NULL;
+       vblank->wl_egl_surface = NULL;
+       tpl_gmutex_clear(&vblank->mutex);
+
+       free(vblank);
+
+       /* wl_egl_surface was captured above; only the dangling back
+        * pointer is cleared after the vblank object is gone. */
+       wl_egl_surface->vblank = NULL;
+}
+
+/* Thread-side initialization of a wl_egl_surface.
+ *
+ * Runs in the wl-egl thread when the INIT_SURFACE message is handled.
+ * Creates the tbm_surface_queue, optionally a per-surface tdm vblank
+ * object (when the display waits for vblank), and the Tizen protocol
+ * objects (buffer flusher, explicit synchronization) when available.
+ *
+ * On tbm_queue creation failure it returns early with tbm_queue left
+ * NULL; the caller waiting in __tpl_wl_egl_surface_init asserts on
+ * tbm_queue afterwards.
+ */
+static void
+_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+       tpl_surface_vblank_t *vblank         = NULL;
+
+       wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
+                                                                       wl_egl_surface,
+                                                                       wl_egl_display->wl_tbm_client,
+                                                                       wl_egl_surface->num_buffers);
+       if (!wl_egl_surface->tbm_queue) {
+               TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
+                               wl_egl_surface, wl_egl_display->wl_tbm_client);
+               return;
+       }
+
+       TPL_INFO("[QUEUE_CREATION]",
+                        "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)",
+                        wl_egl_surface, wl_egl_surface->wl_surface,
+                        wl_egl_display->wl_tbm_client);
+       TPL_INFO("[QUEUE_CREATION]",
+                        "tbm_queue(%p) size(%d x %d) X %d format(%d)",
+                        wl_egl_surface->tbm_queue,
+                        wl_egl_surface->width,
+                        wl_egl_surface->height,
+                        wl_egl_surface->num_buffers,
+                        wl_egl_surface->format);
+
+       /* vblank handling is optional; any failure below simply leaves
+        * wl_egl_surface->vblank NULL and the surface works without it. */
+       if (wl_egl_display->use_wait_vblank) {
+               vblank = (tpl_surface_vblank_t *)calloc(1, sizeof(tpl_surface_vblank_t));
+               if (vblank) {
+                       vblank->tdm_vblank = _thread_create_tdm_client_vblank(
+                                                                       wl_egl_display->tdm.tdm_client);
+                       if (!vblank->tdm_vblank) {
+                               TPL_ERR("Failed to create tdm_vblank from tdm_client(%p)",
+                                               wl_egl_display->tdm.tdm_client);
+                               free(vblank);
+                               vblank = NULL;
+                       } else {
+                               vblank->waiting_buffers = __tpl_list_alloc();
+                               vblank->wl_egl_surface = wl_egl_surface;
+                               tpl_gmutex_init(&vblank->mutex);
+
+                               /* registered for display-wide teardown */
+                               __tpl_list_push_back(wl_egl_display->tdm.surface_vblanks,
+                                                                        (void *)vblank);
+
+                               TPL_INFO("[VBLANK_INIT]",
+                                                "wl_egl_surface(%p) tdm_client(%p) tdm_vblank(%p)",
+                                                wl_egl_surface, wl_egl_display->tdm.tdm_client,
+                                                vblank->tdm_vblank);
+                       }
+               }
+       }
+
+       wl_egl_surface->vblank = vblank;
+#if TIZEN_FEATURE_ENABLE
+       /* tizen_surface_shm flusher: lets the compositor ask the client
+        * to flush its buffers. */
+       if (wl_egl_display->tss) {
+               wl_egl_surface->tss_flusher =
+                       tizen_surface_shm_get_flusher(wl_egl_display->tss,
+                                                                                 wl_egl_surface->wl_surface);
+       }
+
+       if (wl_egl_surface->tss_flusher) {
+               tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
+                                                                                          &tss_flusher_listener,
+                                                                                          wl_egl_surface);
+               TPL_INFO("[FLUSHER_INIT]",
+                                "wl_egl_surface(%p) tss_flusher(%p)",
+                                wl_egl_surface, wl_egl_surface->tss_flusher);
+       }
+
+       /* zwp_linux explicit sync: failure disables explicit sync for the
+        * whole display, not just this surface. */
+       if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) {
+               wl_egl_surface->surface_sync =
+                       zwp_linux_explicit_synchronization_v1_get_synchronization(
+                                       wl_egl_display->explicit_sync, wl_egl_surface->wl_surface);
+               if (wl_egl_surface->surface_sync) {
+                       TPL_INFO("[EXPLICIT_SYNC_INIT]",
+                                        "wl_egl_surface(%p) surface_sync(%p)",
+                                        wl_egl_surface, wl_egl_surface->surface_sync);
+               } else {
+                       TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)",
+                                        wl_egl_surface);
+                       wl_egl_display->use_explicit_sync = TPL_FALSE;
+               }
+       }
+#endif
+       wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
+}
+
+/* Release or cancel every buffer still tracked in
+ * wl_egl_surface->buffers[] before the surface is torn down.
+ *
+ * Called from __tpl_wl_egl_surface_fini. For each tracked buffer:
+ *  - if it was enqueued but not yet committed, wait (in 200ms slices)
+ *    until it reaches COMMITTED or a wait times out. The status is
+ *    re-read after every wakeup because the cond wait can return
+ *    without the status having advanced (spurious/"fake" signal);
+ *  - buffers in [ACQUIRED, COMMITTED] are released back to the queue;
+ *  - buffers still DEQUEUED have their dequeue cancelled;
+ *  - in both cases the tbm_surface ref taken at acquire/dequeue is
+ *    dropped and the buffer is marked RELEASED.
+ *
+ * Lock ordering: display->wl_event_mutex is taken before
+ * buffers_mutex and buffer->mutex (see the comment at the timed wait).
+ */
+static void
+_tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
+       tpl_wl_egl_display_t *wl_egl_display    = wl_egl_surface->wl_egl_display;
+       tpl_wl_egl_buffer_t *wl_egl_buffer      = NULL;
+       tpl_bool_t need_to_release              = TPL_FALSE;
+       tpl_bool_t need_to_cancel               = TPL_FALSE;
+       buffer_status_t status                  = RELEASED;
+       int idx                                 = 0;
+
+       /* Walk buffers[] until every tracked buffer has been claimed. */
+       while (wl_egl_surface->buffer_cnt) {
+               tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+               tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+               wl_egl_buffer = wl_egl_surface->buffers[idx];
+
+               if (wl_egl_buffer) {
+                       /* claim the slot so no other path can see this buffer */
+                       wl_egl_surface->buffers[idx] = NULL;
+                       wl_egl_surface->buffer_cnt--;
+               } else {
+                       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+                       idx++;
+                       continue;
+               }
+
+               tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
+               status = wl_egl_buffer->status;
+
+               TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
+                                 idx, wl_egl_buffer,
+                                 wl_egl_buffer->tbm_surface,
+                                 status_to_string[status]);
+
+               if (status >= ENQUEUED) {
+                       tpl_result_t wait_result = TPL_ERROR_NONE;
+
+                       /* Defense against fake signals: loop and re-check the
+                        * status after every wakeup; give up only on a real
+                        * timeout. */
+                       while (status < COMMITTED && wait_result != TPL_ERROR_TIME_OUT) {
+                               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+                               /* The lock/unlock order of buffer->mutex and display->wl_event_mutex
+                                * is important. display->mutex must surround buffer->mutex */
+                               wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond,
+                                                                                                  &wl_egl_display->wl_event_mutex,
+                                                                                                  200); /* 200ms */
+                               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+                               status = wl_egl_buffer->status; /* update status */
+
+                               if (wait_result == TPL_ERROR_TIME_OUT)
+                                       TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
+                                                        wl_egl_buffer);
+                       }
+               }
+
+               /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
+               /* It has been acquired but has not yet been released, so this
+                * buffer must be released. */
+               need_to_release = (status >= ACQUIRED && status <= COMMITTED);
+
+               /* After dequeue, it has not been enqueued yet
+                * so cancel_dequeue must be performed. */
+               need_to_cancel = (status == DEQUEUED);
+
+               if (need_to_release) {
+                       tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+                                                                                               wl_egl_buffer->tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+                                               wl_egl_buffer->tbm_surface, tsq_err);
+               }
+
+               if (need_to_cancel) {
+                       tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+                                                                                                          wl_egl_buffer->tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
+                                               wl_egl_buffer->tbm_surface, tsq_err);
+               }
+
+               wl_egl_buffer->status = RELEASED;
+
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+               /* drop the extra ref held while acquired/dequeued */
+               if (need_to_release || need_to_cancel)
+                       tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+
+               tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+
+               idx++;
+       }
+}
+
+/* Backend fini entry point for a window surface.
+ *
+ * Teardown order:
+ *  1. clear all tracked buffers (_tpl_wl_egl_surface_buffer_clear);
+ *  2. destroy the surf_source in-thread and wait — in a loop guarding
+ *     against spurious wakeups — until the thread sets
+ *     gsource_finalized;
+ *  3. detach callbacks from the wl_egl_window and free its
+ *     tizen_private;
+ *  4. clear all mutexes/conds and free the wl_egl_surface.
+ *
+ * The lock/unlock pair before each tpl_gmutex_clear() drains any
+ * thread still holding that mutex before it is destroyed.
+ */
+static void
+__tpl_wl_egl_surface_fini(tpl_surface_t *surface)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+       tpl_wl_egl_display_t *wl_egl_display = NULL;
+
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->display);
+
+       TPL_CHECK_ON_FALSE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW);
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+       wl_egl_display = wl_egl_surface->wl_egl_display;
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
+
+       TPL_INFO("[SURFACE_FINI][BEGIN]",
+                        "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
+                        wl_egl_surface,
+                        wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
+
+       _tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
+
+       if (wl_egl_surface->surf_source) {
+               tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+               // Send destroy mesage to thread
+               tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
+               /* This is a protection to prevent problems that arise in unexpected situations
+                * that g_cond_wait cannot work normally.
+                * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
+                * caller should use tpl_gcond_wait() in the loop with checking finalized flag
+                * */
+               while (!wl_egl_surface->gsource_finalized) {
+                       tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
+               }
+               wl_egl_surface->surf_source = NULL;
+               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+       }
+
+       _print_buffer_lists(wl_egl_surface);
+
+       /* Unhook every callback the surface registered on the
+        * wl_egl_window so the app can keep using the window safely. */
+       if (wl_egl_surface->wl_egl_window) {
+               struct tizen_private *tizen_private = NULL;
+               struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
+               TPL_INFO("[WL_EGL_WINDOW_FINI]",
+                                "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
+                                wl_egl_surface, wl_egl_window,
+                                wl_egl_surface->wl_surface);
+               tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
+               if (tizen_private) {
+                       tizen_private->set_window_serial_callback = NULL;
+                       tizen_private->rotate_callback = NULL;
+                       tizen_private->get_rotation_capability = NULL;
+                       tizen_private->create_presentation_sync_fd = NULL;
+                       tizen_private->create_commit_sync_fd = NULL;
+                       tizen_private->set_frontbuffer_callback = NULL;
+                       tizen_private->merge_sync_fds = NULL;
+                       tizen_private->data = NULL;
+                       free(tizen_private);
+
+                       wl_egl_window->driver_private = NULL;
+               }
+
+               wl_egl_window->destroy_window_callback = NULL;
+               wl_egl_window->resize_callback = NULL;
+
+               wl_egl_surface->wl_egl_window = NULL;
+       }
+
+       wl_egl_surface->last_enq_buffer = NULL;
+
+       wl_egl_surface->wl_surface = NULL;
+       wl_egl_surface->wl_egl_display = NULL;
+       wl_egl_surface->tpl_surface = NULL;
+
+       /* lock/unlock drains any late holder before destroying the mutex */
+       tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+       tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+       tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
+
+       tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+       tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+       tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex);
+
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+       tpl_gmutex_clear(&wl_egl_surface->surf_mutex);
+       tpl_gcond_clear(&wl_egl_surface->surf_cond);
+
+       TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface);
+
+       free(wl_egl_surface);
+       surface->backend.data = NULL;
+}
+
+/* Record whether the window can pre-rotate its own buffers.
+ * Stores |set| into wl_egl_surface->prerotation_capability.
+ * Returns TPL_ERROR_INVALID_PARAMETER when the surface or its backend
+ * data is NULL, TPL_ERROR_NONE otherwise. */
+static tpl_result_t
+__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
+                                                                                        tpl_bool_t set)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
+
+       wl_egl_surface->prerotation_capability = set;
+
+       TPL_INFO("[SET_PREROTATION_CAPABILITY]",
+                        "wl_egl_surface(%p) prerotation capability set to [%s]",
+                        wl_egl_surface, (set ? "TRUE" : "FALSE"));
+
+       return TPL_ERROR_NONE;
+}
+
+/* Update the post (swap) interval used for subsequent commits.
+ * Returns TPL_ERROR_INVALID_PARAMETER when the surface or its backend
+ * data is NULL, TPL_ERROR_NONE otherwise. */
+static tpl_result_t
+__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
+                                                                          int post_interval)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
+
+       wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
+
+       /* Log old -> new before overwriting the stored interval. */
+       TPL_INFO("[SET_POST_INTERVAL]",
+                        "wl_egl_surface(%p) post_interval(%d -> %d)",
+                        wl_egl_surface, wl_egl_surface->post_interval, post_interval);
+
+       wl_egl_surface->post_interval = post_interval;
+
+       return TPL_ERROR_NONE;
+}
+
+/* A surface is valid only while no tbm_queue reset is pending. */
+static tpl_bool_t
+__tpl_wl_egl_surface_validate(tpl_surface_t *surface)
+{
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->backend.data);
+
+       tpl_wl_egl_surface_t *wl_egl_surface =
+               (tpl_wl_egl_surface_t *)surface->backend.data;
+
+       return !(wl_egl_surface->reset);
+}
+
+/* Report the current queue geometry through whichever out parameters
+ * the caller supplied (either may be NULL). */
+static void
+__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface =
+               (tpl_wl_egl_surface_t *)surface->backend.data;
+       tbm_surface_queue_h tbm_queue = wl_egl_surface->tbm_queue;
+
+       if (width)
+               *width = tbm_surface_queue_get_width(tbm_queue);
+       if (height)
+               *height = tbm_surface_queue_get_height(tbm_queue);
+}
+
+#define CAN_DEQUEUE_TIMEOUT_MS 10000
+
+/* Force-flush the tbm_surface_queue after CAN_DEQUEUE timed out.
+ *
+ * Flushes the queue, then releases (and unrefs) every tracked buffer
+ * whose status is past ENQUEUED and at most COMMITTED — i.e. buffers
+ * the consumer side still holds. Buffer lists are printed before and
+ * after for debugging.
+ *
+ * @return TPL_ERROR_NONE, or TPL_ERROR_INVALID_OPERATION when the
+ *         queue flush itself failed
+ *
+ * NOTE(review): declared without `static` — verify whether external
+ * linkage is intended or an oversight.
+ */
+tpl_result_t
+_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       _print_buffer_lists(wl_egl_surface);
+
+       if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
+               != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
+                               wl_egl_surface->tbm_queue, tsq_err);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       {
+               int i;
+               tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+               for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
+                       buffer_status_t status;
+                       /* snapshot the slot and its status under the locks;
+                        * release happens outside buffer->mutex */
+                       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+                       wl_egl_buffer = wl_egl_surface->buffers[i];
+                       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+                       if (wl_egl_buffer) {
+                               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+                               status = wl_egl_buffer->status;
+                               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+                       } else {
+                               continue;
+                       }
+
+                       if (status > ENQUEUED && status <= COMMITTED) {
+                               tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+                                                                                                       wl_egl_buffer->tbm_surface);
+                               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                                       TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+                                                       wl_egl_buffer->tbm_surface, tsq_err);
+                               /* drop the ref taken when the buffer was acquired */
+                               tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+                       }
+               }
+       }
+
+       TPL_INFO("[FORCE_FLUSH]",
+                        "wl_egl_surface(%p) tbm_queue(%p)",
+                        wl_egl_surface, wl_egl_surface->tbm_queue);
+
+       _print_buffer_lists(wl_egl_surface);
+
+       return TPL_ERROR_NONE;
+}
+
+/* Reset the per-frame state of |wl_egl_buffer| when it is handed out
+ * again by dequeue.
+ *
+ * Refreshes transform and serial from the window's tizen_private and
+ * clears any damage rects left from the previous frame. Requires the
+ * wl_egl_window to carry a tizen_private (asserted).
+ */
+static void
+_wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer,
+                                       tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
+       struct tizen_private *tizen_private =
+               (struct tizen_private *)wl_egl_window->driver_private;
+
+       TPL_ASSERT(tizen_private);
+
+       wl_egl_buffer->draw_done                = TPL_FALSE;
+       wl_egl_buffer->need_to_commit           = TPL_TRUE;
+#if TIZEN_FEATURE_ENABLE
+       wl_egl_buffer->buffer_release           = NULL;
+#endif
+       wl_egl_buffer->transform                = tizen_private->transform;
+
+       /* A changed window transform marks the buffer as rotated so the
+        * commit path can apply it. */
+       if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
+               wl_egl_buffer->w_transform          = tizen_private->window_transform;
+               wl_egl_buffer->w_rotated            = TPL_TRUE;
+       }
+
+       /* Use the serial the app set via set_window_serial when present,
+        * otherwise take the next one from tizen_private. */
+       if (wl_egl_surface->set_serial_is_used) {
+               wl_egl_buffer->serial               = wl_egl_surface->serial;
+       } else {
+               wl_egl_buffer->serial               = ++tizen_private->serial;
+       }
+
+       /* discard damage rects from the previous frame */
+       if (wl_egl_buffer->rects) {
+               free(wl_egl_buffer->rects);
+               wl_egl_buffer->rects                = NULL;
+               wl_egl_buffer->num_rects            = 0;
+       }
+}
+
+/* Look up the tpl_wl_egl_buffer_t previously attached to |tbm_surface|
+ * as tbm user data under KEY_WL_EGL_BUFFER; NULL if none was attached. */
+static tpl_wl_egl_buffer_t *
+_get_wl_egl_buffer(tbm_surface_h tbm_surface)
+{
+       tpl_wl_egl_buffer_t *buffer = NULL;
+
+       tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
+                                                                          (void **)&buffer);
+
+       return buffer;
+}
+
+/* Get-or-create the tpl_wl_egl_buffer_t wrapper for |tbm_surface|.
+ *
+ * On first sight of a tbm_surface a new wrapper is allocated,
+ * registered as tbm user data (freed via __cb_wl_egl_buffer_free) and
+ * stored into the surface's buffers[] array; when the array is full
+ * the frontmost entry is evicted to make room. The wrapper's
+ * per-frame fields are (re)initialized via _wl_egl_buffer_init on
+ * every call.
+ *
+ * @return the wrapper, or NULL only on allocation failure
+ */
+static tpl_wl_egl_buffer_t *
+_wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
+                                         tbm_surface_h tbm_surface)
+{
+       tpl_wl_egl_buffer_t  *wl_egl_buffer  = NULL;
+       struct wl_egl_window *wl_egl_window  = wl_egl_surface->wl_egl_window;
+
+       wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+
+       if (!wl_egl_buffer) {
+               wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t));
+               TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL);
+
+               /* tie the wrapper's lifetime to the tbm_surface */
+               tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
+                                                                                  (tbm_data_free)__cb_wl_egl_buffer_free);
+               tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
+                                                                                  wl_egl_buffer);
+
+               wl_egl_buffer->wl_buffer                = NULL;
+               wl_egl_buffer->tbm_surface              = tbm_surface;
+               wl_egl_buffer->bo_name                  = _get_tbm_surface_bo_name(tbm_surface);
+               wl_egl_buffer->wl_egl_surface           = wl_egl_surface;
+
+               wl_egl_buffer->status                   = RELEASED;
+
+               wl_egl_buffer->acquire_fence_fd         = -1;
+               wl_egl_buffer->commit_sync_fd           = -1;
+               wl_egl_buffer->presentation_sync_fd     = -1;
+               wl_egl_buffer->release_fence_fd         = -1;
+
+               wl_egl_buffer->dx                       = wl_egl_window->dx;
+               wl_egl_buffer->dy                       = wl_egl_window->dy;
+               wl_egl_buffer->width                    = tbm_surface_get_width(tbm_surface);
+               wl_egl_buffer->height                   = tbm_surface_get_height(tbm_surface);
+
+               /* -1 forces the first _wl_egl_buffer_init to pick up the
+                * current window transform */
+               wl_egl_buffer->w_transform              = -1;
+
+               tpl_gmutex_init(&wl_egl_buffer->mutex);
+               tpl_gcond_init(&wl_egl_buffer->cond);
+
+               tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+               {
+                       int i;
+                       /* find a free slot in buffers[] */
+                       for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+                               if (wl_egl_surface->buffers[i] == NULL) break;
+
+                       /* If this exception is reached,
+                        * it may be a critical memory leak problem. */
+                       if (i == BUFFER_ARRAY_SIZE) {
+                               tpl_wl_egl_buffer_t *evicted_buffer = NULL;
+                               int evicted_idx = 0; /* evict the frontmost buffer */
+
+                               evicted_buffer = wl_egl_surface->buffers[evicted_idx];
+
+                               TPL_WARN("wl_egl_surface(%p) buffers array is full. evict one.",
+                                                wl_egl_surface);
+                               TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
+                                                evicted_buffer, evicted_buffer->tbm_surface,
+                                                status_to_string[evicted_buffer->status]);
+
+                               /* [TODO] need to think about whether there will be
+                                * better modifications */
+                               wl_egl_surface->buffer_cnt--;
+                               wl_egl_surface->buffers[evicted_idx]      = NULL;
+
+                               i = evicted_idx;
+                       }
+
+                       wl_egl_surface->buffer_cnt++;
+                       wl_egl_surface->buffers[i]          = wl_egl_buffer;
+                       wl_egl_buffer->idx                  = i;
+               }
+               tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
+               TPL_INFO("[WL_EGL_BUFFER_CREATE]",
+                                "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
+                                wl_egl_surface, wl_egl_buffer, tbm_surface,
+                                wl_egl_buffer->bo_name);
+       }
+
+       _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface);
+
+       return wl_egl_buffer;
+}
+
+/* Dequeue a drawable tbm_surface from the surface's tbm_queue.
+ *
+ * @param surface       TPL surface whose backend data is tpl_wl_egl_surface_t.
+ * @param timeout_ns    not referenced in this implementation; waiting is
+ *                      bounded by CAN_DEQUEUE_TIMEOUT_MS instead.
+ * @param release_fence out parameter. When explicit sync is enabled, receives
+ *                      the release fence fd (ownership transferred to the
+ *                      caller, buffer's fd is reset to -1); otherwise -1.
+ * @return dequeued tbm_surface (internally ref'd here) or NULL on failure.
+ *
+ * Locking: temporarily drops the TPL object lock while waiting on
+ * surf_mutex / can_dequeue, then holds wl_event_mutex across the actual
+ * dequeue to keep the wl-egl thread from racing the queue state. */
+static tbm_surface_h
+__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
+                                                                       int32_t *release_fence)
+{
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->backend.data);
+       TPL_ASSERT(surface->display);
+       TPL_ASSERT(surface->display->backend.data);
+       TPL_OBJECT_CHECK_RETURN(surface, NULL);
+
+       tpl_wl_egl_surface_t *wl_egl_surface =
+               (tpl_wl_egl_surface_t *)surface->backend.data;
+       tpl_wl_egl_display_t *wl_egl_display =
+               (tpl_wl_egl_display_t *)surface->display->backend.data;
+       tpl_wl_egl_buffer_t *wl_egl_buffer   = NULL;
+
+       tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
+       tpl_bool_t      is_activated         = 0;
+       int             bo_name              = 0;
+       tbm_surface_h   tbm_surface          = NULL;
+
+       TPL_OBJECT_UNLOCK(surface);
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+       /* After a tbm_queue reset, wait (in bounded 200ms steps) until the last
+        * enqueued buffer has reached COMMITTED before dequeuing again, so the
+        * commit order of buffers is preserved across the reset. */
+       if (wl_egl_surface->reset == TPL_TRUE) {
+               if (_check_buffer_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer) &&
+                       tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) {
+                       tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer;
+                       tpl_wl_egl_buffer_t *enqueued_buffer =
+                               _get_wl_egl_buffer(last_enq_buffer);
+
+                       if (enqueued_buffer) {
+                               /* Extra ref keeps last_enq_buffer alive while surf_mutex
+                                * is dropped for the condition wait below. */
+                               tbm_surface_internal_ref(last_enq_buffer);
+                               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+                               tpl_gmutex_lock(&enqueued_buffer->mutex);
+                               /* Re-check status in a loop: tpl_gcond_timed_wait may
+                                * return without the buffer having advanced (spurious or
+                                * fake signal) — see buffer_clear defense. */
+                               while (enqueued_buffer->status >= ENQUEUED &&
+                                          enqueued_buffer->status < COMMITTED) {
+                                       tpl_result_t wait_result;
+                                       TPL_INFO("[DEQ_AFTER_RESET]",
+                                                        "waiting for previous wl_egl_buffer(%p) commit",
+                                                        enqueued_buffer);
+
+                                       wait_result = tpl_gcond_timed_wait(&enqueued_buffer->cond,
+                                                                                                         &enqueued_buffer->mutex,
+                                                                                                         200); /* 200ms */
+                                       if (wait_result == TPL_ERROR_TIME_OUT) {
+                                               TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
+                                                                enqueued_buffer);
+                                               break;
+                                       }
+                               }
+                               tpl_gmutex_unlock(&enqueued_buffer->mutex);
+                               tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+                               tbm_surface_internal_unref(last_enq_buffer);
+                       }
+               }
+
+               wl_egl_surface->last_enq_buffer = NULL;
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+       tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
+                               wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
+       TPL_OBJECT_LOCK(surface);
+
+       /* After the can dequeue state, lock the wl_event_mutex to prevent other
+        * events from being processed in wayland_egl_thread
+        * during below dequeue procedure. */
+       tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+
+       /* A can-dequeue timeout usually means the compositor is not releasing
+        * buffers; force-flush the queue once and retry the state. */
+       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
+               TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
+                                wl_egl_surface->tbm_queue, surface);
+               if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
+                       TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
+                                       wl_egl_surface->tbm_queue, surface);
+                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+                       return NULL;
+               } else {
+                       tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+               }
+       }
+
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
+                               wl_egl_surface->tbm_queue, surface);
+               tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+               return NULL;
+       }
+
+       /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
+        * below function [wayland_tbm_client_queue_check_activate()].
+        * This function has to be called before tbm_surface_queue_dequeue()
+        * in order to know what state the buffer will be dequeued next.
+        *
+        * ACTIVATED state means non-composite mode. Client can get buffers which
+           can be displayed directly(without compositing).
+        * DEACTIVATED state means composite mode. Client's buffer will be displayed
+           by compositor(E20) with compositing.
+        */
+       is_activated = wayland_tbm_client_queue_check_activate(
+                                               wl_egl_display->wl_tbm_client,
+                                               wl_egl_surface->tbm_queue);
+
+       wl_egl_surface->is_activated = is_activated;
+
+       surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
+       surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
+       wl_egl_surface->width = surface->width;
+       wl_egl_surface->height = surface->height;
+
+       if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
+               /* If surface->frontbuffer is already set in frontbuffer mode,
+                * it will return that frontbuffer if it is still activated,
+                * otherwise dequeue the new buffer after initializing
+                * surface->frontbuffer to NULL. */
+               if (is_activated && !wl_egl_surface->reset) {
+                       bo_name = _get_tbm_surface_bo_name(surface->frontbuffer);
+
+                       TPL_LOG_T("WL_EGL",
+                                         "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
+                                         surface->frontbuffer, bo_name);
+                       TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer,
+                                                         "[DEQ]~[ENQ] BO_NAME:%d",
+                                                         bo_name);
+                       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+                       return surface->frontbuffer;
+               } else {
+                       surface->frontbuffer = NULL;
+                       wl_egl_surface->need_to_enqueue = TPL_TRUE;
+               }
+       } else {
+               surface->frontbuffer = NULL;
+       }
+
+       tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
+                                                                               &tbm_surface);
+       if (!tbm_surface) {
+               TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d",
+                               wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err);
+               tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+               return NULL;
+       }
+
+       /* Balanced by unref on enqueue/cancel paths; keeps the surface alive
+        * while the client owns it. */
+       tbm_surface_internal_ref(tbm_surface);
+
+       wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
+       TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
+
+       tpl_gmutex_lock(&wl_egl_buffer->mutex);
+       wl_egl_buffer->status = DEQUEUED;
+
+       /* If wl_egl_buffer->release_fence_fd is -1,
+        * the tbm_surface can be used immediately.
+        * If not, user(EGL) have to wait until signaled. */
+       if (release_fence) {
+#if TIZEN_FEATURE_ENABLE
+               if (wl_egl_display->use_explicit_sync) {
+                       *release_fence = wl_egl_buffer->release_fence_fd;
+                       TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
+                                         wl_egl_surface, wl_egl_buffer, *release_fence);
+
+                       /* fd ownership moves to the caller. */
+                       wl_egl_buffer->release_fence_fd = -1;
+               } else
+#endif
+               {
+                       *release_fence = -1;
+               }
+       }
+
+       if (surface->is_frontbuffer_mode && is_activated)
+               surface->frontbuffer = tbm_surface;
+
+       wl_egl_surface->reset = TPL_FALSE;
+
+       TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name);
+       TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d",
+                                         wl_egl_buffer->bo_name);
+       TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+                         wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name,
+                         release_fence ? *release_fence : -1);
+
+       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+       tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+
+       return tbm_surface;
+}
+
+/* Return a dequeued-but-unused tbm_surface to the tbm_queue without
+ * enqueuing it (EGL cancel path).
+ *
+ * @param surface     TPL surface owning the backend data.
+ * @param tbm_surface buffer previously returned by dequeue_buffer.
+ * @return TPL_ERROR_NONE on success, INVALID_PARAMETER for an invalid
+ *         surface, INVALID_OPERATION when cancel_dequeue fails. */
+static tpl_result_t
+__tpl_wl_egl_surface_cancel_buffer(tpl_surface_t *surface,
+                                                                  tbm_surface_h tbm_surface)
+{
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->backend.data);
+
+       tpl_wl_egl_surface_t *wl_egl_surface    =
+               (tpl_wl_egl_surface_t *)surface->backend.data;
+       tpl_wl_egl_buffer_t *wl_egl_buffer      = NULL;
+       tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       if (!tbm_surface_internal_is_valid(tbm_surface)) {
+               TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+       if (wl_egl_buffer) {
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+               wl_egl_buffer->status = RELEASED;
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+       }
+
+       /* Drops the ref taken in dequeue_buffer.
+        * NOTE(review): the unref happens before cancel_dequeue; this assumes
+        * the tbm_queue still holds its own reference — confirm against
+        * tbm_surface_queue ownership rules. */
+       tbm_surface_internal_unref(tbm_surface);
+
+       tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+                                                                                          tbm_surface);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
+                               tbm_surface, surface);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       TPL_INFO("[CANCEL_BUFFER]", "wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
+                         wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
+
+       return TPL_ERROR_NONE;
+}
+
+/* Enqueue a rendered tbm_surface so the wl-egl thread can acquire and
+ * commit it to the compositor.
+ *
+ * @param surface       TPL surface owning the backend data.
+ * @param tbm_surface   buffer previously returned by dequeue_buffer.
+ * @param num_rects     number of damage rects (4 ints each: x, y, w, h).
+ * @param rects         damage rect array, or NULL when fully damaged.
+ * @param acquire_fence fence fd signaled when rendering completes, or -1.
+ *                      Ownership is taken here (stored on the buffer, or
+ *                      closed in frontbuffer mode).
+ * @return TPL_ERROR_NONE on success (including the frontbuffer skip path),
+ *         otherwise INVALID_PARAMETER / OUT_OF_MEMORY / INVALID_OPERATION.
+ *
+ * Fix vs. previous revision: num_rects was assigned before the calloc
+ * result was checked, so an allocation failure left the buffer with
+ * num_rects > 0 and rects == NULL. num_rects is now set only after the
+ * allocation succeeds. The "fo"->"for" typo in the OOM message is also
+ * corrected. */
+static tpl_result_t
+__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
+               tbm_surface_h tbm_surface,
+               int num_rects, const int *rects, int32_t acquire_fence)
+{
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->display);
+       TPL_ASSERT(surface->backend.data);
+       TPL_ASSERT(tbm_surface);
+       TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
+
+       tpl_wl_egl_surface_t *wl_egl_surface    =
+               (tpl_wl_egl_surface_t *) surface->backend.data;
+       tpl_wl_egl_buffer_t *wl_egl_buffer      = NULL;
+       tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
+       int bo_name                             = -1;
+
+       if (!tbm_surface_internal_is_valid(tbm_surface)) {
+               TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
+                               tbm_surface);
+               TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+       if (!wl_egl_buffer) {
+               TPL_ERR("Failed to get wl_egl_buffer from tbm_surface(%p)", tbm_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       bo_name = _get_tbm_surface_bo_name(tbm_surface);
+
+       TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
+
+       tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
+       /* If there are received region information, save it to wl_egl_buffer */
+       if (num_rects && rects) {
+               if (wl_egl_buffer->rects != NULL) {
+                       free(wl_egl_buffer->rects);
+                       wl_egl_buffer->rects = NULL;
+                       wl_egl_buffer->num_rects = 0;
+               }
+
+               wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
+
+               /* num_rects is set only after the allocation is known to have
+                * succeeded, so rects/num_rects can never disagree. */
+               if (!wl_egl_buffer->rects) {
+                       TPL_ERR("Failed to allocate memory for damage rects info.");
+                       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+                       return TPL_ERROR_OUT_OF_MEMORY;
+               }
+
+               wl_egl_buffer->num_rects = num_rects;
+
+               memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects);
+       }
+
+       if (!wl_egl_surface->need_to_enqueue ||
+               !wl_egl_buffer->need_to_commit) {
+               TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
+                                ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
+               TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+               return TPL_ERROR_NONE;
+       }
+
+       /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
+        * commit if surface->frontbuffer that is already set and the tbm_surface
+        * client want to enqueue are the same.
+        */
+       if (surface->is_frontbuffer_mode) {
+               /* The first buffer to be activated in frontbuffer mode must be
+                * committed. Subsequence frames do not need to be committed because
+                * the buffer is already displayed.
+                */
+               if (surface->frontbuffer == tbm_surface)
+                       wl_egl_surface->need_to_enqueue = TPL_FALSE;
+
+               /* Frontbuffer mode never waits on an acquire fence; close it to
+                * avoid leaking the fd. */
+               if (acquire_fence != -1) {
+                       close(acquire_fence);
+                       acquire_fence = -1;
+               }
+       }
+
+       /* Replace any stale acquire fence with the one for this frame. */
+       if (wl_egl_buffer->acquire_fence_fd != -1)
+               close(wl_egl_buffer->acquire_fence_fd);
+
+       wl_egl_buffer->acquire_fence_fd = acquire_fence;
+
+       /* Move the surface's pending presentation/commit sync fds onto this
+        * buffer so the events fire when this frame is presented/committed. */
+       tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+       if (wl_egl_surface->presentation_sync.fd != -1) {
+               wl_egl_buffer->presentation_sync_fd  = wl_egl_surface->presentation_sync.fd;
+               wl_egl_surface->presentation_sync.fd = -1;
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+
+       tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+       if (wl_egl_surface->commit_sync.fd != -1) {
+               wl_egl_buffer->commit_sync_fd  = wl_egl_surface->commit_sync.fd;
+               wl_egl_surface->commit_sync.fd = -1;
+               TRACE_ASYNC_BEGIN(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
+                                                 _get_tbm_surface_bo_name(tbm_surface));
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+
+       wl_egl_buffer->status = ENQUEUED;
+       TPL_LOG_T("WL_EGL",
+                         "[ENQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+                         wl_egl_buffer, tbm_surface, bo_name, acquire_fence);
+
+       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+       tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
+                                                                               tbm_surface);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               tbm_surface_internal_unref(tbm_surface);
+               TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
+                               tbm_surface, wl_egl_surface, tsq_err);
+               TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       /* Remembered so a later queue reset can wait for this buffer's commit. */
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+       wl_egl_surface->last_enq_buffer = tbm_surface;
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+       /* Drops the ref taken in dequeue_buffer. */
+       tbm_surface_internal_unref(tbm_surface);
+
+       TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+
+       return TPL_ERROR_NONE;
+}
+
+/* Dispatch callback of the disposable gsource watching a buffer's acquire
+ * fence fd. Runs on the wl-egl thread when the fence signals (render done).
+ * Closes the fence fd, then either commits the buffer immediately or, when
+ * vblank pacing is active, appends it to the vblank waiting list.
+ * Returns TPL_FALSE so the disposable source is not re-armed. */
+static tpl_bool_t
+__thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message)
+{
+       tpl_wl_egl_buffer_t *wl_egl_buffer      =
+               (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
+       tpl_wl_egl_surface_t *wl_egl_surface    = wl_egl_buffer->wl_egl_surface;
+       tbm_surface_h tbm_surface               = wl_egl_buffer->tbm_surface;
+
+       wl_egl_surface->render_done_cnt++;
+
+       TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
+                                       wl_egl_buffer->acquire_fence_fd);
+
+       TPL_DEBUG("[RENDER DONE] wl_egl_buffer(%p) tbm_surface(%p)",
+                         wl_egl_buffer, tbm_surface);
+
+       tpl_gmutex_lock(&wl_egl_buffer->mutex);
+       wl_egl_buffer->status = WAITING_VBLANK;
+
+       TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
+                         wl_egl_buffer, wl_egl_buffer->waiting_source,
+                         wl_egl_buffer->acquire_fence_fd);
+
+       /* The fence has signaled; its fd and the one-shot source are done. */
+       close(wl_egl_buffer->acquire_fence_fd);
+       wl_egl_buffer->acquire_fence_fd = -1;
+       wl_egl_buffer->waiting_source = NULL;
+
+       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+
+       /* No vblank object / vblank already done: commit now. Otherwise defer
+        * until __cb_tdm_client_vblank pops the waiting list. */
+       if (wl_egl_surface->vblank == NULL || wl_egl_surface->vblank_done)
+               _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
+       else {
+               tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+               __tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers,
+                                                        wl_egl_buffer);
+               tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+       }
+
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+       return TPL_FALSE;
+}
+
+/* Finalize callback for the disposable fence-wait gsource. All per-buffer
+ * cleanup (closing the fd, clearing waiting_source) is already done in the
+ * dispatch callback, so nothing remains to release here. */
+static void
+__thread_func_waiting_source_finalize(tpl_gsource *gsource)
+{
+       TPL_IGNORE(gsource);
+}
+
+/* gsource callback table for the per-buffer acquire-fence wait source
+ * (SOURCE_TYPE_DISPOSABLE); only dispatch/finalize are required. */
+static tpl_gsource_functions buffer_funcs = {
+       .prepare = NULL,
+       .check = NULL,
+       .dispatch = __thread_func_waiting_source_dispatch,
+       .finalize = __thread_func_waiting_source_finalize,
+};
 
-       retval = !(wayland_egl_surface->reset);
+/* Acquire every currently-acquirable buffer from the tbm_queue (wl-egl
+ * thread context) and route each one:
+ *  - acquire fence present, explicit sync   -> commit now (compositor waits)
+ *  - acquire fence present, no explicit sync-> wait on the fence fd via a
+ *                                              disposable gsource first
+ *  - no fence                               -> commit now, or park on the
+ *                                              vblank waiting list.
+ * Returns TPL_ERROR_NONE, or TPL_ERROR_INVALID_OPERATION when
+ * tbm_surface_queue_acquire fails. */
+static tpl_result_t
+_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       tbm_surface_h tbm_surface            = NULL;
+       tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
+       tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+       tpl_wl_egl_buffer_t *wl_egl_buffer   = NULL;
+       tpl_bool_t ready_to_commit           = TPL_FALSE;
+
+       while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) {
+               tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue,
+                                                                                       &tbm_surface);
+               if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+                       TPL_ERR("Failed to acquire from tbm_queue(%p)",
+                                       wl_egl_surface->tbm_queue);
+                       return TPL_ERROR_INVALID_OPERATION;
+               }
 
-       return retval;
+               /* Ref held until the compositor releases the buffer. */
+               tbm_surface_internal_ref(tbm_surface);
+
+               wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+               TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
+                                                                          "wl_egl_buffer sould be not NULL");
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
+               wl_egl_buffer->status = ACQUIRED;
+
+               TPL_LOG_T("WL_EGL", "[ACQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
+                                 wl_egl_buffer, tbm_surface,
+                                 _get_tbm_surface_bo_name(tbm_surface));
+
+               if (wl_egl_buffer->acquire_fence_fd != -1) {
+#if TIZEN_FEATURE_ENABLE
+                       /* With explicit sync the fence is handed to the compositor,
+                        * so no local wait is needed. */
+                       if (wl_egl_display->use_explicit_sync)
+                               ready_to_commit = TPL_TRUE;
+                       else
+#endif
+                       {
+                               /* Replace any leftover wait source before arming a new one. */
+                               if (wl_egl_buffer->waiting_source) {
+                                       tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
+                                       wl_egl_buffer->waiting_source = NULL;
+                               }
+
+                               wl_egl_buffer->waiting_source =
+                                       tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
+                                                                          wl_egl_buffer->acquire_fence_fd, &buffer_funcs,
+                                                                          SOURCE_TYPE_DISPOSABLE);
+                               wl_egl_buffer->status = WAITING_SIGNALED;
+
+                               TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
+                                                                 wl_egl_buffer->acquire_fence_fd);
+
+                               ready_to_commit = TPL_FALSE;
+                       }
+               } else {
+                       ready_to_commit = TPL_TRUE;
+               }
+
+               if (ready_to_commit) {
+                       /* Commit now unless vblank pacing requires queuing. */
+                       if (wl_egl_surface->vblank == NULL || wl_egl_surface->vblank_done)
+                               ready_to_commit = TPL_TRUE;
+                       else {
+                               wl_egl_buffer->status = WAITING_VBLANK;
+                               tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+                               __tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers, wl_egl_buffer);
+                               tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+                               ready_to_commit = TPL_FALSE;
+                       }
+               }
+
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+               if (ready_to_commit)
+                       _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
+       }
+
+       return TPL_ERROR_NONE;
 }
 
-static tpl_result_t
-__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
-                                                                                       tbm_surface_h tbm_surface)
+/* -- BEGIN -- tdm_client vblank callback function */
+/* Invoked on the wl-egl thread when the requested tdm vblank fires (or
+ * times out). Commits one buffer from the vblank waiting list per event;
+ * on TDM_ERROR_TIMEOUT the whole list is flushed so rendering cannot
+ * stall behind a missing vblank. */
+static void
+__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
+                                          unsigned int sequence, unsigned int tv_sec,
+                                          unsigned int tv_usec, void *user_data)
{
-       tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
-       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+       tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data;
+       tpl_wl_egl_buffer_t *wl_egl_buffer   = NULL;
+
+       TRACE_ASYNC_END((intptr_t)wl_egl_surface, "WAIT_VBLANK");
+       TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface);
+
+       if (error == TDM_ERROR_TIMEOUT)
+               TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)",
+                                wl_egl_surface);
+
+       wl_egl_surface->vblank_done = TPL_TRUE;
+
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+       if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
+               tpl_bool_t is_empty = TPL_TRUE;
+               do {
+                       /* Pop under the vblank mutex, commit outside it. */
+                       tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+                       wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(
+                                                               wl_egl_surface->vblank->waiting_buffers,
+                                                               NULL);
+                       is_empty = __tpl_list_is_empty(wl_egl_surface->vblank->waiting_buffers);
+                       tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+
+                       if (!wl_egl_buffer) break;
+
+                       _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
+
+                       /* If tdm error such as TIMEOUT occured,
+                        * flush all vblank waiting buffers of its wl_egl_surface.
+                        * Otherwise, only one wl_egl_buffer will be commited per one vblank event.
+                        */
+                       if (error == TDM_ERROR_NONE) break;
+               } while (!is_empty);
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+}
+/* -- END -- tdm_client vblank callback function */
 
-       wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
-       if (!wayland_egl_surface) {
-               TPL_ERR("Invalid backend surface. surface(%p) wayland_egl_surface(%p)",
-                               surface, wayland_egl_surface);
-               return TPL_ERROR_INVALID_PARAMETER;
+#if TIZEN_FEATURE_ENABLE
+/* zwp_linux_buffer_release_v1 'fenced_release' handler: the compositor is
+ * done with the buffer but 'fence' must signal before the client reuses it.
+ * Stores the fence as release_fence_fd, marks the buffer RELEASED and
+ * returns it to the tbm_queue. Takes ownership of the fence fd. */
+static void
+__cb_buffer_fenced_release(void *data,
+                               struct zwp_linux_buffer_release_v1 *release, int32_t fence)
+{
+       tpl_wl_egl_buffer_t *wl_egl_buffer  = (tpl_wl_egl_buffer_t *)data;
+       tbm_surface_h tbm_surface           = NULL;
+
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
+
+       tbm_surface = wl_egl_buffer->tbm_surface;
+
+       if (tbm_surface_internal_is_valid(tbm_surface)) {
+               tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+               /* Only a buffer still in COMMITTED state is released; events for
+                * buffers in any other state are ignored. */
+               if (wl_egl_buffer->status == COMMITTED) {
+                       tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+
+                       /* One release object per commit; done with it now. */
+                       zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
+                       wl_egl_buffer->buffer_release = NULL;
+
+                       wl_egl_buffer->release_fence_fd = fence;
+                       wl_egl_buffer->status = RELEASED;
+
+                       TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
+                                          _get_tbm_surface_bo_name(tbm_surface),
+                                          fence);
+                       TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+                                                       _get_tbm_surface_bo_name(tbm_surface));
+
+                       TPL_LOG_T("WL_EGL",
+                                         "[FENCED_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+                                         wl_egl_buffer, tbm_surface,
+                                         _get_tbm_surface_bo_name(tbm_surface),
+                                         fence);
+
+                       tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+                                                                                               tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+               }
+
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+               /* Drop the ref taken at acquire time once the queue release
+                * succeeded. */
+               if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
+                       tbm_surface_internal_unref(tbm_surface);
+
+       } else {
+               TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
        }
+}
 
-       if (!tbm_surface_internal_is_valid(tbm_surface)) {
-               TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
-               return TPL_ERROR_INVALID_PARAMETER;
+/* zwp_linux_buffer_release_v1 'immediate_release' handler: the compositor
+ * is done with the buffer and no fence needs to be waited on. Clears
+ * release_fence_fd, marks the buffer RELEASED and returns it to the
+ * tbm_queue. Mirrors __cb_buffer_fenced_release with fence == -1. */
+static void
+__cb_buffer_immediate_release(void *data,
+                                                         struct zwp_linux_buffer_release_v1 *release)
+{
+       tpl_wl_egl_buffer_t *wl_egl_buffer  = (tpl_wl_egl_buffer_t *)data;
+       tbm_surface_h tbm_surface           = NULL;
+
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
+
+       tbm_surface = wl_egl_buffer->tbm_surface;
+
+       if (tbm_surface_internal_is_valid(tbm_surface)) {
+               tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+               /* Only a buffer still in COMMITTED state is released; events for
+                * buffers in any other state are ignored. */
+               if (wl_egl_buffer->status == COMMITTED) {
+                       tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+
+                       /* One release object per commit; done with it now. */
+                       zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
+                       wl_egl_buffer->buffer_release = NULL;
+
+                       wl_egl_buffer->release_fence_fd = -1;
+                       wl_egl_buffer->status = RELEASED;
+
+                       TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
+                                          _get_tbm_surface_bo_name(tbm_surface));
+                       TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+                                                       _get_tbm_surface_bo_name(tbm_surface));
+
+                       TPL_LOG_T("WL_EGL",
+                                         "[IMMEDIATE_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
+                                         wl_egl_buffer, tbm_surface,
+                                         _get_tbm_surface_bo_name(tbm_surface));
+
+                       tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+                                                                                               tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+               }
+
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+               /* Drop the ref taken at acquire time once the queue release
+                * succeeded. */
+               if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
+                       tbm_surface_internal_unref(tbm_surface);
+
+       } else {
+               TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+       }
+}
+
+/* Listener for zwp_linux_buffer_release_v1 events (explicit-sync path):
+ * fenced_release delivers a release fence fd, immediate_release means the
+ * buffer may be reused right away.
+ * NOTE(review): "listner" is a typo but the name is referenced where the
+ * listener is registered; renaming would need a coordinated change. */
+static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
+       __cb_buffer_fenced_release,
+       __cb_buffer_immediate_release,
+};
+#endif
+
+/* wl_buffer.release handler (used when explicit sync is not in effect for
+ * this buffer). Releases the tbm_surface back to the surface's tbm_queue.
+ * The release is performed only while the buffer is still in COMMITTED
+ * state (checked under wl_egl_buffer->mutex), which guards against
+ * duplicate or spurious release signals; the matching unref happens only
+ * when tbm_surface_queue_release() succeeded. */
+static void
+__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
+{
+       tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+       tbm_surface_h tbm_surface = NULL;
+
+       TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer)
+
+       tbm_surface = wl_egl_buffer->tbm_surface;
+
+       if (tbm_surface_internal_is_valid(tbm_surface)) {
+               tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+               /* stays INVALID_SURFACE unless the COMMITTED branch runs,
+                * so the unref below is skipped for non-committed buffers */
+               tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+               tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
+               if (wl_egl_buffer->status == COMMITTED) {
+
+                       tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+                                                                                               tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+                       wl_egl_buffer->status = RELEASED;
+
+                       TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
+                       TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+                                                       _get_tbm_surface_bo_name(tbm_surface));
+
+                       TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+                                         wl_egl_buffer->wl_buffer, tbm_surface,
+                                         _get_tbm_surface_bo_name(tbm_surface));
+               }
+
+               tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+               /* drop the ref taken at commit time only if the queue release
+                * actually happened */
+               if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
+                       tbm_surface_internal_unref(tbm_surface);
+       } else {
+               TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
        }
+}
 
-       tbm_surface_internal_unref(tbm_surface);
+/* Listener for the core wl_buffer.release event (non-explicit-sync path).
+ * The cast adapts the callback's (struct wl_proxy *) parameter to the
+ * listener's (struct wl_buffer *) slot. */
+static const struct wl_buffer_listener wl_buffer_release_listener = {
+       (void *)__cb_wl_buffer_release,
+};
+#if TIZEN_FEATURE_ENABLE
+/* wp_presentation_feedback.sync_output handler.
+ * Intentionally empty: this backend does not track which wl_output the
+ * presentation was synchronized to. */
+static void
+__cb_presentation_feedback_sync_output(void *data,
+                       struct wp_presentation_feedback *presentation_feedback,
+                       struct wl_output *output)
+{
+       TPL_IGNORE(data);
+       TPL_IGNORE(presentation_feedback);
+       TPL_IGNORE(output);
+       /* Nothing to do */
+}
 
-       tsq_err = tbm_surface_queue_cancel_dequeue(wayland_egl_surface->tbm_queue,
-                                                                                          tbm_surface);
-       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-               TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface);
-               return TPL_ERROR_INVALID_OPERATION;
+/* wp_presentation_feedback.presented handler: the frame reached the screen.
+ * Signals the waiting client through the eventfd stored in pst_feedback,
+ * then destroys the protocol object, unlinks and frees the pst_feedback
+ * entry. All timing arguments are ignored; only delivery matters here.
+ * Runs entirely under presentation_sync.mutex. */
+static void
+__cb_presentation_feedback_presented(void *data,
+                       struct wp_presentation_feedback *presentation_feedback,
+                       uint32_t tv_sec_hi,
+                       uint32_t tv_sec_lo,
+                       uint32_t tv_nsec,
+                       uint32_t refresh_nsec,
+                       uint32_t seq_hi,
+                       uint32_t seq_lo,
+                       uint32_t flags)
+{
+       TPL_IGNORE(tv_sec_hi);
+       TPL_IGNORE(tv_sec_lo);
+       TPL_IGNORE(tv_nsec);
+       TPL_IGNORE(refresh_nsec);
+       TPL_IGNORE(seq_hi);
+       TPL_IGNORE(seq_lo);
+       TPL_IGNORE(flags);
+
+       struct pst_feedback *pst_feedback       = (struct pst_feedback *)data;
+       tpl_wl_egl_surface_t *wl_egl_surface    = pst_feedback->wl_egl_surface;
+
+       tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+
+       TPL_DEBUG("[FEEDBACK][PRESENTED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
+                         pst_feedback, presentation_feedback, pst_feedback->bo_name);
+
+       /* wake any waiter on the presentation sync fd, then close our end */
+       if (pst_feedback->pst_sync_fd != -1) {
+               int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
+               if (ret == -1) {
+                       TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+                                       pst_feedback->pst_sync_fd);
+               }
+
+               TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
+                                               "[PRESENTATION_SYNC] bo(%d)",
+                                               pst_feedback->bo_name);
+
+               close(pst_feedback->pst_sync_fd);
+               pst_feedback->pst_sync_fd = -1;
        }
 
-       TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
-                         surface, tbm_surface);
+       wp_presentation_feedback_destroy(presentation_feedback);
 
-       return TPL_ERROR_NONE;
+       pst_feedback->presentation_feedback = NULL;
+       pst_feedback->wl_egl_surface        = NULL;
+       pst_feedback->bo_name               = 0;
+
+       __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
+                                                  TPL_FIRST, NULL);
+
+       free(pst_feedback);
+
+       tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
 }
 
-#define CAN_DEQUEUE_TIMEOUT_MS 5000
+/* wp_presentation_feedback.discarded handler: the frame was never shown.
+ * Identical cleanup path to the presented callback — the sync fd is still
+ * signalled so a waiting client is not left blocked forever, then the
+ * feedback object and the pst_feedback entry are torn down.
+ * Runs entirely under presentation_sync.mutex. */
+static void
+__cb_presentation_feedback_discarded(void *data,
+                       struct wp_presentation_feedback *presentation_feedback)
+{
+       struct pst_feedback *pst_feedback       = (struct pst_feedback *)data;
+       tpl_wl_egl_surface_t *wl_egl_surface    = pst_feedback->wl_egl_surface;
+
+       tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
 
-static tbm_surface_h
-__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
-                                                                                tbm_fd *sync_fence)
+       TPL_DEBUG("[FEEDBACK][DISCARDED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
+                         pst_feedback, presentation_feedback, pst_feedback->bo_name);
+
+       /* signal the waiter even on discard, so it never blocks forever */
+       if (pst_feedback->pst_sync_fd != -1) {
+               int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
+               if (ret == -1) {
+                       TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+                                       pst_feedback->pst_sync_fd);
+               }
+
+               TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
+                                               "[PRESENTATION_SYNC] bo(%d)",
+                                               pst_feedback->bo_name);
+
+               close(pst_feedback->pst_sync_fd);
+               pst_feedback->pst_sync_fd = -1;
+       }
+
+       wp_presentation_feedback_destroy(presentation_feedback);
+
+       pst_feedback->presentation_feedback = NULL;
+       pst_feedback->wl_egl_surface        = NULL;
+       pst_feedback->bo_name               = 0;
+
+       __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
+                                                  TPL_FIRST, NULL);
+
+       free(pst_feedback);
+
+       tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+}
+
+/* Listener for wp_presentation_feedback: sync_output is a no-op, the
+ * presented/discarded pair signal and tear down the pst_feedback entry. */
+static const struct wp_presentation_feedback_listener feedback_listener = {
+       __cb_presentation_feedback_sync_output, /* sync_output feedback -*/
+       __cb_presentation_feedback_presented,
+       __cb_presentation_feedback_discarded
+};
+#endif
+
+/* Arms a tdm_client vblank wait for this surface using its post_interval;
+ * __cb_tdm_client_vblank fires on the next matching vblank. On success
+ * vblank_done is cleared so commits can throttle on it.
+ * Returns TPL_ERROR_NONE, or TPL_ERROR_INVALID_OPERATION when
+ * tdm_client_vblank_wait fails.
+ * NOTE(review): wl_egl_surface->vblank is dereferenced without a NULL
+ * check here — the caller is expected to have verified it (the visible
+ * call site does check vblank != NULL first). */
+static tpl_result_t
+_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
 {
-       TPL_ASSERT(surface);
-       TPL_ASSERT(surface->backend.data);
-       TPL_ASSERT(surface->display);
-       TPL_ASSERT(surface->display->backend.data);
-       TPL_OBJECT_CHECK_RETURN(surface, NULL);
+       tdm_error tdm_err                       = TDM_ERROR_NONE;
+       tpl_surface_vblank_t *vblank            = wl_egl_surface->vblank;
 
-       tbm_surface_h tbm_surface = NULL;
-       tpl_wayland_egl_surface_t *wayland_egl_surface =
-               (tpl_wayland_egl_surface_t *)surface->backend.data;
-       tpl_wayland_egl_display_t *wayland_egl_display =
-               (tpl_wayland_egl_display_t *)surface->display->backend.data;
-       tbm_surface_queue_error_e tsq_err = 0;
-       int is_activated = 0;
-       int bo_name = 0;
-       tpl_result_t lock_ret = TPL_FALSE;
-
-       if (sync_fence)
-               *sync_fence = -1;
+       tdm_err = tdm_client_vblank_wait(vblank->tdm_vblank,
+                       wl_egl_surface->post_interval,
+                       __cb_tdm_client_vblank,
+                       (void *)wl_egl_surface);
 
-       TPL_OBJECT_UNLOCK(surface);
-       tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
-                               wayland_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
-       TPL_OBJECT_LOCK(surface);
+       if (tdm_err == TDM_ERROR_NONE) {
+               wl_egl_surface->vblank_done = TPL_FALSE;
+               TRACE_ASYNC_BEGIN((intptr_t)wl_egl_surface, "WAIT_VBLANK");
+       } else {
+               TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
 
-       /* After the can dequeue state, call twe_display_lock to prevent other
-        * events from being processed in wayland_egl_thread
-        * during below dequeue procedure. */
-       lock_ret = twe_display_lock(wayland_egl_display->twe_display);
+       return TPL_ERROR_NONE;
+}
 
-       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
-               TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset",
-                               wayland_egl_surface->tbm_queue);
-               if (twe_surface_queue_force_flush(wayland_egl_surface->twe_surface)
-                       != TPL_ERROR_NONE) {
-                       TPL_ERR("Failed to timeout reset. tbm_queue(%p)", wayland_egl_surface->tbm_queue);
-                       if (lock_ret == TPL_ERROR_NONE)
-                               twe_display_unlock(wayland_egl_display->twe_display);
-                       return NULL;
+static void
+_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
+                                                 tpl_wl_egl_buffer_t *wl_egl_buffer)
+{
+       tpl_wl_egl_display_t *wl_egl_display    = wl_egl_surface->wl_egl_display;
+       struct wl_surface *wl_surface           = wl_egl_surface->wl_surface;
+       struct wl_egl_window *wl_egl_window     = wl_egl_surface->wl_egl_window;
+       uint32_t version;
+
+       TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
+                                                                  "wl_egl_buffer sould be not NULL");
+
+       if (wl_egl_buffer->wl_buffer == NULL) {
+               wl_egl_buffer->wl_buffer =
+                       (struct wl_proxy *)wayland_tbm_client_create_buffer(
+                                               wl_egl_display->wl_tbm_client,
+                                               wl_egl_buffer->tbm_surface);
+
+               TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
+                                                                          "[FATAL] Failed to create wl_buffer");
+
+               TPL_INFO("[WL_BUFFER_CREATE]",
+                                "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
+                                wl_egl_buffer, wl_egl_buffer->wl_buffer,
+                                wl_egl_buffer->tbm_surface);
+
+#if TIZEN_FEATURE_ENABLE
+               if (!wl_egl_display->use_explicit_sync ||
+                       wl_egl_buffer->acquire_fence_fd == -1)
+#endif
+               {
+                       wl_buffer_add_listener((struct wl_buffer *)wl_egl_buffer->wl_buffer,
+                                                                  &wl_buffer_release_listener,
+                                                                  wl_egl_buffer);
+               }
+       }
+
+       version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
+
+#if TIZEN_FEATURE_ENABLE
+       /* create presentation feedback and add listener */
+       tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+       if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
+
+               struct pst_feedback *pst_feedback = NULL;
+               pst_feedback = (struct pst_feedback *) calloc(1, sizeof(struct pst_feedback));
+               if (pst_feedback) {
+                       pst_feedback->presentation_feedback =
+                               wp_presentation_feedback(wl_egl_display->presentation,
+                                                                                wl_surface);
+
+                       pst_feedback->wl_egl_surface        = wl_egl_surface;
+                       pst_feedback->bo_name               = wl_egl_buffer->bo_name;
+
+                       pst_feedback->pst_sync_fd           = wl_egl_buffer->presentation_sync_fd;
+                       wl_egl_buffer->presentation_sync_fd = -1;
+
+                       wp_presentation_feedback_add_listener(pst_feedback->presentation_feedback,
+                                                                                                 &feedback_listener, pst_feedback);
+                       __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, pst_feedback);
+                       TRACE_ASYNC_BEGIN(pst_feedback->pst_sync_fd,
+                                                         "[PRESENTATION_SYNC] bo(%d)",
+                                                         pst_feedback->bo_name);
                } else {
-                       tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+                       TPL_ERR("Failed to create presentation feedback. wl_egl_buffer(%p)",
+                                       wl_egl_buffer);
+                       _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
+                       close(wl_egl_buffer->presentation_sync_fd);
+                       wl_egl_buffer->presentation_sync_fd = -1;
+               }
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+#endif
+
+       if (wl_egl_buffer->w_rotated == TPL_TRUE) {
+               if (version > 1) {
+                       wayland_tbm_client_set_buffer_transform(
+                                       wl_egl_display->wl_tbm_client,
+                                       (void *)wl_egl_buffer->wl_buffer,
+                                       wl_egl_buffer->w_transform);
+                       TPL_INFO("[W_TRANSFORM]",
+                                        "wl_egl_surface(%p) wl_egl_buffer(%p) w_transform(%d)",
+                                        wl_egl_surface, wl_egl_buffer, wl_egl_buffer->w_transform);
                }
+               wl_egl_buffer->w_rotated = TPL_FALSE;
        }
 
-       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-               TPL_ERR("Failed to query can_dequeue. tbm_queue(%p)", wayland_egl_surface->tbm_queue);
-               if (lock_ret == TPL_ERROR_NONE)
-                       twe_display_unlock(wayland_egl_display->twe_display);
-               return NULL;
+       if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
+               if (version > 1) {
+                       wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
+                       TPL_INFO("[TRANSFORM]",
+                                        "wl_egl_surface(%p) wl_egl_buffer(%p) transform(%d -> %d)",
+                                        wl_egl_surface, wl_egl_buffer,
+                                        wl_egl_surface->latest_transform, wl_egl_buffer->transform);
+               }
+               wl_egl_surface->latest_transform = wl_egl_buffer->transform;
        }
 
-       /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
-        * below function [wayland_tbm_client_queue_check_activate()].
-        * This function has to be called before tbm_surface_queue_dequeue()
-        * in order to know what state the buffer will be dequeued next.
-        *
-        * ACTIVATED state means non-composite mode. Client can get buffers which
-           can be displayed directly(without compositing).
-        * DEACTIVATED state means composite mode. Client's buffer will be displayed
-           by compositor(E20) with compositing.
-        */
-       is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface);
-       wayland_egl_surface->is_activated = is_activated;
+       if (wl_egl_window) {
+               wl_egl_window->attached_width = wl_egl_buffer->width;
+               wl_egl_window->attached_height = wl_egl_buffer->height;
+       }
 
-       surface->width = tbm_surface_queue_get_width(wayland_egl_surface->tbm_queue);
-       surface->height = tbm_surface_queue_get_height(wayland_egl_surface->tbm_queue);
+       wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer,
+                                         wl_egl_buffer->dx, wl_egl_buffer->dy);
 
-       if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
-               /* If surface->frontbuffer is already set in frontbuffer mode,
-                * it will return that frontbuffer if it is still activated,
-                * otherwise dequeue the new buffer after initializing
-                * surface->frontbuffer to NULL. */
-               if (is_activated) {
-                       TPL_LOG_T("WL_EGL",
-                                         "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
-                                         surface->frontbuffer,
-                                         tbm_bo_export(tbm_surface_internal_get_bo(
-                                                                       surface->frontbuffer, 0)));
-                       TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
-                                                         "[DEQ]~[ENQ] BO_NAME:%d",
-                                                         tbm_bo_export(tbm_surface_internal_get_bo(
-                                                                                               surface->frontbuffer, 0)));
-                       if (lock_ret == TPL_ERROR_NONE)
-                               twe_display_unlock(wayland_egl_display->twe_display);
-                       return surface->frontbuffer;
-               } else
-                       surface->frontbuffer = NULL;
+       if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) {
+               if (version < 4) {
+                       wl_surface_damage(wl_surface,
+                                                         wl_egl_buffer->dx, wl_egl_buffer->dy,
+                                                         wl_egl_buffer->width, wl_egl_buffer->height);
+               } else {
+                       wl_surface_damage_buffer(wl_surface,
+                                                                        0, 0,
+                                                                        wl_egl_buffer->width, wl_egl_buffer->height);
+               }
        } else {
-               surface->frontbuffer = NULL;
+               int i;
+               for (i = 0; i < wl_egl_buffer->num_rects; i++) {
+                       int inverted_y =
+                               wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] +
+                                               wl_egl_buffer->rects[i * 4 + 3]);
+                       if (version < 4) {
+                               wl_surface_damage(wl_surface,
+                                                                 wl_egl_buffer->rects[i * 4 + 0],
+                                                                 inverted_y,
+                                                                 wl_egl_buffer->rects[i * 4 + 2],
+                                                                 wl_egl_buffer->rects[i * 4 + 3]);
+                       } else {
+                               wl_surface_damage_buffer(wl_surface,
+                                                                                wl_egl_buffer->rects[i * 4 + 0],
+                                                                                inverted_y,
+                                                                                wl_egl_buffer->rects[i * 4 + 2],
+                                                                                wl_egl_buffer->rects[i * 4 + 3]);
+                       }
+               }
        }
 
-       tsq_err = tbm_surface_queue_dequeue(wayland_egl_surface->tbm_queue,
-                                                                               &tbm_surface);
-       if (!tbm_surface) {
-               TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d",
-                               tsq_err);
-               if (lock_ret == TPL_ERROR_NONE)
-                       twe_display_unlock(wayland_egl_display->twe_display);
-               return NULL;
+       wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
+                                               (void *)wl_egl_buffer->wl_buffer,
+                                               wl_egl_buffer->serial);
+#if TIZEN_FEATURE_ENABLE
+       if (wl_egl_display->use_explicit_sync &&
+               wl_egl_buffer->acquire_fence_fd != -1) {
+
+               zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
+                                                                                                                          wl_egl_buffer->acquire_fence_fd);
+               TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)",
+                                 wl_egl_surface, wl_egl_buffer->tbm_surface, wl_egl_buffer->acquire_fence_fd);
+               close(wl_egl_buffer->acquire_fence_fd);
+               wl_egl_buffer->acquire_fence_fd = -1;
+
+               wl_egl_buffer->buffer_release =
+                       zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync);
+               if (!wl_egl_buffer->buffer_release) {
+                       TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface);
+               } else {
+                       zwp_linux_buffer_release_v1_add_listener(
+                               wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer);
+                       TPL_DEBUG("add explicit_sync_release_listener.");
+               }
        }
+#endif
 
-       tbm_surface_internal_ref(tbm_surface);
-       bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+       wl_surface_commit(wl_surface);
 
-       if (surface->is_frontbuffer_mode && is_activated)
-               surface->frontbuffer = tbm_surface;
+       wl_display_flush(wl_egl_display->wl_display);
 
-       wayland_egl_surface->reset = TPL_FALSE;
+       TRACE_ASYNC_BEGIN((intptr_t)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+                                         wl_egl_buffer->bo_name);
 
-       TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name);
-       TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
-       TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d)",
-                         tbm_surface, bo_name);
+       tpl_gmutex_lock(&wl_egl_buffer->mutex);
 
-       if (lock_ret == TPL_ERROR_NONE)
-               twe_display_unlock(wayland_egl_display->twe_display);
+       wl_egl_buffer->need_to_commit   = TPL_FALSE;
+       wl_egl_buffer->status           = COMMITTED;
+       if (wl_egl_surface->last_enq_buffer == wl_egl_buffer->tbm_surface)
+               wl_egl_surface->last_enq_buffer = NULL;
 
-       return tbm_surface;
+       tpl_gcond_signal(&wl_egl_buffer->cond);
+
+       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+       TPL_LOG_T("WL_EGL",
+                         "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
+                         wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,
+                         wl_egl_buffer->bo_name);
+
+       if (wl_egl_surface->vblank != NULL &&
+               _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
+               TPL_ERR("Failed to set wait vblank.");
+
+       tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+
+       if (wl_egl_buffer->commit_sync_fd != -1) {
+               int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
+               if (ret == -1) {
+                       TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd);
+               }
+
+               TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
+                                               wl_egl_buffer->bo_name);
+               TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)",
+                                 wl_egl_surface, wl_egl_buffer->commit_sync_fd);
+
+               close(wl_egl_buffer->commit_sync_fd);
+               wl_egl_buffer->commit_sync_fd = -1;
+       }
+
+       tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
 }
 
-tpl_bool_t
-__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
+/* Writes the value 1 to an eventfd to wake its reader (used for the
+ * commit_sync and presentation_sync fds). Returns the write() result
+ * (number of bytes, i.e. 8) on success, -1 on invalid fd or write error.
+ * NOTE(review): callers appear earlier in the file, so a forward
+ * declaration presumably exists above this chunk — verify. */
+static int
+_write_to_eventfd(int eventfd)
 {
-       if (!native_dpy) return TPL_FALSE;
+       uint64_t value = 1;
+       int ret;
 
-       if (twe_check_native_handle_is_wl_display(native_dpy))
-               return TPL_TRUE;
+       if (eventfd == -1) {
+               TPL_ERR("Invalid fd(-1)");
+               return -1;
+       }
 
-       return TPL_FALSE;
+       ret = write(eventfd, &value, sizeof(uint64_t));
+       if (ret == -1) {
+               TPL_ERR("failed to write to fd(%d)", eventfd);
+               return ret;
+       }
+
+       return ret;
 }
 
 void
@@ -800,10 +3612,157 @@ __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
        backend->fini = __tpl_wl_egl_surface_fini;
        backend->validate = __tpl_wl_egl_surface_validate;
        backend->cancel_dequeued_buffer =
-               __tpl_wl_egl_surface_cancel_dequeued_buffer;
+               __tpl_wl_egl_surface_cancel_buffer;
        backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
        backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
        backend->set_rotation_capability =
                __tpl_wl_egl_surface_set_rotation_capability;
+       backend->set_post_interval =
+               __tpl_wl_egl_surface_set_post_interval;
+       backend->get_size =
+               __tpl_wl_egl_surface_get_size;
+}
+
+/*
+ * Final teardown of a wl_egl_buffer: unlinks it from its surface,
+ * releases the wayland/tbm resources it holds, signals-then-closes any
+ * pending sync fds, and frees the struct.
+ *
+ * NOTE(review): registered as a free callback ("__cb_" prefix) --
+ * presumably invoked when the backing tbm_surface is destroyed;
+ * confirm at the handler registration site.
+ * NOTE(review): wl_egl_surface is dereferenced without a NULL check
+ * while wl_egl_display IS checked below -- verify the surface pointer
+ * can never be NULL here.
+ */
+static void
+__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
+{
+       tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+       tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+
+       TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
+                        wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
+
+       /* Detach from the surface's buffers[] slot and drop the count. */
+       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+       if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) {
+               wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL;
+               wl_egl_surface->buffer_cnt--;
+
+               wl_egl_buffer->idx = -1;
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
+       /* Remove any pending vblank-wait entry referencing this buffer. */
+       if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
+               tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+               __tpl_list_remove_data(wl_egl_surface->vblank->waiting_buffers,
+                                                          (void *)wl_egl_buffer,
+                                                          TPL_FIRST,
+                                                          NULL);
+               tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+       }
+
+       /* Release the compositor-side wl_buffer and flush the display. */
+       if (wl_egl_display) {
+               if (wl_egl_buffer->wl_buffer) {
+                       wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
+                                                                                         (void *)wl_egl_buffer->wl_buffer);
+                       wl_egl_buffer->wl_buffer = NULL;
+               }
+
+               wl_display_flush(wl_egl_display->wl_display);
+       }
+
+       tpl_gmutex_lock(&wl_egl_buffer->mutex);
+#if TIZEN_FEATURE_ENABLE
+       /* Explicit-sync resources (buffer_release event + release fence). */
+       if (wl_egl_buffer->buffer_release) {
+               zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
+               wl_egl_buffer->buffer_release = NULL;
+       }
+
+       if (wl_egl_buffer->release_fence_fd != -1) {
+               close(wl_egl_buffer->release_fence_fd);
+               wl_egl_buffer->release_fence_fd = -1;
+       }
+#endif
+
+       if (wl_egl_buffer->waiting_source) {
+               tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
+               wl_egl_buffer->waiting_source = NULL;
+       }
+
+       /* Pending sync fds are signaled BEFORE being closed -- presumably
+        * to wake blocked waiters rather than abandon them on a dead fd;
+        * confirm against the fd consumers. */
+       if (wl_egl_buffer->commit_sync_fd != -1) {
+               int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
+               if (ret == -1)
+                       TPL_ERR("Failed to send commit_sync signal to fd(%d)",
+                                       wl_egl_buffer->commit_sync_fd);
+               close(wl_egl_buffer->commit_sync_fd);
+               wl_egl_buffer->commit_sync_fd = -1;
+       }
+
+       if (wl_egl_buffer->presentation_sync_fd != -1) {
+               int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
+               if (ret == -1)
+                       TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+                                       wl_egl_buffer->presentation_sync_fd);
+               close(wl_egl_buffer->presentation_sync_fd);
+               wl_egl_buffer->presentation_sync_fd = -1;
+       }
+
+       if (wl_egl_buffer->rects) {
+               free(wl_egl_buffer->rects);
+               wl_egl_buffer->rects = NULL;
+               wl_egl_buffer->num_rects = 0;
+       }
+
+       /* Mark released, then tear down the per-buffer lock/cond and free. */
+       wl_egl_buffer->tbm_surface = NULL;
+       wl_egl_buffer->bo_name = -1;
+       wl_egl_buffer->status = RELEASED;
+
+       tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+       tpl_gmutex_clear(&wl_egl_buffer->mutex);
+       tpl_gcond_clear(&wl_egl_buffer->cond);
+       free(wl_egl_buffer);
+}
+
+/*
+ * Export the global name of the tbm_surface's first bo; used as a
+ * stable buffer identifier in logs and traces throughout this file.
+ *
+ * NOTE(review): the tbm_surface_internal_get_bo() result is not
+ * NULL-checked -- TODO confirm it cannot fail for queue buffers.
+ */
+static int
+_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
+{
+       return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+}
+
+/*
+ * Debug helper: logs, under buffers_mutex, every occupied slot of
+ * wl_egl_surface->buffers[] with its wl_egl_buffer pointer,
+ * tbm_surface, bo name and lifecycle status string.
+ */
+static void
+_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       int idx = 0;
+
+       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+       TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)",
+                        wl_egl_surface, wl_egl_surface->buffer_cnt);
+       for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
+               tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
+               if (wl_egl_buffer) {
+                       TPL_INFO("[INFO]",
+                                        "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
+                                        idx, wl_egl_buffer, wl_egl_buffer->tbm_surface,
+                                        wl_egl_buffer->bo_name,
+                                        status_to_string[wl_egl_buffer->status]);
+               }
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
 }
 
+/*
+ * Check whether tbm_surface is currently owned by wl_egl_surface,
+ * i.e. present in its buffers[] array (searched under buffers_mutex).
+ *
+ * Returns TPL_TRUE when found; TPL_FALSE (with an error log) when not.
+ * NULL arguments return TPL_FALSE silently.
+ */
+static tpl_bool_t
+_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface)
+{
+       int idx = 0;
+       tpl_bool_t ret = TPL_FALSE;
+
+       /* silent return */
+       if (!wl_egl_surface || !tbm_surface)
+               return ret;
+
+       tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+       for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
+               tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
+               if (wl_egl_buffer && wl_egl_buffer->tbm_surface == tbm_surface) {
+                       ret = TPL_TRUE;
+                       break;
+               }
+       }
+
+       /* The previous "|| idx == BUFFER_ARRAY_SIZE" clause was dead code:
+        * the loop only ends with idx == BUFFER_ARRAY_SIZE when no match
+        * was found, and then ret is already TPL_FALSE. */
+       if (ret == TPL_FALSE) {
+               TPL_ERR("tbm_surface(%p) is not owned by wl_egl_surface(%p)",
+                               tbm_surface, wl_egl_surface);
+       }
+       tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
+       return ret;
+}