Add missing initialization to trace tpl_surface.
[platform/core/uifw/libtpl-egl.git] / src / tpl_wl_vk_thread.c
index b1eaf75..40f7e81 100644 (file)
 
 #include "tpl_internal.h"
 
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/eventfd.h>
+
+#include <tbm_bufmgr.h>
 #include <tbm_surface.h>
 #include <tbm_surface_internal.h>
 #include <tbm_surface_queue.h>
 
-#include <tbm_sync.h>
+#include <wayland-client.h>
+#include <wayland-tbm-server.h>
+#include <wayland-tbm-client.h>
+
+#include <tdm_client.h>
+
+#ifndef TIZEN_FEATURE_ENABLE
+#define TIZEN_FEATURE_ENABLE 1
+#endif
+
+#if TIZEN_FEATURE_ENABLE
+#include <tizen-surface-client-protocol.h>
+#include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
+#endif
+
+#include "tpl_utils_gthread.h"
+
+#define BUFFER_ARRAY_SIZE 10
+#define VK_CLIENT_QUEUE_SIZE 3
+
+static int wl_vk_buffer_key;
+#define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
+
+typedef struct _tpl_wl_vk_display       tpl_wl_vk_display_t;
+typedef struct _tpl_wl_vk_surface       tpl_wl_vk_surface_t;
+typedef struct _tpl_wl_vk_swapchain     tpl_wl_vk_swapchain_t;
+typedef struct _tpl_wl_vk_buffer        tpl_wl_vk_buffer_t;
+
+/* Backend display state for the wl_vk thread backend.
+ * Owns the dedicated tpl thread, the wayland event queue serviced by
+ * that thread, the wayland-tbm client, and the optional tdm client
+ * used for vblank waiting. */
+struct _tpl_wl_vk_display {
+       tpl_gsource                  *disp_source;
+       tpl_gthread                  *thread;
+       tpl_gmutex                    wl_event_mutex;
+
+       struct wl_display            *wl_display;
+       struct wl_event_queue        *ev_queue;
+       struct wayland_tbm_client    *wl_tbm_client;
+       int                           last_error; /* errno of the last wl_display error*/
+
+       tpl_bool_t                    wl_initialized;
+       tpl_bool_t                    tdm_initialized;
+
+       tdm_client                   *tdm_client;
+       tpl_gsource                  *tdm_source;
+       int                           tdm_display_fd;
+
+       tpl_bool_t                    use_wait_vblank;
+       tpl_bool_t                    use_explicit_sync;
+       tpl_bool_t                    prepared; /* TRUE while a wl_display read is armed */
+
+       /* device surface capabilities */
+       int                           min_buffer;
+       int                           max_buffer;
+       int                           present_modes;
+#if TIZEN_FEATURE_ENABLE
+       struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
+#endif
+};
+
+/* Swapchain state: the tbm_surface_queue backing a vulkan swapchain,
+ * the requested properties, the exported buffer array, and a
+ * reference count shared across users. */
+struct _tpl_wl_vk_swapchain {
+       tpl_wl_vk_surface_t          *wl_vk_surface;
 
-#include "tpl_wayland_egl_thread.h"
+       tbm_surface_queue_h           tbm_queue;
 
-typedef struct _tpl_wayland_vk_wsi_display tpl_wayland_vk_wsi_display_t;
-typedef struct _tpl_wayland_vk_wsi_surface tpl_wayland_vk_wsi_surface_t;
-typedef struct _tpl_wayland_vk_wsi_buffer tpl_wayland_vk_wsi_buffer_t;
+       struct {
+               int                       width;
+               int                       height;
+               tbm_format                format;
+               int                       buffer_count;
+               int                       present_mode;
+       } properties;
+
+       tbm_surface_h                *swapchain_buffers;
+
+       tpl_util_atomic_uint          ref_cnt;
+};
+
+/* Request codes stored in wl_vk_surface->sent_message; presumably
+ * consumed by the surface gsource dispatcher (not visible in this
+ * chunk) — NONE_MESSAGE means no request is pending. */
+typedef enum surf_message {
+       NONE_MESSAGE = 0,
+       INIT_SURFACE,
+       CREATE_QUEUE,
+       DESTROY_QUEUE,
+       ACQUIRABLE,
+} surf_message;
+
+/* Per-surface backend state: the surface gsource, its swapchain,
+ * wayland/tdm handles, the in-flight wl_vk_buffer trace array, and
+ * the mutex/cond pairs shared with the tpl thread. */
+struct _tpl_wl_vk_surface {
+       tpl_gsource                  *surf_source;
+
+       tpl_wl_vk_swapchain_t        *swapchain;
+
+       struct wl_surface            *wl_surface;
+#if TIZEN_FEATURE_ENABLE
+       struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
+#endif
+       tdm_client_vblank            *vblank;
+
+       /* surface information */
+       int                           render_done_cnt;
+
+       tpl_wl_vk_display_t          *wl_vk_display;
+       tpl_surface_t                *tpl_surface;
+
+       /* wl_vk_buffer array for buffer tracing */
+       tpl_wl_vk_buffer_t           *buffers[BUFFER_ARRAY_SIZE];
+       int                           buffer_cnt; /* the number of wl_vk_buffers in use */
+       tpl_gmutex                    buffers_mutex;
+
+       tpl_list_t                   *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
+
+       tpl_gmutex                    surf_mutex;
+       tpl_gcond                     surf_cond;
+
+       /* for waiting draw done */
+       tpl_bool_t                    is_activated;
+       tpl_bool_t                    reset; /* TRUE if the queue was reset externally */
+       tpl_bool_t                    vblank_done;
+
+       surf_message                  sent_message;
+
+       int                           post_interval;
+};
 
-struct _tpl_wayland_vk_wsi_display {
-       twe_thread *wl_thread;
-       twe_display_h twe_display;
+/* Lifecycle states of a wl_vk_buffer; values must stay in sync with
+ * the status_to_string table. */
+typedef enum buffer_status {
+       RELEASED = 0,             // 0
+       DEQUEUED,                 // 1
+       ENQUEUED,                 // 2
+       ACQUIRED,                 // 3
+       WAITING_SIGNALED,         // 4
+       WAITING_VBLANK,           // 5
+       COMMITTED,                // 6
+} buffer_status_t;
+
+/* Human-readable names for buffer_status_t, indexed by status value. */
+static const char *status_to_string[7] = {
+       "RELEASED",                 // 0
+       "DEQUEUED",                 // 1
+       "ENQUEUED",                 // 2
+       "ACQUIRED",                 // 3
+       "WAITING_SIGNALED",         // 4
+       "WAITING_VBLANK",           // 5
+       "COMMITTED",                // 6
 };
 
-struct _tpl_wayland_vk_wsi_surface {
-       twe_surface_h twe_surface;
-       tbm_surface_queue_h tbm_queue;
-       int buffer_count;
-       tpl_bool_t is_activated;
-       tpl_bool_t reset;
+/* Tracing/ownership record for one tbm_surface exported as a
+ * wl_buffer: status transitions, damage region, commit bookkeeping,
+ * and the explicit-sync fence fds. */
+struct _tpl_wl_vk_buffer {
+       tbm_surface_h                 tbm_surface;
+       int                           bo_name;
+
+       struct wl_buffer             *wl_buffer;
+       int                           dx, dy; /* position to attach to wl_surface */
+       int                           width, height; /* size to attach to wl_surface */
+
+       buffer_status_t               status; /* for tracing buffer status */
+       int                           idx; /* position index in buffers array of wl_vk_surface */
+
+       /* for damage region */
+       int                           num_rects;
+       int                          *rects;
+
+       /* for checking need_to_commit (frontbuffer mode) */
+       tpl_bool_t                    need_to_commit;
+
+#if TIZEN_FEATURE_ENABLE
+       /* to get release event via zwp_linux_buffer_release_v1 */
+       struct zwp_linux_buffer_release_v1 *buffer_release;
+#endif
+
+       /* Each buffer owns its release_fence_fd until ownership is
+        * passed on to EGL. */
+       int32_t                       release_fence_fd;
+
+       /* Each buffer owns its acquire_fence_fd.
+        * If zwp_linux_buffer_release_v1 is used, ownership of this fd
+        * is passed to the display server.
+        * Otherwise it is used as a fence to wait for render-done
+        * on the tpl thread. */
+       int32_t                       acquire_fence_fd;
+
+       tpl_gmutex                    mutex;
+       tpl_gcond                     cond;
+
+       tpl_wl_vk_surface_t          *wl_vk_surface;
 };
 
-static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(
-       tpl_surface_t *surface);
+static void
+_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
+static int
+_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
+static void
+__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
+static void
+__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
+static tpl_result_t
+_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
+static void
+_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
+static tpl_result_t
+_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
+static void
+_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
+                                                 tpl_wl_vk_buffer_t *wl_vk_buffer);
 
-static TPL_INLINE tpl_bool_t
-__tpl_wl_vk_wsi_display_is_wl_display(tpl_handle_t native_dpy)
+/* Heuristic: treat native_dpy as a wl_display when the first pointer
+ * it stores is (or is named like) wl_display_interface. */
+static tpl_bool_t
+_check_native_handle_is_wl_display(tpl_handle_t native_dpy)
 {
-       if (!native_dpy) return TPL_FALSE;
+       /* NOTE(review): native_dpy is dereferenced before any NULL check
+        * (the removed code checked !native_dpy first) — confirm callers
+        * never pass NULL. */
+       struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
+
+       if (!wl_vk_native_dpy) {
+               TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
+               return TPL_FALSE;
+       }
 
-       if (twe_check_native_handle_is_wl_display(native_dpy))
+       /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
+          is a memory address pointing the structure of wl_display_interface. */
+       if (wl_vk_native_dpy == &wl_display_interface)
                return TPL_TRUE;
 
+       if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
+                               strlen(wl_display_interface.name)) == 0) {
+               return TPL_TRUE;
+       }
+
        return TPL_FALSE;
 }
 
+/* gsource dispatch callback for the tdm client fd.
+ * Forwards pending tdm events to the client. Returning TPL_FALSE
+ * removes the source from the thread — done when the display data is
+ * gone or tdm_client_handle_events fails unrecoverably. */
+static tpl_bool_t
+__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
+{
+       tpl_wl_vk_display_t        *wl_vk_display = NULL;
+       tdm_error                   tdm_err = TDM_ERROR_NONE;
+
+       TPL_IGNORE(message);
+
+       wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
+       if (!wl_vk_display) {
+               TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
+               TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
+               return TPL_FALSE;
+       }
+
+       tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client);
+
+       /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
+        * When tdm_source is no longer available due to an unexpected situation,
+        * wl_vk_thread must remove it from the thread and destroy it.
+        * In that case, tdm_vblank can no longer be used for surfaces and displays
+        * that used this tdm_source. */
+       if (tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
+                               tdm_err);
+               TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
+
+               tpl_gsource_destroy(gsource, TPL_FALSE);
+
+               wl_vk_display->tdm_source = NULL;
+
+               return TPL_FALSE;
+       }
+
+       return TPL_TRUE;
+}
+
+/* gsource finalize callback for the tdm source: destroys the
+ * tdm_client and clears the display's tdm state. */
+static void
+__thread_func_tdm_finalize(tpl_gsource *gsource)
+{
+       tpl_wl_vk_display_t *wl_vk_display = NULL;
+
+       wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
+
+       /* NOTE(review): wl_vk_display is dereferenced below without a NULL
+        * check — confirm tpl_gsource_get_data() cannot return NULL here. */
+       TPL_LOG_T("WL_VK",
+                         "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)",
+                         wl_vk_display, wl_vk_display->tdm_client, gsource);
+
+       if (wl_vk_display->tdm_client) {
+               tdm_client_destroy(wl_vk_display->tdm_client);
+               wl_vk_display->tdm_client = NULL;
+               wl_vk_display->tdm_display_fd = -1;
+       }
+
+       wl_vk_display->tdm_initialized = TPL_FALSE;
+}
+
+/* gsource vtable for the tdm fd: dispatch-only; poll readiness is
+ * handled by the gsource framework (no prepare/check). */
+static tpl_gsource_functions tdm_funcs = {
+       .prepare  = NULL,
+       .check    = NULL,
+       .dispatch = __thread_func_tdm_dispatch,
+       .finalize = __thread_func_tdm_finalize,
+};
+
 static tpl_result_t
-__tpl_wl_vk_wsi_display_init(tpl_display_t *display)
+_thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
+{
+       /* Creates the tdm_client and caches its event fd; the tdm
+        * gsource itself is attached later in __tpl_wl_vk_display_init. */
+       tdm_client       *tdm_client = NULL;
+       int               tdm_display_fd = -1;
+       tdm_error         tdm_err = TDM_ERROR_NONE;
+
+       tdm_client = tdm_client_create(&tdm_err);
+       if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
+       if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
+               tdm_client_destroy(tdm_client);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       wl_vk_display->tdm_display_fd  = tdm_display_fd;
+       wl_vk_display->tdm_client      = tdm_client;
+       wl_vk_display->tdm_source      = NULL;
+       wl_vk_display->tdm_initialized = TPL_TRUE;
+
+       TPL_INFO("[TDM_CLIENT_INIT]",
+                        "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
+                        wl_vk_display, tdm_client, tdm_display_fd);
+
+       return TPL_ERROR_NONE;
+}
+
+/* wl_registry global listener ("resistry" is a pre-existing typo in
+ * the name, also used at the registration site below): binds
+ * zwp_linux_explicit_synchronization_v1 when advertised, unless
+ * disabled by setting TPL_EFS=0 in the environment. */
+static void
+__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
+                                                         uint32_t name, const char *interface,
+                                                         uint32_t version)
 {
-       tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+#if TIZEN_FEATURE_ENABLE
+       tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
 
+       if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
+               char *env = tpl_getenv("TPL_EFS");
+               if (env && !atoi(env)) {
+                       wl_vk_display->use_explicit_sync = TPL_FALSE;
+               } else {
+                       wl_vk_display->explicit_sync =
+                                       wl_registry_bind(wl_registry, name,
+                                                                        &zwp_linux_explicit_synchronization_v1_interface, 1);
+                       wl_vk_display->use_explicit_sync = TPL_TRUE;
+                       TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
+               }
+       }
+#endif
+}
+
+/* wl_registry global_remove listener: intentionally a no-op. */
+static void
+__cb_wl_resistry_global_remove_callback(void *data,
+                                                                               struct wl_registry *wl_registry,
+                                                                               uint32_t name)
+{
+}
+
+/* Listener pair installed on the registry in _thread_wl_display_init. */
+static const struct wl_registry_listener registry_listener = {
+       __cb_wl_resistry_global_callback,
+       __cb_wl_resistry_global_remove_callback
+};
+
+/* Reports a wayland display error once per distinct errno value
+ * (deduplicated via last_error). For EPROTO, additionally logs the
+ * failing interface, error code and proxy id.
+ * NOTE(review): "falied" is a typo in the log format string — left
+ * unchanged here since it is runtime output recorded by this commit. */
+static void
+_wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
+                                         const char *func_name)
+{
+       int dpy_err;
+       char buf[1024];
+       strerror_r(errno, buf, sizeof(buf));
+
+       if (wl_vk_display->last_error == errno)
+               return;
+
+       TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
+
+       dpy_err = wl_display_get_error(wl_vk_display->wl_display);
+       if (dpy_err == EPROTO) {
+               const struct wl_interface *err_interface;
+               uint32_t err_proxy_id, err_code;
+               err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
+                                                                                                &err_interface,
+                                                                                                &err_proxy_id);
+               TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
+                               err_interface->name, err_code, err_proxy_id);
+       }
+
+       wl_vk_display->last_error = errno;
+}
+
+/* Runs on the tpl thread. Creates the backend event queue, wires up
+ * wayland-tbm, and performs a registry roundtrip on a temporary queue
+ * (via a display proxy wrapper) to bind optional protocols. The
+ * temporary wrapper/registry/queue are always released at the fini:
+ * label; on success wl_initialized is set. */
+static tpl_result_t
+_thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
+{
+       struct wl_registry *registry                = NULL;
+       struct wl_event_queue *queue                = NULL;
+       struct wl_display *display_wrapper          = NULL;
+       struct wl_proxy *wl_tbm                     = NULL;
+       struct wayland_tbm_client *wl_tbm_client    = NULL;
+       int ret;
+       tpl_result_t result = TPL_ERROR_NONE;
+
+       /* temporary queue used only for the registry roundtrip below */
+       queue = wl_display_create_queue(wl_vk_display->wl_display);
+       if (!queue) {
+               TPL_ERR("Failed to create wl_queue wl_display(%p)",
+                               wl_vk_display->wl_display);
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
+       if (!wl_vk_display->ev_queue) {
+               TPL_ERR("Failed to create wl_queue wl_display(%p)",
+                               wl_vk_display->wl_display);
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
+       if (!display_wrapper) {
+               TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
+                               wl_vk_display->wl_display);
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
+
+       registry = wl_display_get_registry(display_wrapper);
+       if (!registry) {
+               TPL_ERR("Failed to create wl_registry");
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       wl_proxy_wrapper_destroy(display_wrapper);
+       display_wrapper = NULL;
+
+       wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
+       if (!wl_tbm_client) {
+               TPL_ERR("Failed to initialize wl_tbm_client.");
+               result = TPL_ERROR_INVALID_CONNECTION;
+               goto fini;
+       }
+
+       wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
+       if (!wl_tbm) {
+               TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
+               result = TPL_ERROR_INVALID_CONNECTION;
+               goto fini;
+       }
+
+       /* route wl_tbm events to the backend's own event queue */
+       wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
+       wl_vk_display->wl_tbm_client = wl_tbm_client;
+
+       if (wl_registry_add_listener(registry, &registry_listener,
+                                                                wl_vk_display)) {
+               TPL_ERR("Failed to wl_registry_add_listener");
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+       ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
+       if (ret == -1) {
+               _wl_display_print_err(wl_vk_display, "roundtrip_queue");
+               result = TPL_ERROR_INVALID_OPERATION;
+               goto fini;
+       }
+
+#if TIZEN_FEATURE_ENABLE
+       /* explicit_sync is set by the registry listener during the roundtrip */
+       if (wl_vk_display->explicit_sync) {
+               wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
+                                                  wl_vk_display->ev_queue);
+               TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
+                                 wl_vk_display->explicit_sync);
+       }
+#endif
+
+       wl_vk_display->wl_initialized = TPL_TRUE;
+
+       TPL_INFO("[WAYLAND_INIT]",
+                        "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
+                        wl_vk_display, wl_vk_display->wl_display,
+                        wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
+#if TIZEN_FEATURE_ENABLE
+       TPL_INFO("[WAYLAND_INIT]",
+                        "explicit_sync(%p)",
+                        wl_vk_display->explicit_sync);
+#endif
+fini:
+       /* cleanup of temporaries runs on success and failure alike */
+       if (display_wrapper)
+               wl_proxy_wrapper_destroy(display_wrapper);
+       if (registry)
+               wl_registry_destroy(registry);
+       if (queue)
+               wl_event_queue_destroy(queue);
+
+       return result;
+}
+
+/* Runs on the tpl thread: reverse of _thread_wl_display_init.
+ * Cancels a pending armed read, flushes queued events, then tears
+ * down explicit-sync, wayland-tbm, and the event queue. */
+static void
+_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
+{
+       /* If wl_vk_display is in prepared state, cancel it */
+       if (wl_vk_display->prepared) {
+               wl_display_cancel_read(wl_vk_display->wl_display);
+               wl_vk_display->prepared = TPL_FALSE;
+       }
+
+       if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
+                                                                                 wl_vk_display->ev_queue) == -1) {
+               _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
+       }
+
+#if TIZEN_FEATURE_ENABLE
+       if (wl_vk_display->explicit_sync) {
+               TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
+                                "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
+                                wl_vk_display, wl_vk_display->explicit_sync);
+               zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
+               wl_vk_display->explicit_sync = NULL;
+       }
+#endif
+
+       if (wl_vk_display->wl_tbm_client) {
+               struct wl_proxy *wl_tbm = NULL;
+
+               /* detach wl_tbm from the backend queue before deinit */
+               wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
+                                                                               wl_vk_display->wl_tbm_client);
+               if (wl_tbm) {
+                       wl_proxy_set_queue(wl_tbm, NULL);
+               }
+
+               TPL_INFO("[WL_TBM_DEINIT]",
+                                "wl_vk_display(%p) wl_tbm_client(%p)",
+                                wl_vk_display, wl_vk_display->wl_tbm_client);
+               wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
+               wl_vk_display->wl_tbm_client = NULL;
+       }
+
+       wl_event_queue_destroy(wl_vk_display->ev_queue);
+
+       wl_vk_display->wl_initialized = TPL_FALSE;
+
+       TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
+                        wl_vk_display, wl_vk_display->wl_display);
+}
+
+/* Thread entry hook: initializes wayland and tdm state on the newly
+ * created tpl thread. tdm failure is non-fatal (vblank waiting is
+ * simply unavailable). */
+static void*
+_thread_init(void *data)
+{
+       tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
+
+       if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
+               TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
+                               wl_vk_display, wl_vk_display->wl_display);
+       }
+
+       /* NOTE(review): a wayland init failure above is only logged and the
+        * thread continues with a partially initialized display — confirm
+        * this is intended. */
+       if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
+               TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
+       }
+
+       return wl_vk_display;
+}
+
+/* gsource prepare for the wl_display fd: arms a read with
+ * wl_display_prepare_read_queue, dispatching already-queued events
+ * first. Returns TPL_TRUE (skip polling, go straight to dispatch)
+ * only when last_error is set. */
+static tpl_bool_t
+__thread_func_disp_prepare(tpl_gsource *gsource)
+{
+       tpl_wl_vk_display_t *wl_vk_display =
+               (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
+
+       /* If this wl_vk_display is already prepared,
+        * do nothing in this function. */
+       if (wl_vk_display->prepared)
+               return TPL_FALSE;
+
+       /* If there is a last_error, there is no need to poll,
+        * so skip directly to dispatch.
+        * prepare -> dispatch */
+       if (wl_vk_display->last_error)
+               return TPL_TRUE;
+
+       while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
+                                                                                wl_vk_display->ev_queue) != 0) {
+               if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
+                                                                                         wl_vk_display->ev_queue) == -1) {
+                       _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
+               }
+       }
+
+       wl_vk_display->prepared = TPL_TRUE;
+
+       wl_display_flush(wl_vk_display->wl_display);
+
+       return TPL_FALSE;
+}
+
+/* gsource check for the wl_display fd: completes the read armed in
+ * prepare. Reads events when the fd is readable, otherwise cancels
+ * the read. Always clears the prepared flag before returning. */
+static tpl_bool_t
+__thread_func_disp_check(tpl_gsource *gsource)
+{
+       tpl_wl_vk_display_t *wl_vk_display =
+               (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
+       tpl_bool_t ret = TPL_FALSE;
+
+       if (!wl_vk_display->prepared)
+               return ret;
+
+       /* If prepared, but last_error is set,
+        * cancel_read is executed and FALSE is returned.
+        * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
+        * and skipping disp_check from prepare to disp_dispatch.
+        * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
+       if (wl_vk_display->prepared && wl_vk_display->last_error) {
+               wl_display_cancel_read(wl_vk_display->wl_display);
+               return ret;
+       }
+
+       if (tpl_gsource_check_io_condition(gsource)) {
+               if (wl_display_read_events(wl_vk_display->wl_display) == -1)
+                       _wl_display_print_err(wl_vk_display, "read_event");
+               ret = TPL_TRUE;
+       } else {
+               wl_display_cancel_read(wl_vk_display->wl_display);
+               ret = TPL_FALSE;
+       }
+
+       wl_vk_display->prepared = TPL_FALSE;
+
+       return ret;
+}
+
+/* gsource dispatch for the wl_display fd: dispatches pending events
+ * on the backend queue under wl_event_mutex, then flushes outgoing
+ * requests. Returns TPL_FALSE (source removal) once last_error is
+ * set. */
+static tpl_bool_t
+__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
+{
+       tpl_wl_vk_display_t *wl_vk_display =
+               (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
+
+       TPL_IGNORE(message);
+
+       /* If there is last_error, SOURCE_REMOVE should be returned
+        * to remove the gsource from the main loop.
+        * This is because wl_vk_display is not valid since last_error was set.*/
+       if (wl_vk_display->last_error) {
+               return TPL_FALSE;
+       }
+
+       tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
+       if (tpl_gsource_check_io_condition(gsource)) {
+               if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
+                                                                                         wl_vk_display->ev_queue) == -1) {
+                       _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
+               }
+       }
+
+       wl_display_flush(wl_vk_display->wl_display);
+       tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+
+       return TPL_TRUE;
+}
+
+/* gsource finalize for the wl_display fd source: tears down wayland
+ * state if it was initialized. */
+static void
+__thread_func_disp_finalize(tpl_gsource *gsource)
+{
+       tpl_wl_vk_display_t *wl_vk_display =
+               (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
+
+       if (wl_vk_display->wl_initialized)
+               _thread_wl_display_fini(wl_vk_display);
+
+       TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)",
+                         wl_vk_display, gsource);
+
+       return;
+}
+
+
+/* gsource vtable for the wl_display fd. */
+static tpl_gsource_functions disp_funcs = {
+       .prepare  = __thread_func_disp_prepare,
+       .check    = __thread_func_disp_check,
+       .dispatch = __thread_func_disp_dispatch,
+       .finalize = __thread_func_disp_finalize,
+};
+
+static tpl_result_t
+__tpl_wl_vk_display_init(tpl_display_t *display)
+{
        TPL_ASSERT(display);
 
+       tpl_wl_vk_display_t *wl_vk_display = NULL;
+
        /* Do not allow default display in wayland */
        if (!display->native_handle) {
                TPL_ERR("Invalid native handle for display.");
                return TPL_ERROR_INVALID_PARAMETER;
        }
 
-       wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) calloc(1,
-                                                        sizeof(tpl_wayland_vk_wsi_display_t));
-       if (!wayland_vk_wsi_display) {
-               TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_display_t.");
-               return TPL_ERROR_OUT_OF_MEMORY;
+       if (!_check_native_handle_is_wl_display(display->native_handle)) {
+               TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
+               return TPL_ERROR_INVALID_PARAMETER;
        }
 
-       display->backend.data = wayland_vk_wsi_display;
+       wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
+                                                       sizeof(tpl_wl_vk_display_t));
+       if (!wl_vk_display) {
+               TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
+               return TPL_ERROR_OUT_OF_MEMORY;
+       }
 
-       if (twe_check_native_handle_is_wl_display(display->native_handle)) {
-               wayland_vk_wsi_display->wl_thread = twe_thread_create();
-               if (!wayland_vk_wsi_display->wl_thread) {
-                       TPL_ERR("Failed to create twe_thread.");
-                       goto free_display;
+       display->backend.data             = wl_vk_display;
+       display->bufmgr_fd                = -1;
+
+       wl_vk_display->tdm_initialized    = TPL_FALSE;
+       wl_vk_display->wl_initialized     = TPL_FALSE;
+
+       wl_vk_display->ev_queue           = NULL;
+       wl_vk_display->wl_display         = (struct wl_display *)display->native_handle;
+       wl_vk_display->last_error         = 0;
+       wl_vk_display->use_explicit_sync  = TPL_FALSE;   // default disabled
+       wl_vk_display->prepared           = TPL_FALSE;
+
+       /* Wayland Interfaces */
+#if TIZEN_FEATURE_ENABLE
+       wl_vk_display->explicit_sync      = NULL;
+#endif
+       wl_vk_display->wl_tbm_client      = NULL;
+
+       /* Vulkan specific surface capabilities */
+       wl_vk_display->min_buffer         = 2;
+       wl_vk_display->max_buffer         = VK_CLIENT_QUEUE_SIZE;
+       wl_vk_display->present_modes      = TPL_DISPLAY_PRESENT_MODE_FIFO;
+
+       wl_vk_display->use_wait_vblank    = TPL_TRUE;   // default enabled
+       {
+               char *env = tpl_getenv("TPL_WAIT_VBLANK");
+               if (env && !atoi(env)) {
+                       wl_vk_display->use_wait_vblank = TPL_FALSE;
                }
+       }
 
-               wayland_vk_wsi_display->twe_display =
-                       twe_display_add(wayland_vk_wsi_display->wl_thread,
-                                                       display->native_handle,
-                                                       display->backend.type);
-               if (!wayland_vk_wsi_display->twe_display) {
-                       TPL_ERR("Failed to add native_display(%p) to thread(%p)",
-                                       display->native_handle,
-                                       wayland_vk_wsi_display->wl_thread);
-                       goto free_display;
-               }
+       tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
 
-       } else {
-               TPL_ERR("Invalid native handle for display.");
+       /* Create gthread */
+       wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
+                                                                                          (tpl_gthread_func)_thread_init,
+                                                                                          (void *)wl_vk_display);
+       if (!wl_vk_display->thread) {
+               TPL_ERR("Failed to create wl_vk_thread");
                goto free_display;
        }
 
-       TPL_LOG_T("WL_VK",
-                         "[INIT DISPLAY] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)",
-                         wayland_vk_wsi_display,
-                         wayland_vk_wsi_display->wl_thread,
-                         wayland_vk_wsi_display->twe_display);
+       wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
+                                                                                                       (void *)wl_vk_display,
+                                                                                                       wl_display_get_fd(wl_vk_display->wl_display),
+                                                                                                       &disp_funcs, SOURCE_TYPE_NORMAL);
+       if (!wl_vk_display->disp_source) {
+               TPL_ERR("Failed to add native_display(%p) to thread(%p)",
+                               display->native_handle,
+                               wl_vk_display->thread);
+               goto free_display;
+       }
+
+       wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread,
+                                                                                                  (void *)wl_vk_display,
+                                                                                                  wl_vk_display->tdm_display_fd,
+                                                                                                  &tdm_funcs, SOURCE_TYPE_NORMAL);
+       if (!wl_vk_display->tdm_source) {
+               TPL_ERR("Failed to create tdm_gsource\n");
+               goto free_display;
+       }
+
+       TPL_INFO("[DISPLAY_INIT]",
+                        "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
+                        wl_vk_display,
+                        wl_vk_display->thread,
+                        wl_vk_display->wl_display);
+
+       TPL_INFO("[DISPLAY_INIT]",
+                        "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
+                        wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
+                        wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
 
        return TPL_ERROR_NONE;
 
 free_display:
-       if (wayland_vk_wsi_display) {
-               if (wayland_vk_wsi_display->twe_display)
-                       twe_display_del(wayland_vk_wsi_display->twe_display);
-               if (wayland_vk_wsi_display->wl_thread)
-                       twe_thread_destroy(wayland_vk_wsi_display->wl_thread);
-
-               wayland_vk_wsi_display->wl_thread = NULL;
-               wayland_vk_wsi_display->twe_display = NULL;
+       if (wl_vk_display->thread) {
+               if (wl_vk_display->tdm_source)
+                       tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
+               if (wl_vk_display->disp_source)
+                       tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
 
-               free(wayland_vk_wsi_display);
-               display->backend.data = NULL;
+               tpl_gthread_destroy(wl_vk_display->thread);
        }
 
+       wl_vk_display->thread = NULL;
+       free(wl_vk_display);
+
+       display->backend.data = NULL;
        return TPL_ERROR_INVALID_OPERATION;
 }
 
 static void
-__tpl_wl_vk_wsi_display_fini(tpl_display_t *display)
+/* Tears down the backend display state attached to |display|.
+ * Destroys the tdm/display gsources, stops the worker thread, clears the
+ * event mutex and frees the backend data. Safe to call when backend.data
+ * is already NULL. */
+__tpl_wl_vk_display_fini(tpl_display_t *display)
 {
-       tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display;
+       tpl_wl_vk_display_t *wl_vk_display;
 
        TPL_ASSERT(display);
 
-       wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data;
-       if (wayland_vk_wsi_display) {
-
-               TPL_LOG_T("WL_VK",
-                                 "[FINI] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)",
-                                 wayland_vk_wsi_display,
-                                 wayland_vk_wsi_display->wl_thread,
-                                 wayland_vk_wsi_display->twe_display);
+       wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
+       if (wl_vk_display) {
+               TPL_INFO("[DISPLAY_FINI]",
+                                "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
+                                wl_vk_display,
+                                wl_vk_display->thread,
+                                wl_vk_display->wl_display);
+
+               /* Destroy the tdm source only if tdm was actually initialized;
+                * TPL_TRUE requests a blocking destroy (waits for the source's
+                * finalize to run on the worker thread). */
+               if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) {
+                       tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
+                       wl_vk_display->tdm_source = NULL;
+               }
 
-               if (wayland_vk_wsi_display->twe_display) {
-                       tpl_result_t ret = TPL_ERROR_NONE;
-                       ret = twe_display_del(wayland_vk_wsi_display->twe_display);
-                       if (ret != TPL_ERROR_NONE)
-                               TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)",
-                                               wayland_vk_wsi_display->twe_display,
-                                               wayland_vk_wsi_display->wl_thread);
-                       wayland_vk_wsi_display->twe_display = NULL;
+               if (wl_vk_display->disp_source) {
+                       tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
+                       wl_vk_display->disp_source = NULL;
                }
 
-               if (wayland_vk_wsi_display->wl_thread) {
-                       twe_thread_destroy(wayland_vk_wsi_display->wl_thread);
-                       wayland_vk_wsi_display->wl_thread = NULL;
+               /* Thread is destroyed after both gsources so their finalize
+                * callbacks have already run on it. */
+               if (wl_vk_display->thread) {
+                       tpl_gthread_destroy(wl_vk_display->thread);
+                       wl_vk_display->thread = NULL;
                }
 
-               free(wayland_vk_wsi_display);
+               /* No thread can touch the mutex past this point. */
+               tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);
+
+               free(wl_vk_display);
        }
+
        display->backend.data = NULL;
 }
 
 static tpl_result_t
-__tpl_wl_vk_wsi_display_query_config(tpl_display_t *display,
+__tpl_wl_vk_display_query_config(tpl_display_t *display,
                tpl_surface_type_t surface_type,
                int red_size, int green_size,
                int blue_size, int alpha_size,
@@ -178,7 +870,7 @@ __tpl_wl_vk_wsi_display_query_config(tpl_display_t *display,
 }
 
 static tpl_result_t
-__tpl_wl_vk_wsi_display_filter_config(tpl_display_t *display,
+__tpl_wl_vk_display_filter_config(tpl_display_t *display,
                                                                          int *visual_id,
                                                                          int alpha_size)
 {
@@ -189,490 +881,1695 @@ __tpl_wl_vk_wsi_display_filter_config(tpl_display_t *display,
 }
 
 static tpl_result_t
-__tpl_wl_vk_wsi_display_query_window_supported_buffer_count(
+/* Reports the display's supported min/max window buffer counts.
+ * Either out-pointer may be NULL; only non-NULL ones are written.
+ * Returns TPL_ERROR_INVALID_PARAMETER when backend data is missing. */
+__tpl_wl_vk_display_query_window_supported_buffer_count(
        tpl_display_t *display,
        tpl_handle_t window, int *min, int *max)
 {
-       tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
-       tpl_result_t res = TPL_ERROR_NONE;
+       tpl_wl_vk_display_t *wl_vk_display = NULL;
+
+       TPL_ASSERT(display);
+       TPL_ASSERT(window);
+
+       wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+
+       if (min) *min = wl_vk_display->min_buffer;
+       if (max) *max = wl_vk_display->max_buffer;
+
+       return TPL_ERROR_NONE;
+}
+
+static tpl_result_t
+/* Reports the bitmask of present modes supported by the display.
+ * |present_modes| may be NULL, in which case nothing is written. */
+__tpl_wl_vk_display_query_window_supported_present_modes(
+       tpl_display_t *display,
+       tpl_handle_t window, int *present_modes)
+{
+       tpl_wl_vk_display_t *wl_vk_display = NULL;
+
+       TPL_ASSERT(display);
+       TPL_ASSERT(window);
+
+       wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+
+       if (present_modes) {
+               *present_modes = wl_vk_display->present_modes;
+       }
+
+       return TPL_ERROR_NONE;
+}
+
+static void
+/* Forces every tracked buffer of |wl_vk_surface| back to RELEASED.
+ * For each slot in buffers[]: detaches it from the surface, optionally
+ * waits (bounded, 16ms) for in-flight buffers to reach a terminal state,
+ * then releases or cancel-dequeues it on the tbm_queue as appropriate.
+ * Lock order: wl_event_mutex -> buffers_mutex -> per-buffer mutex. */
+_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+       tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
+       tpl_wl_vk_display_t *wl_vk_display      = wl_vk_surface->wl_vk_display;
+       tpl_wl_vk_swapchain_t *swapchain        = wl_vk_surface->swapchain;
+       tpl_wl_vk_buffer_t *wl_vk_buffer        = NULL;
+       tpl_bool_t need_to_release              = TPL_FALSE;
+       tpl_bool_t need_to_cancel               = TPL_FALSE;
+       buffer_status_t status                  = RELEASED;
+       int idx                                 = 0;
+
+       /* Loop ends when all tracked buffers are drained or the array is
+        * exhausted, whichever comes first. */
+       while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
+               tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
+               tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
+               wl_vk_buffer = wl_vk_surface->buffers[idx];
+
+               if (wl_vk_buffer) {
+                       /* Claim the slot so no other path can process it. */
+                       wl_vk_surface->buffers[idx] = NULL;
+                       wl_vk_surface->buffer_cnt--;
+               } else {
+                       tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
+                       tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+                       idx++;
+                       continue;
+               }
+
+               tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
+
+               tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+               status = wl_vk_buffer->status;
+
+               TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
+                                 idx, wl_vk_buffer,
+                                 wl_vk_buffer->tbm_surface,
+                                 status_to_string[status]);
+
+               if (status >= ENQUEUED) {
+                       tpl_bool_t need_to_wait  = TPL_FALSE;
+                       tpl_result_t wait_result = TPL_ERROR_NONE;
+
+                       /* The terminal state to wait for differs: without
+                        * explicit sync it is WAITING_VBLANK, with explicit
+                        * sync it is COMMITTED. */
+                       if (!wl_vk_display->use_explicit_sync &&
+                               status < WAITING_VBLANK)
+                               need_to_wait = TPL_TRUE;
+
+                       if (wl_vk_display->use_explicit_sync &&
+                               status < COMMITTED)
+                               need_to_wait = TPL_TRUE;
+
+                       if (need_to_wait) {
+                               /* Drop wl_event_mutex while waiting so the
+                                * worker thread can make progress and signal. */
+                               tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+                               wait_result = tpl_cond_timed_wait(&wl_vk_buffer->cond,
+                                                                                                 &wl_vk_buffer->mutex,
+                                                                                                 16); /* 16ms */
+                               tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
+
+                               status = wl_vk_buffer->status;
+
+                               if (wait_result == TPL_ERROR_TIME_OUT)
+                                       TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
+                                                        wl_vk_buffer);
+                       }
+               }
+
+               /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
+               /* It has been acquired but has not yet been released, so this
+                * buffer must be released. */
+               need_to_release = (status >= ACQUIRED && status <= COMMITTED);
+
+               /* After dequeue, it has not been enqueued yet
+                * so cancel_dequeue must be performed. */
+               need_to_cancel = (status == DEQUEUED);
+
+               if (swapchain && swapchain->tbm_queue) {
+                       if (need_to_release) {
+                               tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
+                                                                                                       wl_vk_buffer->tbm_surface);
+                               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                                       TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+                                                       wl_vk_buffer->tbm_surface, tsq_err);
+                       }
+
+                       if (need_to_cancel) {
+                               tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
+                                                                                                                  wl_vk_buffer->tbm_surface);
+                               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                                       TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
+                                                       wl_vk_buffer->tbm_surface, tsq_err);
+                       }
+               }
+
+               wl_vk_buffer->status = RELEASED;
+
+               tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+               /* Drop the ref taken when the buffer was acquired/dequeued. */
+               if (need_to_release || need_to_cancel)
+                       tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);
+
+               tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+
+               idx++;
+       }
+}
+
+static tdm_client_vblank*
+/* Creates a vblank object on the "primary" tdm output of |tdm_client|.
+ * Fake vblank is enabled (events still arrive when the output is off)
+ * and sync mode is disabled (callbacks are delivered asynchronously).
+ * Returns NULL on any tdm failure. */
+_thread_create_tdm_client_vblank(tdm_client *tdm_client)
+{
+       tdm_client_vblank *vblank = NULL;
+       tdm_client_output *tdm_output = NULL;
+       tdm_error tdm_err = TDM_ERROR_NONE;
+
+       if (!tdm_client) {
+               TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
+               return NULL;
+       }
+
+       tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
+       if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
+               return NULL;
+       }
+
+       vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
+       if (!vblank || tdm_err != TDM_ERROR_NONE) {
+               TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
+               return NULL;
+       }
+
+       tdm_client_vblank_set_enable_fake(vblank, 1);
+       tdm_client_vblank_set_sync(vblank, 0);
+
+       return vblank;
+}
+
+static void
+/* Per-surface initialization executed on the worker thread (INIT_SURFACE
+ * message handler). Creates the vblank object, optionally the explicit
+ * sync surface object, and the vblank-waiting buffer list. A missing
+ * vblank is tolerated; a failed surface_sync disables explicit sync for
+ * the whole display. */
+_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+       tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
+
+       /* tbm_surface_queue will be created at swapchain_create */
+
+       wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
+                                                               wl_vk_display->tdm_client);
+       if (wl_vk_surface->vblank) {
+               TPL_INFO("[VBLANK_INIT]",
+                                "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
+                                wl_vk_surface, wl_vk_display->tdm_client,
+                                wl_vk_surface->vblank);
+       }
+
+#if TIZEN_FEATURE_ENABLE
+       if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
+               wl_vk_surface->surface_sync =
+                       zwp_linux_explicit_synchronization_v1_get_synchronization(
+                                       wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
+               if (wl_vk_surface->surface_sync) {
+                       TPL_INFO("[EXPLICIT_SYNC_INIT]",
+                                        "wl_vk_surface(%p) surface_sync(%p)",
+                                        wl_vk_surface, wl_vk_surface->surface_sync);
+               } else {
+                       TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
+                                        wl_vk_surface);
+                       /* Fall back to implicit sync display-wide. */
+                       wl_vk_display->use_explicit_sync = TPL_FALSE;
+               }
+       }
+#endif
+       wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
+}
+
+static void
+/* Per-surface teardown executed on the worker thread (gsource finalize).
+ * Frees the vblank-waiting list and destroys the surface_sync and vblank
+ * objects, all under surf_mutex. Inverse of _thread_wl_vk_surface_init. */
+_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+       tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+
+       TPL_INFO("[SURFACE_FINI]",
+                        "wl_vk_surface(%p) wl_surface(%p)",
+                        wl_vk_surface, wl_vk_surface->wl_surface);
+
+       if (wl_vk_surface->vblank_waiting_buffers) {
+               /* NULL free_func: list nodes only reference buffers owned
+                * elsewhere, so nothing extra is freed here. */
+               __tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
+               wl_vk_surface->vblank_waiting_buffers = NULL;
+       }
+
+#if TIZEN_FEATURE_ENABLE
+       if (wl_vk_surface->surface_sync) {
+               TPL_INFO("[SURFACE_SYNC_DESTROY]",
+                                "wl_vk_surface(%p) surface_sync(%p)",
+                                 wl_vk_surface, wl_vk_surface->surface_sync);
+               zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
+               wl_vk_surface->surface_sync = NULL;
+       }
+#endif
+
+       if (wl_vk_surface->vblank) {
+               TPL_INFO("[VBLANK_DESTROY]",
+                                "wl_vk_surface(%p) vblank(%p)",
+                                wl_vk_surface, wl_vk_surface->vblank);
+               tdm_client_vblank_destroy(wl_vk_surface->vblank);
+               wl_vk_surface->vblank = NULL;
+       }
+
+       tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+}
+
+static tpl_bool_t
+/* Worker-thread dispatcher for per-surface messages sent via
+ * tpl_gsource_send_message. Handles INIT_SURFACE, CREATE_QUEUE,
+ * DESTROY_QUEUE (each signals surf_cond back to the waiting caller)
+ * and ACQUIRABLE (no signal; fire-and-forget). Always returns TPL_TRUE
+ * to keep the gsource alive. */
+__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
+{
+       tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+
+       wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
+
+       tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+       if (message == INIT_SURFACE) { /* Initialize surface */
+               TPL_DEBUG("wl_vk_surface(%p) initialize message received!",
+                                 wl_vk_surface);
+               _thread_wl_vk_surface_init(wl_vk_surface);
+               tpl_gcond_signal(&wl_vk_surface->surf_cond);    
+       } else if (message == CREATE_QUEUE) { /* Create tbm_surface_queue */
+               TPL_DEBUG("wl_vk_surface(%p) queue creation message received!",
+                                 wl_vk_surface);
+               if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
+                       != TPL_ERROR_NONE) {
+                       TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
+                                       wl_vk_surface);
+               }
+               tpl_gcond_signal(&wl_vk_surface->surf_cond);
+       } else if (message == DESTROY_QUEUE) { /* swapchain destroy */
+               TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!",
+                                 wl_vk_surface);
+               _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
+               tpl_gcond_signal(&wl_vk_surface->surf_cond);
+       } else if (message == ACQUIRABLE) { /* Acquirable message */
+               TPL_DEBUG("wl_vk_surface(%p) acquirable message received!",
+                                 wl_vk_surface);
+               if (_thread_surface_queue_acquire(wl_vk_surface)
+                       != TPL_ERROR_NONE) {
+                       TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
+                                       wl_vk_surface);
+               }
+       }
+
+       /* init to NONE_MESSAGE */
+       wl_vk_surface->sent_message = NONE_MESSAGE;
+
+       tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+
+       return TPL_TRUE;
+}
+
+static void
+/* gsource finalize hook for a surface: runs the worker-thread teardown
+ * (_thread_wl_vk_surface_fini) when the surf_source is destroyed. */
+__thread_func_surf_finalize(tpl_gsource *gsource)
+{
+       tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+
+       wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
+       TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
+
+       _thread_wl_vk_surface_fini(wl_vk_surface);
+
+       TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)",
+                         wl_vk_surface, gsource);
+}
+
+/* gsource vtable for per-surface message sources (fd-less, message-driven:
+ * no prepare/check needed). */
+static tpl_gsource_functions surf_funcs = {
+       .prepare = NULL,
+       .check = NULL,
+       .dispatch = __thread_func_surf_dispatch,
+       .finalize = __thread_func_surf_finalize,
+};
+
+
+static tpl_result_t
+/* Backend surface-init entry point. Allocates the wl_vk_surface, creates
+ * its message gsource on the display's worker thread, zero-initializes
+ * all fields (this is the change HEAD describes: every member is now
+ * explicitly initialized), then sends INIT_SURFACE to the worker thread
+ * and blocks until the thread-side init has completed.
+ * Returns TPL_ERROR_NONE, TPL_ERROR_OUT_OF_MEMORY, or
+ * TPL_ERROR_INVALID_OPERATION when the gsource cannot be created. */
+__tpl_wl_vk_surface_init(tpl_surface_t *surface)
+{
+       tpl_wl_vk_surface_t *wl_vk_surface      = NULL;
+       tpl_wl_vk_display_t *wl_vk_display      = NULL;
+       tpl_gsource *surf_source                = NULL;
+
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->display);
+       TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
+       TPL_ASSERT(surface->native_handle);
+
+       wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+
+       wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
+                                                        sizeof(tpl_wl_vk_surface_t));
+       if (!wl_vk_surface) {
+               TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
+               return TPL_ERROR_OUT_OF_MEMORY;
+       }
+
+       /* fd -1: message-only source, no file descriptor to poll. */
+       surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
+                                                                        -1, &surf_funcs, SOURCE_TYPE_NORMAL);
+       if (!surf_source) {
+               TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
+                               wl_vk_surface);
+               free(wl_vk_surface);
+               surface->backend.data = NULL;
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       surface->backend.data                  = (void *)wl_vk_surface;
+       surface->width                                 = -1;
+       surface->height                        = -1;
+
+       wl_vk_surface->surf_source             = surf_source;
+       wl_vk_surface->swapchain               = NULL;
+
+       wl_vk_surface->wl_vk_display           = wl_vk_display;
+       wl_vk_surface->wl_surface              = (struct wl_surface *)surface->native_handle;
+       wl_vk_surface->tpl_surface             = surface;
+
+       wl_vk_surface->reset                   = TPL_FALSE;
+       wl_vk_surface->is_activated            = TPL_FALSE;
+       wl_vk_surface->vblank_done             = TPL_TRUE;
+
+       wl_vk_surface->render_done_cnt         = 0;
+
+       wl_vk_surface->vblank                  = NULL;
+#if TIZEN_FEATURE_ENABLE
+       wl_vk_surface->surface_sync            = NULL;
+#endif
+
+       wl_vk_surface->sent_message            = NONE_MESSAGE;
+
+       wl_vk_surface->post_interval           = surface->post_interval;
+
+       {
+               int i = 0;
+               for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+                       wl_vk_surface->buffers[i]     = NULL;
+               wl_vk_surface->buffer_cnt         = 0;
+       }
+
+       tpl_gmutex_init(&wl_vk_surface->surf_mutex);
+       tpl_gcond_init(&wl_vk_surface->surf_cond);
+
+       tpl_gmutex_init(&wl_vk_surface->buffers_mutex);
+
+       /* Initialize in thread */
+       tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+       wl_vk_surface->sent_message = INIT_SURFACE;
+       tpl_gsource_send_message(wl_vk_surface->surf_source,
+                                                        wl_vk_surface->sent_message);
+       /* NOTE(review): tpl_gcond_wait is not wrapped in a predicate loop;
+        * assumes no spurious wakeups -- confirm tpl_utils_gthread semantics. */
+       tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+       tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+
+       TPL_INFO("[SURFACE_INIT]",
+                         "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
+                         surface, wl_vk_surface, wl_vk_surface->surf_source);
+
+       return TPL_ERROR_NONE;
+}
+
+static void
+/* Backend surface-fini entry point. Destroys the surf_source (blocking,
+ * so the worker-thread teardown in __thread_func_surf_finalize completes
+ * first), then clears mutex/cond and frees the wl_vk_surface. */
+__tpl_wl_vk_surface_fini(tpl_surface_t *surface)
+{
+       tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+       tpl_wl_vk_display_t *wl_vk_display = NULL;
+
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->display);
+
+       wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
+
+       wl_vk_display = (tpl_wl_vk_display_t *)
+                                                        surface->display->backend.data;
+       TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
+
+       TPL_INFO("[SURFACE_FINI][BEGIN]",
+                        "wl_vk_surface(%p) wl_surface(%p)",
+                        wl_vk_surface, wl_vk_surface->wl_surface);
+
+       /* NOTE(review): swapchain finalize branch is intentionally empty
+        * here (not yet implemented at this point in the patch) -- the
+        * swapchain pointer is simply dropped below. */
+       if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
+               /* finalize swapchain */
+
+       }
+
+       wl_vk_surface->swapchain        = NULL;
+
+       if (wl_vk_surface->surf_source)
+               tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
+       wl_vk_surface->surf_source      = NULL;
+
+       _print_buffer_lists(wl_vk_surface);
+
+       wl_vk_surface->wl_surface       = NULL;
+       wl_vk_surface->wl_vk_display    = NULL;
+       wl_vk_surface->tpl_surface      = NULL;
+
+       /* Lock/unlock pair ensures no other thread still holds surf_mutex
+        * before it is cleared. */
+       tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+       tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+       tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
+       tpl_gcond_clear(&wl_vk_surface->surf_cond);
+
+       TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);
+
+       free(wl_vk_surface);
+       surface->backend.data = NULL;
+}
+
+static tpl_result_t
+/* Stores the requested post (swap) interval on the backend surface.
+ * The value is not validated or clamped here; it takes effect on the
+ * next commit path that reads post_interval. */
+__tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
+                                                                                 int post_interval)
+{
+       tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
+
+       wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
+
+       TPL_INFO("[SET_POST_INTERVAL]",
+                        "wl_vk_surface(%p) post_interval(%d -> %d)",
+                        wl_vk_surface, wl_vk_surface->post_interval, post_interval);
+
+       wl_vk_surface->post_interval = post_interval;
+
+       return TPL_ERROR_NONE;
+}
+
+static tpl_bool_t
+/* A surface is valid while its reset flag is unset; the flag is raised by
+ * the tbm_queue reset callback (resize / activation change). */
+__tpl_wl_vk_surface_validate(tpl_surface_t *surface)
+{
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->backend.data);
+
+       tpl_wl_vk_surface_t *wl_vk_surface =
+               (tpl_wl_vk_surface_t *)surface->backend.data;
+
+       return !(wl_vk_surface->reset);
+}
+
+static void
+/* tbm_surface_queue reset callback (|data| is the wl_vk_surface).
+ * Logs size and activation changes, marks the surface as reset so the
+ * next frame picks up the new state, and forwards to the user reset_cb. */
+__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
+                                                         void *data)
+{
+       tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+       tpl_wl_vk_display_t *wl_vk_display = NULL;
+       tpl_wl_vk_swapchain_t *swapchain   = NULL;
+       tpl_surface_t *surface             = NULL;
+       tpl_bool_t is_activated            = TPL_FALSE;
+       int width, height;
+
+       wl_vk_surface = (tpl_wl_vk_surface_t *)data;
+       TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
+
+       wl_vk_display = wl_vk_surface->wl_vk_display;
+       TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
+
+       surface = wl_vk_surface->tpl_surface;
+       TPL_CHECK_ON_NULL_RETURN(surface);
+
+       swapchain = wl_vk_surface->swapchain;
+       TPL_CHECK_ON_NULL_RETURN(swapchain);
+
+       /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
+        * the changed window size at the next frame. */
+       width = tbm_surface_queue_get_width(tbm_queue);
+       height = tbm_surface_queue_get_height(tbm_queue);
+       if (surface->width != width || surface->height != height) {
+               TPL_INFO("[QUEUE_RESIZE]",
+                                "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
+                                wl_vk_surface, tbm_queue,
+                                surface->width, surface->height, width, height);
+       }
+
+       /* When queue_reset_callback is called, if is_activated is different from
+        * its previous state change the reset flag to TPL_TRUE to get a new buffer
+        * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
+       is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
+                                                                                                                  swapchain->tbm_queue);
+       if (wl_vk_surface->is_activated != is_activated) {
+               if (is_activated) {
+                       TPL_INFO("[ACTIVATED]",
+                                         "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
+                                         wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
+               } else {
+                       TPL_LOG_T("[DEACTIVATED]",
+                                         " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
+                                         wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
+               }
+       }
+
+       /* NOTE(review): the reset flag is raised on every queue reset, even
+        * when neither size nor activation changed above -- confirm intended. */
+       wl_vk_surface->reset = TPL_TRUE;
+
+       if (surface->reset_cb)
+               surface->reset_cb(surface->reset_data);
+}
+
+static void
+/* tbm_surface_queue acquirable callback: notifies the worker thread that
+ * a buffer can be acquired. The sent_message == NONE_MESSAGE guard
+ * coalesces notifications so at most one ACQUIRABLE is in flight. */
+__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
+                                                                  void *data)
+{
+       TPL_IGNORE(tbm_queue);
+
+       tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
+       TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
+
+       tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+       if (wl_vk_surface->sent_message == NONE_MESSAGE) {
+               wl_vk_surface->sent_message = ACQUIRABLE;
+               tpl_gsource_send_message(wl_vk_surface->surf_source,
+                                                                wl_vk_surface->sent_message);
+       }
+       tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+}
+
+static tpl_result_t
+_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+       TPL_ASSERT (wl_vk_surface);
+
+       tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
+       tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
+       tbm_surface_queue_h tbm_queue      = NULL;
+       tbm_bufmgr bufmgr = NULL;
+       unsigned int capability;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+
+       if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
+               TPL_ERR("buffer count(%d) must be higher than (%d)",
+                               swapchain->properties.buffer_count,
+                               wl_vk_display->min_buffer);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
+               TPL_ERR("buffer count(%d) must be lower than (%d)",
+                               swapchain->properties.buffer_count,
+                               wl_vk_display->max_buffer);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
+               TPL_ERR("Unsupported present_mode(%d)",
+                               swapchain->properties.present_mode);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (swapchain->tbm_queue) {
+               int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
+               int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
+
+               if (swapchain->swapchain_buffers) {
+                       int i;
+                       for (i = 0; i < swapchain->properties.buffer_count; i++) {
+                               if (swapchain->swapchain_buffers[i]) {
+                                       TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]);
+                                       tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
+                                       swapchain->swapchain_buffers[i] = NULL;
+                               }
+                       }
+
+                       free(swapchain->swapchain_buffers);
+                       swapchain->swapchain_buffers = NULL;
+               }
+
+               if (old_width != swapchain->properties.width ||
+                       old_height != swapchain->properties.height) {
+                       tbm_surface_queue_reset(swapchain->tbm_queue,
+                                                                       swapchain->properties.width,
+                                                                       swapchain->properties.height,
+                                                                       swapchain->properties.format);
+                       TPL_INFO("[RESIZE]",
+                                        "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
+                                        wl_vk_surface, swapchain, swapchain->tbm_queue,
+                                        old_width, old_height,
+                                        swapchain->properties.width,
+                                        swapchain->properties.height);
+               }
+
+               swapchain->properties.buffer_count =
+                       tbm_surface_queue_get_size(swapchain->tbm_queue);
+
+               wl_vk_surface->reset = TPL_FALSE;
+
+               __tpl_util_atomic_inc(&swapchain->ref_cnt);
+
+               TPL_INFO("[SWAPCHAIN_REUSE]",
+                                "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
+                                wl_vk_surface, swapchain, swapchain->tbm_queue,
+                                swapchain->properties.buffer_count);
+
+               return TPL_ERROR_NONE;
+       }
+
+       bufmgr = tbm_bufmgr_init(-1);
+       capability = tbm_bufmgr_get_capability(bufmgr);
+       tbm_bufmgr_deinit(bufmgr);
+
+       if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
+               tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
+                                                                       wl_vk_display->wl_tbm_client,
+                                                                       wl_vk_surface->wl_surface,
+                                                                       swapchain->properties.buffer_count,
+                                                                       swapchain->properties.width,
+                                                                       swapchain->properties.height,
+                                                                       TBM_FORMAT_ARGB8888);
+       } else {
+               tbm_queue = wayland_tbm_client_create_surface_queue(
+                                                                       wl_vk_display->wl_tbm_client,
+                                                                       wl_vk_surface->wl_surface,
+                                                                       swapchain->properties.buffer_count,
+                                                                       swapchain->properties.width,
+                                                                       swapchain->properties.height,
+                                                                       TBM_FORMAT_ARGB8888);
+       }
+
+       if (!tbm_queue) {
+               TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
+                               wl_vk_surface);
+               return TPL_ERROR_OUT_OF_MEMORY;
+       }
+
+       if (tbm_surface_queue_set_modes(
+                       tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
+                       TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
+                               tbm_queue);
+               tbm_surface_queue_destroy(tbm_queue);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       if (tbm_surface_queue_add_reset_cb(
+                       tbm_queue,
+                       __cb_tbm_queue_reset_callback,
+                       (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
+                               tbm_queue);
+               tbm_surface_queue_destroy(tbm_queue);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       if (tbm_surface_queue_add_acquirable_cb(
+                       tbm_queue,
+                       __cb_tbm_queue_acquirable_callback,
+                       (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
+                               tbm_queue);
+               tbm_surface_queue_destroy(tbm_queue);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       swapchain->tbm_queue = tbm_queue;
+
+       TPL_INFO("[TBM_QUEUE_CREATED]",
+                        "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)",
+                        wl_vk_surface, swapchain, tbm_queue);
+
+       return TPL_ERROR_NONE;
+}
+
+/* Create or reuse wl_vk_surface->swapchain, fill in the requested
+ * properties, then ask the surface thread to create the backing
+ * tbm_surface_queue (CREATE_QUEUE message); blocks on surf_cond until the
+ * thread has finished. Returns TPL_ERROR_NONE on success.
+ * NOTE(review): the 'format' argument is not written into
+ * swapchain->properties here - confirm the thread side applies it. */
+static tpl_result_t
+__tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
+               tbm_format format, int width,
+               int height, int buffer_count, int present_mode)
+{
+       tpl_wl_vk_surface_t *wl_vk_surface              = NULL;
+       tpl_wl_vk_display_t *wl_vk_display      = NULL;
+       tpl_wl_vk_swapchain_t *swapchain  = NULL;
+
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->display);
+
+       wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
+
+       wl_vk_display = (tpl_wl_vk_display_t *)
+                                                        surface->display->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+
+       swapchain = wl_vk_surface->swapchain;
+
+       /* First swapchain for this surface: allocate a zeroed object. */
+       if (swapchain == NULL) {
+               swapchain =
+                       (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
+                       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
+               swapchain->tbm_queue           = NULL;
+       }
+
+       swapchain->properties.buffer_count = buffer_count;
+       swapchain->properties.width        = width;
+       swapchain->properties.height       = height;
+       swapchain->properties.present_mode = present_mode;
+       swapchain->wl_vk_surface           = wl_vk_surface;
 
-       TPL_ASSERT(display);
-       TPL_ASSERT(window);
+       wl_vk_surface->swapchain           = swapchain;
 
-       wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data;
+       tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+       /* send swapchain create tbm_queue message */
+       wl_vk_surface->sent_message = CREATE_QUEUE;
+       tpl_gsource_send_message(wl_vk_surface->surf_source,
+                                                        wl_vk_surface->sent_message);
+       tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+       tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
 
-       if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION;
+       TPL_CHECK_ON_FALSE_ASSERT_FAIL(
+               swapchain->tbm_queue != NULL,
+               "[CRITICAL FAIL] Failed to create tbm_surface_queue");
 
-       res = twe_display_get_buffer_count(wayland_vk_wsi_display->twe_display,
-                                                                          min, max);
-       if (res != TPL_ERROR_NONE) {
-               TPL_ERR("Failed to query buffer count. twe_display(%p)",
-                               wayland_vk_wsi_display->twe_display);
-               return res;
-       }
+       wl_vk_surface->reset = TPL_FALSE;
+
+       /* Caller holds the initial swapchain reference. */
+       __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
 
        return TPL_ERROR_NONE;
 }
 
-static tpl_result_t
-__tpl_wl_vk_wsi_display_query_window_supported_present_modes(
-       tpl_display_t *display,
-       tpl_handle_t window, int *modes)
+/* Destroy the swapchain's tbm_surface_queue. The _thread_ prefix marks
+ * this as surface-thread context; presumably driven by the DESTROY_QUEUE
+ * message - confirm against the thread's message dispatcher. */
+static void
+_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
 {
-       tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
-       tpl_result_t res = TPL_ERROR_NONE;
-
-       TPL_ASSERT(display);
-       TPL_ASSERT(window);
+       TPL_ASSERT(wl_vk_surface);
 
-       wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data;
+       tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
 
-       if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION;
+       TPL_CHECK_ON_NULL_RETURN(swapchain);
 
-       if (modes) {
-               res = twe_display_get_present_mode(wayland_vk_wsi_display->twe_display,
-                                                                                  modes);
-               if (res != TPL_ERROR_NONE) {
-                       TPL_ERR("Failed to query present modes. twe_display(%p)",
-                                       wayland_vk_wsi_display->twe_display);
-                       return res;
-               }
+       if (swapchain->tbm_queue) {
+               TPL_INFO("[TBM_QUEUE_DESTROY]",
+                                "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
+                                wl_vk_surface, swapchain, swapchain->tbm_queue);
+               tbm_surface_queue_destroy(swapchain->tbm_queue);
+               swapchain->tbm_queue = NULL;
        }
-
-       return TPL_ERROR_NONE;
 }
 
+/* Drop one swapchain reference. When the count reaches zero: unref any
+ * exported swapchain buffers, clear tracked buffers, ask the surface
+ * thread to destroy the tbm_queue (DESTROY_QUEUE, waits on surf_cond),
+ * then free the swapchain object itself. */
 static tpl_result_t
-__tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface)
+__tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
 {
-       tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
-       tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
-       twe_surface_h twe_surface = NULL;
+       tpl_wl_vk_swapchain_t *swapchain   = NULL;
+       tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+       tpl_wl_vk_display_t *wl_vk_display = NULL;
 
        TPL_ASSERT(surface);
-       TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
-       TPL_ASSERT(surface->native_handle);
+       TPL_ASSERT(surface->display);
 
-       wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) calloc(1,
-                                                        sizeof(tpl_wayland_vk_wsi_surface_t));
-       if (!wayland_vk_wsi_surface) {
-               TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_surface_t.");
-               return TPL_ERROR_OUT_OF_MEMORY;
+       wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
+
+       wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+
+       swapchain = wl_vk_surface->swapchain;
+       if (!swapchain) {
+               TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
+                               wl_vk_surface);
+               return TPL_ERROR_INVALID_OPERATION;
        }
 
-       wayland_vk_wsi_display =
-               (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data;
-       if (!wayland_vk_wsi_display) {
-               TPL_ERR("Invalid parameter. wayland_vk_wsi_display(%p)",
-                               wayland_vk_wsi_display);
-               free(wayland_vk_wsi_surface);
-               return TPL_ERROR_INVALID_PARAMETER;
+       /* Other holders remain; nothing to tear down yet. */
+       if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
+               TPL_INFO("[DESTROY_SWAPCHAIN]",
+                                "wl_vk_surface(%p) swapchain(%p) still valid.",
+                                wl_vk_surface, swapchain);
+               return TPL_ERROR_NONE;
        }
 
-       surface->backend.data = (void *)wayland_vk_wsi_surface;
-       wayland_vk_wsi_surface->tbm_queue = NULL;
+       TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
+                        "wl_vk_surface(%p) swapchain(%p)",
+                        wl_vk_surface, wl_vk_surface->swapchain);
+
+       /* Release the refs taken in get_swapchain_buffers(). */
+       if (swapchain->swapchain_buffers) {
+               for (int i = 0; i < swapchain->properties.buffer_count; i++) {
+                       if (swapchain->swapchain_buffers[i]) {
+                               TPL_DEBUG("Stop tracking tbm_surface(%p)",
+                                                 swapchain->swapchain_buffers[i]);
+                               tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
+                               swapchain->swapchain_buffers[i] = NULL;
+                       }
+               }
 
-       twe_surface = twe_surface_add(wayland_vk_wsi_display->wl_thread,
-                                                                 wayland_vk_wsi_display->twe_display,
-                                                                 surface->native_handle,
-                                                                 surface->format);
-       if (!twe_surface) {
-               TPL_ERR("Failed to add native_surface(%p) to thread(%p)",
-                               surface->native_handle, wayland_vk_wsi_display->wl_thread);
-               free(wayland_vk_wsi_surface);
-               surface->backend.data = NULL;
-               return TPL_ERROR_OUT_OF_MEMORY;
+               free(swapchain->swapchain_buffers);
+               swapchain->swapchain_buffers = NULL;
        }
 
-       wayland_vk_wsi_surface->twe_surface = twe_surface;
-       wayland_vk_wsi_surface->is_activated = TPL_FALSE;
+       _tpl_wl_vk_surface_buffer_clear(wl_vk_surface);
 
-       TPL_LOG_T("WL_VK",
-                         "[INIT]tpl_surface(%p) tpl_wayland_vk_wsi_surface(%p) twe_surface(%p)",
-                         surface, wayland_vk_wsi_surface, twe_surface);
+       /* Queue destruction must happen on the surface thread; wait for it. */
+       tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+       wl_vk_surface->sent_message = DESTROY_QUEUE;
+       tpl_gsource_send_message(wl_vk_surface->surf_source,
+                                                        wl_vk_surface->sent_message);
+       tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+       tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+
+       _print_buffer_lists(wl_vk_surface);
+
+       free(swapchain);
+       wl_vk_surface->swapchain = NULL;
 
        return TPL_ERROR_NONE;
 }
 
-static void
-__tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface)
+/* Export the swapchain's buffers to the caller.
+ * buffers == NULL: only report the queue size through *buffer_count.
+ * Otherwise allocate swapchain_buffers (capacity *buffer_count), fill it
+ * via wayland-tbm and take a tbm ref on each returned surface (released
+ * again in destroy_swapchain). Serialized by wl_event_mutex.
+ * NOTE(review): 'ret' is declared tpl_result_t but tested with !ret on the
+ * wayland_tbm_client_queue_get_surfaces() result - confirm that helper
+ * returns non-zero on success (opposite of TPL_ERROR_NONE == 0). */
+static tpl_result_t
+__tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
+                                                                                         tbm_surface_h **buffers,
+                                                                                         int *buffer_count)
 {
-       tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
-       tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
-
        TPL_ASSERT(surface);
+       TPL_ASSERT(surface->backend.data);
        TPL_ASSERT(surface->display);
+       TPL_ASSERT(surface->display->backend.data);
 
-       wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
-       if (wayland_vk_wsi_surface == NULL) return;
+       tpl_wl_vk_surface_t *wl_vk_surface =
+               (tpl_wl_vk_surface_t *)surface->backend.data;
+       tpl_wl_vk_display_t *wl_vk_display =
+               (tpl_wl_vk_display_t *)surface->display->backend.data;
+       tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
+       tpl_result_t ret                   = TPL_ERROR_NONE;
+       int i;
 
-       wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)
-                                                        surface->display->backend.data;
-       if (wayland_vk_wsi_display == NULL) return;
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+       TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
 
-       if (wayland_vk_wsi_surface->tbm_queue)
-               __tpl_wl_vk_wsi_surface_destroy_swapchain(surface);
+       tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
 
-       TPL_LOG_T("WL_VK",
-                         "[FINI] wayland_vk_wsi_surface(%p) native_surface(%p) twe_surface(%p)",
-                         wayland_vk_wsi_surface, surface->native_handle,
-                         wayland_vk_wsi_surface->twe_surface);
+       /* Count-only query path. */
+       if (!buffers) {
+               *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
+               tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+               return TPL_ERROR_NONE;
+       }
 
-       if (twe_surface_del(wayland_vk_wsi_surface->twe_surface)
-                       != TPL_ERROR_NONE) {
-               TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)",
-                               wayland_vk_wsi_surface->twe_surface,
-                               wayland_vk_wsi_display->wl_thread);
+       swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
+                                                                               *buffer_count,
+                                                                               sizeof(tbm_surface_h));
+       if (!swapchain->swapchain_buffers) {
+               TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
+                               *buffer_count);
+               tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+               return TPL_ERROR_OUT_OF_MEMORY;
+       }
+
+       ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
+                                                                                               swapchain->tbm_queue,
+                                                                                               swapchain->swapchain_buffers,
+                                                                                               buffer_count);
+       if (!ret) {
+               TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
+                               wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
+               free(swapchain->swapchain_buffers);
+               swapchain->swapchain_buffers = NULL;
+               tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       /* Keep each exported surface alive while the swapchain holds it. */
+       for (i = 0; i < *buffer_count; i++) {
+               if (swapchain->swapchain_buffers[i]) {
+                       TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
+                                         i, swapchain->swapchain_buffers[i],
+                                         _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
+                       tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
+               }
        }
 
-       wayland_vk_wsi_surface->twe_surface = NULL;
+       *buffers = swapchain->swapchain_buffers;
 
-       free(wayland_vk_wsi_surface);
-       surface->backend.data = NULL;
+       tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+
+       return TPL_ERROR_NONE;
 }
 
-static tpl_result_t
-__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
-               tbm_surface_h tbm_surface,
-               int num_rects, const int *rects,
-               tbm_fd sync_fence)
+/* tbm user-data destructor for KEY_WL_VK_BUFFER: detach the buffer from
+ * the surface's tracking array, destroy its wl_buffer and (if enabled)
+ * zwp buffer_release proxy, close the release fence fd, free the damage
+ * rects, and finally free the wl_vk_buffer itself. */
+static void
+__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
 {
+       tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
+       tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
 
-       TPL_ASSERT(surface);
-       TPL_ASSERT(surface->display);
-       TPL_ASSERT(surface->display->native_handle);
-       TPL_ASSERT(tbm_surface);
+       TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
+                        wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);
 
-       tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface =
-               (tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
-       tbm_surface_queue_error_e tsq_err;
+       /* Remove this buffer from the surface's slot array under lock. */
+       tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
+       if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
+               wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
+               wl_vk_surface->buffer_cnt--;
 
-       if (!tbm_surface_internal_is_valid(tbm_surface)) {
-               TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
-                               tbm_surface);
-               return TPL_ERROR_INVALID_PARAMETER;
+               wl_vk_buffer->idx = -1;
        }
+       tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
 
-       if (sync_fence != -1) {
-               tpl_result_t res = TPL_ERROR_NONE;
-               res = twe_surface_set_sync_fd(wayland_vk_wsi_surface->twe_surface,
-                                                                         tbm_surface, sync_fence);
-               if (res != TPL_ERROR_NONE) {
-                       TPL_WARN("Failed to set sync_fd(%d). Fallback to async mode.",
-                                        sync_fence);
-               }
+       wl_display_flush(wl_vk_display->wl_display);
+
+       if (wl_vk_buffer->wl_buffer) {
+               wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
+                                                                                 wl_vk_buffer->wl_buffer);
+               wl_vk_buffer->wl_buffer = NULL;
        }
 
-       tsq_err = tbm_surface_queue_enqueue(wayland_vk_wsi_surface->tbm_queue,
-                                                                               tbm_surface);
-       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) {
-               tbm_surface_internal_unref(tbm_surface);
-       } else {
-               TPL_ERR("Failed to enqeueue tbm_surface. | tsq_err = %d", tsq_err);
-               return TPL_ERROR_INVALID_OPERATION;
+#if TIZEN_FEATURE_ENABLE
+       if (wl_vk_buffer->buffer_release) {
+               zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
+               wl_vk_buffer->buffer_release = NULL;
+       }
+#endif
+
+       if (wl_vk_buffer->release_fence_fd != -1) {
+               close(wl_vk_buffer->release_fence_fd);
+               wl_vk_buffer->release_fence_fd = -1;
+       }
+
+       if (wl_vk_buffer->rects) {
+               free(wl_vk_buffer->rects);
+               wl_vk_buffer->rects = NULL;
+               wl_vk_buffer->num_rects = 0;
        }
 
-       TPL_LOG_T("WL_VK", "[ENQ] tbm_surface(%p) bo(%d)",
-                         tbm_surface,
-                         tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
+       wl_vk_buffer->tbm_surface = NULL;
+       wl_vk_buffer->bo_name = -1;
 
-       return TPL_ERROR_NONE;
+       free(wl_vk_buffer);
 }
 
-static tpl_bool_t
-__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface)
+/* Look up the tpl_wl_vk_buffer_t stored on a tbm_surface as user data
+ * (KEY_WL_VK_BUFFER); returns NULL if none has been attached yet. */
+static tpl_wl_vk_buffer_t *
+_get_wl_vk_buffer(tbm_surface_h tbm_surface)
+{
-       TPL_ASSERT(surface);
-       TPL_ASSERT(surface->backend.data);
+       tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
+       tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
+                                                                          (void **)&wl_vk_buffer);
+       return wl_vk_buffer;
+}
+
+/* Get-or-create the wl_vk_buffer attached to tbm_surface. On first sight,
+ * allocate it, register __cb_wl_vk_buffer_free as the tbm user-data
+ * destructor, initialize its fields, and insert it into
+ * wl_vk_surface->buffers[] (evicting the frontmost slot if the fixed-size
+ * array is full). Returns NULL only on allocation failure. */
+static tpl_wl_vk_buffer_t *
+_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
+                                         tbm_surface_h tbm_surface)
+{
+       tpl_wl_vk_buffer_t  *wl_vk_buffer  = NULL;
+
+       wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
+
+       if (!wl_vk_buffer) {
+               wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
+               TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);
+
+               tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
+                                                                                  (tbm_data_free)__cb_wl_vk_buffer_free);
+               tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
+                                                                                  wl_vk_buffer);
+
+               wl_vk_buffer->wl_buffer                = NULL;
+               wl_vk_buffer->tbm_surface              = tbm_surface;
+               wl_vk_buffer->bo_name                  = _get_tbm_surface_bo_name(tbm_surface);
+               wl_vk_buffer->wl_vk_surface            = wl_vk_surface;
+
+               wl_vk_buffer->status                   = RELEASED;
+
+               wl_vk_buffer->acquire_fence_fd         = -1;
+               wl_vk_buffer->release_fence_fd         = -1;
+
+               wl_vk_buffer->dx                       = 0;
+               wl_vk_buffer->dy                       = 0;
+               wl_vk_buffer->width                    = tbm_surface_get_width(tbm_surface);
+               wl_vk_buffer->height                   = tbm_surface_get_height(tbm_surface);
+
+               wl_vk_buffer->rects                    = NULL;
+               wl_vk_buffer->num_rects                = 0;
+
+               wl_vk_buffer->need_to_commit = TPL_FALSE;
+#if TIZEN_FEATURE_ENABLE
+               wl_vk_buffer->buffer_release = NULL;
+#endif
+               tpl_gmutex_init(&wl_vk_buffer->mutex);
+               tpl_gcond_init(&wl_vk_buffer->cond);
+
+               tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
+               {
+                       int i;
+                       /* Find the first free slot in the fixed-size array. */
+                       for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+                               if (wl_vk_surface->buffers[i] == NULL) break;
+
+                       /* If this exception is reached,
+                        * it may be a critical memory leak problem. */
+                       if (i == BUFFER_ARRAY_SIZE) {
+                               tpl_wl_vk_buffer_t *evicted_buffer = NULL;
+                               int evicted_idx = 0; /* evict the frontmost buffer */
+
+                               evicted_buffer = wl_vk_surface->buffers[evicted_idx];
+
+                               TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
+                                                wl_vk_surface);
+                               TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
+                                                evicted_buffer, evicted_buffer->tbm_surface,
+                                                status_to_string[evicted_buffer->status]);
+
+                               /* [TODO] need to think about whether there will be
+                                * better modifications */
+                               wl_vk_surface->buffer_cnt--;
+                               wl_vk_surface->buffers[evicted_idx]      = NULL;
+
+                               i = evicted_idx;
+                       }
+
+                       wl_vk_surface->buffer_cnt++;
+                       wl_vk_surface->buffers[i]          = wl_vk_buffer;
+                       wl_vk_buffer->idx                  = i;
+               }
+               tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
 
-       tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface =
-               (tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
+               TPL_INFO("[WL_VK_BUFFER_CREATE]",
+                                "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
+                                wl_vk_surface, wl_vk_buffer, tbm_surface,
+                                wl_vk_buffer->bo_name);
+       }
 
-       return !(wayland_vk_wsi_surface->reset);
+       return wl_vk_buffer;
 }
 
+/* Dequeue the next renderable buffer from the swapchain's tbm_queue,
+ * optionally waiting up to timeout_ns (UINT64_MAX == wait indefinitely).
+ * On success returns a tbm_surface with an internal ref taken and, when
+ * explicit sync is active, hands the buffer's release fence to the caller
+ * through release_fence (ownership of the fd transfers to the caller).
+ * NOTE(review): timeout_ns/1000 yields microseconds - confirm the unit
+ * tbm_surface_queue_can_dequeue_wait_timeout() expects (ms?). */
 static tbm_surface_h
-__tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
-                                                                                       uint64_t timeout_ns,
-                                                                                       tbm_fd *sync_fence)
+__tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
+                                                                  uint64_t timeout_ns,
+                                                                  int32_t *release_fence)
 {
        TPL_ASSERT(surface);
        TPL_ASSERT(surface->backend.data);
        TPL_ASSERT(surface->display);
-
-       tbm_surface_h tbm_surface = NULL;
-       tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface =
-               (tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
-       tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display =
-               (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data;
-       tbm_surface_queue_error_e tsq_err = 0;
-       tpl_result_t lock_ret = TPL_ERROR_NONE;
-
-       if (sync_fence)
-               *sync_fence = -1;
-
-       /* After the can dequeue state, call twe_display_lock to prevent other
-        * events from being processed in wayland_egl_thread
-        * during below dequeue procedure. */
-       lock_ret = twe_display_lock(wayland_vk_wsi_display->twe_display);
-
-       if (!tbm_surface_queue_can_dequeue(wayland_vk_wsi_surface->tbm_queue, 0)) {
-               if (timeout_ns == 0) return NULL;
-               else {
-                       tpl_result_t res = TPL_ERROR_NONE;
-                       if (lock_ret == TPL_ERROR_NONE)
-                               twe_display_unlock(wayland_vk_wsi_display->twe_display);
-                       TPL_OBJECT_UNLOCK(surface);
-                       TRACE_BEGIN("WAIT_DEQUEUEABLE");
-                       res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface,
-                                                                                          timeout_ns);
-                       TRACE_END();
-                       TPL_OBJECT_LOCK(surface);
-                       lock_ret = twe_display_lock(wayland_vk_wsi_display->twe_display);
-
-                       if (res == TPL_ERROR_TIME_OUT) {
-                               TPL_ERR("Failed to get buffer during timeout_ns(%u)", timeout_ns);
-                               if (lock_ret == TPL_ERROR_NONE)
-                                       twe_display_unlock(wayland_vk_wsi_display->twe_display);
-                               return NULL;
-                       } else if (res != TPL_ERROR_NONE) {
-                               TPL_ERR("Invalid parameter. twe_surface(%p) timeout_ns(%u)",
-                                               wayland_vk_wsi_surface->twe_surface, timeout_ns);
-                               if (lock_ret == TPL_ERROR_NONE)
-                                       twe_display_unlock(wayland_vk_wsi_display->twe_display);
-                               return NULL;
-                       }
-               }
+       TPL_ASSERT(surface->display->backend.data);
+       TPL_OBJECT_CHECK_RETURN(surface, NULL);
+
+       tpl_wl_vk_surface_t *wl_vk_surface =
+               (tpl_wl_vk_surface_t *)surface->backend.data;
+       tpl_wl_vk_display_t *wl_vk_display =
+               (tpl_wl_vk_display_t *)surface->display->backend.data;
+       tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
+       tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;
+
+       tbm_surface_h tbm_surface          = NULL;
+       tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);
+
+       /* Drop the surface object lock while (possibly) blocking. */
+       TPL_OBJECT_UNLOCK(surface);
+       TRACE_BEGIN("WAIT_DEQUEUEABLE");
+       if (timeout_ns != UINT64_MAX) {
+               tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
+                                               swapchain->tbm_queue, timeout_ns/1000);
+       } else {
+               /* Infinite wait; return value intentionally unchecked here. */
+               tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
        }
+       TRACE_END();
+       TPL_OBJECT_LOCK(surface);
 
-       if (wayland_vk_wsi_surface->reset) {
-               TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. Do not process dequeue.",
-                                 wayland_vk_wsi_surface->tbm_queue);
-               if (lock_ret == TPL_ERROR_NONE)
-                       twe_display_unlock(wayland_vk_wsi_display->twe_display);
+       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
+               TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
+                               timeout_ns);
+               return NULL;
+       } else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
+                               wl_vk_surface, swapchain->tbm_queue, tsq_err);
                return NULL;
        }
 
+       /* Keep the event thread from touching the queue during dequeue. */
+       tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
+
+       if (wl_vk_surface->reset) {
+               TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
+                                 swapchain, swapchain->tbm_queue);
+               tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+               return NULL;
+       }
 
-       tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue,
+       tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
                                                                                &tbm_surface);
        if (!tbm_surface) {
-               TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d",
-                               tsq_err);
-               if (lock_ret == TPL_ERROR_NONE)
-                       twe_display_unlock(wayland_vk_wsi_display->twe_display);
+               TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
+                               swapchain->tbm_queue, wl_vk_surface, tsq_err);
+               tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
                return NULL;
        }
 
        tbm_surface_internal_ref(tbm_surface);
 
-       if (sync_fence) {
-               *sync_fence = twe_surface_create_sync_fd(tbm_surface);
+       wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
+       TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");
+
+       tpl_gmutex_lock(&wl_vk_buffer->mutex);
+       wl_vk_buffer->status = DEQUEUED;
+
+       if (release_fence) {
+#if TIZEN_FEATURE_ENABLE
+               if (wl_vk_surface->surface_sync) {
+                       /* Transfer fd ownership to the caller. */
+                       *release_fence = wl_vk_buffer->release_fence_fd;
+                       TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
+                                         wl_vk_surface, wl_vk_buffer, *release_fence);
+                       wl_vk_buffer->release_fence_fd = -1;
+               } else
+#endif
+               {
+                       *release_fence = -1;
+               }
        }
 
-       TPL_LOG_T("WL_VK", "[DEQ] tbm_surface(%p) bo(%d)",
-                         tbm_surface,
-                         tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
+       wl_vk_surface->reset = TPL_FALSE;
+
+       TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+                         wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
+                         release_fence ? *release_fence : -1);
 
-       if (lock_ret == TPL_ERROR_NONE)
-               twe_display_unlock(wayland_vk_wsi_display->twe_display);
+       tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+       tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
 
        return tbm_surface;
 }
 
 static tpl_result_t
-__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface,
-               tbm_surface_h **buffers,
-               int *buffer_count)
+__tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
+                                                                         tbm_surface_h tbm_surface)
 {
-       tbm_surface_h buffer = NULL;
-       tbm_surface_h *swapchain_buffers = NULL;
-       tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
-       tbm_surface_queue_error_e tsq_err;
-       int i, dequeue_count;
-       tpl_result_t ret = TPL_ERROR_NONE;
-
        TPL_ASSERT(surface);
        TPL_ASSERT(surface->backend.data);
+
+       tpl_wl_vk_surface_t *wl_vk_surface  =
+               (tpl_wl_vk_surface_t *)surface->backend.data;
+       tpl_wl_vk_swapchain_t *swapchain    = NULL;
+       tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
+       tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
+                                                                 TPL_ERROR_INVALID_PARAMETER);
+
+       swapchain = wl_vk_surface->swapchain;
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
+                                                                TPL_ERROR_INVALID_PARAMETER);
+
+       wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
+       if (wl_vk_buffer) {
+               tpl_gmutex_lock(&wl_vk_buffer->mutex);
+               wl_vk_buffer->status = RELEASED;
+               tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+       }
+
+       tbm_surface_internal_unref(tbm_surface);
+
+       TPL_INFO("[CANCEL BUFFER]",
+                        "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
+                         wl_vk_surface, swapchain, tbm_surface,
+                         _get_tbm_surface_bo_name(tbm_surface));
+
+       tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
+                                                                                          tbm_surface);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       return TPL_ERROR_NONE;
+}
+
+static tpl_result_t
+__tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
+                                                                          tbm_surface_h tbm_surface,
+                                                                          int num_rects, const int *rects,
+                                                                          int32_t acquire_fence)
+{
+       TPL_ASSERT(surface);
        TPL_ASSERT(surface->display);
-       TPL_ASSERT(buffers);
-       TPL_ASSERT(buffer_count);
-
-       wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
-       swapchain_buffers = (tbm_surface_h *)calloc(
-                                                       wayland_vk_wsi_surface->buffer_count, sizeof(tbm_surface_h));
-       if (!swapchain_buffers) {
-               TPL_ERR("Failed to allocate memory for buffers.");
-               return TPL_ERROR_OUT_OF_MEMORY;
+       TPL_ASSERT(surface->backend.data);
+       TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
+
+       tpl_wl_vk_surface_t *wl_vk_surface  =
+               (tpl_wl_vk_surface_t *) surface->backend.data;
+       tpl_wl_vk_swapchain_t *swapchain    = wl_vk_surface->swapchain;
+       tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
+       tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
+       int bo_name                         = -1;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+       TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
+       TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
+                                                                 TPL_ERROR_INVALID_PARAMETER);
+
+       wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
+       if (!wl_vk_buffer) {
+               TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
        }
 
-       for (i = 0 ; i < wayland_vk_wsi_surface->buffer_count ; i++) {
-               tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue, &buffer);
-               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-                       TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d",
-                                       tsq_err);
-                       dequeue_count = i;
-                       ret = TPL_ERROR_OUT_OF_MEMORY;
-                       goto get_buffer_fail;
+       bo_name = wl_vk_buffer->bo_name;
+
+       tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+       /* If there are received region information, save it to wl_vk_buffer */
+       if (num_rects && rects) {
+               if (wl_vk_buffer->rects != NULL) {
+                       free(wl_vk_buffer->rects);
+                       wl_vk_buffer->rects = NULL;
+                       wl_vk_buffer->num_rects = 0;
                }
-               swapchain_buffers[i] = buffer;
-               TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p)", i, buffer);
-       }
 
-       for (i = 0 ; i < wayland_vk_wsi_surface->buffer_count ; i++) {
-               tsq_err = tbm_surface_queue_release(wayland_vk_wsi_surface->tbm_queue,
-                                                                                       swapchain_buffers[i]);
-               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-                       TPL_ERR("Failed to release tbm_surface. | tsq_err = %d", tsq_err);
-                       ret = TPL_ERROR_INVALID_OPERATION;
-                       goto release_buffer_fail;
+               wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
+               wl_vk_buffer->num_rects = num_rects;
+
+               if (wl_vk_buffer->rects) {
+                       memcpy((char *)wl_vk_buffer->rects, (char *)rects,
+                                  sizeof(int) * 4 * num_rects);
+               } else {
+                       TPL_ERR("Failed to allocate memory for rects info.");
                }
        }
 
-       *buffers = swapchain_buffers;
-       *buffer_count = wayland_vk_wsi_surface->buffer_count;
+       if (wl_vk_buffer->acquire_fence_fd != -1)
+               close(wl_vk_buffer->acquire_fence_fd);
+
+       wl_vk_buffer->acquire_fence_fd = acquire_fence;
+
+       wl_vk_buffer->status = ENQUEUED;
+       TPL_LOG_T("WL_VK",
+                         "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
+                         wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
+
+       tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+       tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
+                                                                               tbm_surface);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               tbm_surface_internal_unref(tbm_surface);
+               TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
+                               tbm_surface, wl_vk_surface, tsq_err);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       tbm_surface_internal_unref(tbm_surface);
+
        return TPL_ERROR_NONE;
+}
+
+static const struct wl_buffer_listener wl_buffer_release_listener = {
+       (void *)__cb_wl_buffer_release,
+};
+
+static tpl_result_t
+_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+       tbm_surface_h tbm_surface            = NULL;
+       tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
+       tpl_wl_vk_display_t *wl_vk_display   = wl_vk_surface->wl_vk_display;
+       tpl_wl_vk_swapchain_t *swapchain     = wl_vk_surface->swapchain;
+       tpl_wl_vk_buffer_t *wl_vk_buffer     = NULL;
+       tpl_bool_t ready_to_commit           = TPL_TRUE;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+
+       while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
+               tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
+                                                                                       &tbm_surface);
+               if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+                       TPL_ERR("Failed to acquire from tbm_queue(%p)",
+                                       swapchain->tbm_queue);
+                       return TPL_ERROR_INVALID_OPERATION;
+               }
+
+               tbm_surface_internal_ref(tbm_surface);
 
-get_buffer_fail:
-       for (i = 0 ; i < dequeue_count ; i++) {
-               tsq_err = tbm_surface_queue_release(wayland_vk_wsi_surface->tbm_queue,
-                                                                                       swapchain_buffers[i]);
-               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-                       TPL_ERR("Failed to release tbm_surface. | tsq_err = %d", tsq_err);
-                       goto release_buffer_fail;
+               wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
+               TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
+                                                                          "wl_vk_buffer should not be NULL");
+
+               tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+               wl_vk_buffer->status = ACQUIRED;
+
+               TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
+                                 wl_vk_buffer, tbm_surface,
+                                 _get_tbm_surface_bo_name(tbm_surface));
+
+               if (wl_vk_buffer->wl_buffer == NULL) {
+                       wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
+                                               wl_vk_display->wl_tbm_client, tbm_surface);
+
+                       if (!wl_vk_buffer->wl_buffer) {
+                               TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
+                                                wl_vk_display->wl_tbm_client, tbm_surface);
+                       } else {
+                               if (wl_vk_buffer->acquire_fence_fd == -1 ||
+                                       wl_vk_display->use_explicit_sync == TPL_FALSE) {
+                                       wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
+                                                                                  &wl_buffer_release_listener, wl_vk_buffer);
+                               }
+
+                               TPL_LOG_T("WL_VK",
+                                                 "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
+                                                 wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
+                       }
+               }
+
+               if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done)
+                       ready_to_commit = TPL_TRUE;
+               else {
+                       wl_vk_buffer->status = WAITING_VBLANK;
+                       __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
+                       ready_to_commit = TPL_FALSE;
                }
+
+               tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+               if (ready_to_commit)
+                       _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
        }
 
-release_buffer_fail:
-       free(swapchain_buffers);
-       return ret;
+       return TPL_ERROR_NONE;
 }
 
+#if TIZEN_FEATURE_ENABLE
 static void
-__cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue,
-                                                          void *data)
+__cb_buffer_fenced_release(void *data,
+                                                  struct zwp_linux_buffer_release_v1 *release,
+                                                  int32_t fence)
 {
-       tpl_surface_t *surface = NULL;
-       tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
-       tpl_bool_t is_activated = TPL_FALSE;
+       tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
+       tbm_surface_h tbm_surface         = NULL;
 
-       surface = (tpl_surface_t *)data;
-       TPL_CHECK_ON_NULL_RETURN(surface);
+       TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
 
-       wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
-       TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface);
+       tbm_surface = wl_vk_buffer->tbm_surface;
 
-       /* When queue_reset_callback is called, if is_activated is different from
-        * its previous state change the reset flag to TPL_TRUE to get a new buffer
-        * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
-       is_activated = twe_surface_check_activated(wayland_vk_wsi_surface->twe_surface);
+       if (tbm_surface_internal_is_valid(tbm_surface)) {
+               tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
+               tpl_wl_vk_swapchain_t *swapchain   = NULL;
+
+               if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
+                       TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
+                       tbm_surface_internal_unref(tbm_surface);
+                       return;
+               }
+
+               swapchain = wl_vk_surface->swapchain;
+
+               tpl_gmutex_lock(&wl_vk_buffer->mutex);
+               if (wl_vk_buffer->status == COMMITTED) {
+                       tbm_surface_queue_error_e tsq_err;
+
+                       zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
+                       wl_vk_buffer->buffer_release = NULL;
+
+                       wl_vk_buffer->release_fence_fd = fence;
+                       wl_vk_buffer->status = RELEASED;
+
+                       TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
+                                          wl_vk_buffer->bo_name,
+                                          fence);
+                       TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
+                                                       wl_vk_buffer->bo_name);
 
-       if (wayland_vk_wsi_surface->is_activated != is_activated) {
-               if (is_activated) {
-                       TPL_LOG_T("WL_VK",
-                                         "[ACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)",
-                                         wayland_vk_wsi_surface, surface_queue);
-               } else {
                        TPL_LOG_T("WL_VK",
-                                         "[DEACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)",
-                                         wayland_vk_wsi_surface, surface_queue);
+                                         "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+                                         wl_vk_buffer, tbm_surface,
+                                         wl_vk_buffer->bo_name,
+                                         fence);
+
+                       tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
+                                                                                               tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+                       tbm_surface_internal_unref(tbm_surface);
                }
-               wayland_vk_wsi_surface->is_activated = is_activated;
+
+               tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+       } else {
+               TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
        }
+}
+
+static void
+__cb_buffer_immediate_release(void *data,
+                                                         struct zwp_linux_buffer_release_v1 *release)
+{
+       tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
+       tbm_surface_h tbm_surface           = NULL;
 
-       wayland_vk_wsi_surface->reset = TPL_TRUE;
+       TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
 
-       if (surface->reset_cb)
-               surface->reset_cb(surface->reset_data);
+       tbm_surface = wl_vk_buffer->tbm_surface;
+
+       if (tbm_surface_internal_is_valid(tbm_surface)) {
+               tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
+               tpl_wl_vk_swapchain_t *swapchain   = NULL;
+
+               if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
+                       TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
+                       tbm_surface_internal_unref(tbm_surface);
+                       return;
+               }
+
+               swapchain = wl_vk_surface->swapchain;
+
+               tpl_gmutex_lock(&wl_vk_buffer->mutex);
+               if (wl_vk_buffer->status == COMMITTED) {
+                       tbm_surface_queue_error_e tsq_err;
+
+                       zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
+                       wl_vk_buffer->buffer_release = NULL;
+
+                       wl_vk_buffer->release_fence_fd = -1;
+                       wl_vk_buffer->status = RELEASED;
+
+                       TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
+                                          _get_tbm_surface_bo_name(tbm_surface));
+                       TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
+                                                       _get_tbm_surface_bo_name(tbm_surface));
+
+                       TPL_LOG_T("WL_VK",
+                                         "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
+                                         wl_vk_buffer, tbm_surface,
+                                         _get_tbm_surface_bo_name(tbm_surface));
+
+                       tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
+                                                                                               tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+                       tbm_surface_internal_unref(tbm_surface);
+               }
+
+               tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+       } else {
+               TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+       }
 }
 
-static tpl_result_t
-__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
-               tbm_format format, int width,
-               int height, int buffer_count, int present_mode)
+static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
+       __cb_buffer_fenced_release,
+       __cb_buffer_immediate_release,
+};
+#endif
+
+static void
+__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
 {
-       tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
-       tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
-       tpl_result_t res = TPL_ERROR_NONE;
+       tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
+       tbm_surface_h tbm_surface = NULL;
 
-       TPL_ASSERT(surface);
-       TPL_ASSERT(surface->backend.data);
-       TPL_ASSERT(surface->display);
+       TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
 
-       wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
-       TPL_ASSERT(wayland_vk_wsi_surface);
+       tbm_surface = wl_vk_buffer->tbm_surface;
 
-       wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)
-                                                        surface->display->backend.data;
-       TPL_ASSERT(wayland_vk_wsi_display);
-
-       if (wayland_vk_wsi_surface->tbm_queue) {
-               TPL_LOG_T("WL_VK", "[REUSE] wayland_vk_wsi_surface(%p) tbm_queue(%p) size(%d)",
-                                 wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue,
-                                 wayland_vk_wsi_surface->buffer_count);
-               wayland_vk_wsi_surface->buffer_count =
-                       tbm_surface_queue_get_size(wayland_vk_wsi_surface->tbm_queue);
-               wayland_vk_wsi_surface->reset = TPL_FALSE;
-               return TPL_ERROR_NONE;
+       if (tbm_surface_internal_is_valid(tbm_surface)) {
+               tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
+               tpl_wl_vk_swapchain_t *swapchain   = NULL;
+               tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+               if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
+                       TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
+                       tbm_surface_internal_unref(tbm_surface);
+                       return;
+               }
+
+               swapchain = wl_vk_surface->swapchain;
+
+               tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+               if (wl_vk_buffer->status == COMMITTED) {
+
+                       tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
+                                                                                               tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+                       wl_vk_buffer->status = RELEASED;
+
+                       TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
+                       TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
+                                                       wl_vk_buffer->bo_name);
+
+                       TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+                                         wl_vk_buffer->wl_buffer, tbm_surface,
+                                         wl_vk_buffer->bo_name);
+
+                       tbm_surface_internal_unref(tbm_surface);
+               }
+
+               tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+       } else {
+               TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
        }
+}
+
+static void
+__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
+                                          unsigned int sequence, unsigned int tv_sec,
+                                          unsigned int tv_usec, void *user_data)
+{
+       tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
+       tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;
+
+       TRACE_ASYNC_END((int)wl_vk_surface, "WAIT_VBLANK");
+       TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface);
+
+       if (error == TDM_ERROR_TIMEOUT)
+               TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
+                                wl_vk_surface);
+
+       wl_vk_surface->vblank_done = TPL_TRUE;
 
-       res = twe_surface_create_swapchain(wayland_vk_wsi_surface->twe_surface,
-                                                                          width, height, format,
-                                                                          buffer_count, present_mode);
-       if (res != TPL_ERROR_NONE) {
-               TPL_ERR("Failed to create swapchain. twe_surface(%p)",
-                               wayland_vk_wsi_surface->twe_surface);
-               return res;
+       tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+       wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
+                                               wl_vk_surface->vblank_waiting_buffers,
+                                               NULL);
+       if (wl_vk_buffer)
+               _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
+       tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+}
+
+static tpl_result_t
+_thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+       tdm_error tdm_err                     = TDM_ERROR_NONE;
+       tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
+
+       if (wl_vk_surface->vblank == NULL) {
+               wl_vk_surface->vblank =
+                       _thread_create_tdm_client_vblank(wl_vk_display->tdm_client);
+               if (!wl_vk_surface->vblank) {
+                       TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
+                                        wl_vk_surface);
+                       return TPL_ERROR_OUT_OF_MEMORY;
+               }
        }
 
-       wayland_vk_wsi_surface->tbm_queue = twe_surface_get_tbm_queue(
-                       wayland_vk_wsi_surface->twe_surface);
+       tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
+                       wl_vk_surface->post_interval,
+                       __cb_tdm_client_vblank,
+                       (void *)wl_vk_surface);
 
-       /* Set reset_callback to tbm_queue */
-       if (tbm_surface_queue_add_reset_cb(wayland_vk_wsi_surface->tbm_queue,
-                                  __cb_tbm_queue_reset_callback,
-                                  (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
-               TPL_ERR("TBM surface queue add reset cb failed!");
-               twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface);
-               wayland_vk_wsi_surface->tbm_queue = NULL;
+       if (tdm_err == TDM_ERROR_NONE) {
+               wl_vk_surface->vblank_done = TPL_FALSE;
+               TRACE_ASYNC_BEGIN((int)wl_vk_surface, "WAIT_VBLANK");
+       } else {
+               TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
                return TPL_ERROR_INVALID_OPERATION;
        }
 
-       wayland_vk_wsi_surface->buffer_count = buffer_count;
-       wayland_vk_wsi_surface->reset = TPL_FALSE;
-
        return TPL_ERROR_NONE;
 }
 
-static tpl_result_t
-__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface)
+static void
+_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
+                                                 tpl_wl_vk_buffer_t *wl_vk_buffer)
 {
-       tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
-       tpl_result_t res = TPL_ERROR_NONE;
+       tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
+       struct wl_surface *wl_surface         = wl_vk_surface->wl_surface;
+       uint32_t version;
+
+       TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
+                                                                  "wl_vk_buffer should not be NULL");
+
+       if (wl_vk_buffer->wl_buffer == NULL) {
+               wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
+                                                                                       wl_vk_display->wl_tbm_client,
+                                                                                       wl_vk_buffer->tbm_surface);
+               if (wl_vk_buffer->wl_buffer &&
+                       (wl_vk_buffer->acquire_fence_fd == -1 ||
+                        wl_vk_display->use_explicit_sync == TPL_FALSE)) {
+                               wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
+                                                                          &wl_buffer_release_listener, wl_vk_buffer);
+               }
+       }
+       TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
+                                                                  "[FATAL] Failed to create wl_buffer");
 
-       TPL_ASSERT(surface);
-       TPL_ASSERT(surface->backend.data);
-       TPL_ASSERT(surface->display);
+       version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
 
-       wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
-       TPL_ASSERT(wayland_vk_wsi_surface);
+       wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
+                                         wl_vk_buffer->dx, wl_vk_buffer->dy);
 
-       if (wayland_vk_wsi_surface->reset) {
-               TPL_LOG_T("WL_VK",
-                                 "Since reset is in the TRUE state, it will not be destroyed.");
-               return TPL_ERROR_NONE;
+       if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
+               if (version < 4) {
+                       wl_surface_damage(wl_surface,
+                                                         wl_vk_buffer->dx, wl_vk_buffer->dy,
+                                                         wl_vk_buffer->width, wl_vk_buffer->height);
+               } else {
+                       wl_surface_damage_buffer(wl_surface,
+                                                                        0, 0,
+                                                                        wl_vk_buffer->width, wl_vk_buffer->height);
+               }
+       } else {
+               int i;
+               for (i = 0; i < wl_vk_buffer->num_rects; i++) {
+                       int inverted_y =
+                               wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
+                                               wl_vk_buffer->rects[i * 4 + 3]);
+                       if (version < 4) {
+                               wl_surface_damage(wl_surface,
+                                                                 wl_vk_buffer->rects[i * 4 + 0],
+                                                                 inverted_y,
+                                                                 wl_vk_buffer->rects[i * 4 + 2],
+                                                                 wl_vk_buffer->rects[i * 4 + 3]);
+                       } else {
+                               wl_surface_damage_buffer(wl_surface,
+                                                                                wl_vk_buffer->rects[i * 4 + 0],
+                                                                                inverted_y,
+                                                                                wl_vk_buffer->rects[i * 4 + 2],
+                                                                                wl_vk_buffer->rects[i * 4 + 3]);
+                       }
+               }
        }
 
-       res = twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface);
-       if (res != TPL_ERROR_NONE) {
-               TPL_ERR("Failed to destroy swapchain. twe_surface(%p)",
-                               wayland_vk_wsi_surface->twe_surface);
-               return res;
+#if TIZEN_FEATURE_ENABLE
+       if (wl_vk_display->use_explicit_sync &&
+               wl_vk_surface->surface_sync &&
+               wl_vk_buffer->acquire_fence_fd != -1) {
+
+               zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
+                                                                                                                          wl_vk_buffer->acquire_fence_fd);
+               TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
+                                 wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
+               close(wl_vk_buffer->acquire_fence_fd);
+               wl_vk_buffer->acquire_fence_fd = -1;
+
+               wl_vk_buffer->buffer_release =
+                       zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
+               if (!wl_vk_buffer->buffer_release) {
+                       TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
+               } else {
+                       zwp_linux_buffer_release_v1_add_listener(
+                               wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
+                       TPL_DEBUG("add explicit_sync_release_listener.");
+               }
        }
+#endif
 
-       wayland_vk_wsi_surface->tbm_queue = NULL;
+       wl_surface_commit(wl_surface);
 
-       return TPL_ERROR_NONE;
+       wl_display_flush(wl_vk_display->wl_display);
+
+       TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
+                                         wl_vk_buffer->bo_name);
+
+       tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+       wl_vk_buffer->need_to_commit   = TPL_FALSE;
+       wl_vk_buffer->status           = COMMITTED;
+
+       tpl_gcond_signal(&wl_vk_buffer->cond);
+
+       tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+       TPL_LOG_T("WL_VK",
+                         "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
+                         wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
+                         wl_vk_buffer->bo_name);
+
+       if (wl_vk_display->use_wait_vblank &&
+               _thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
+               TPL_ERR("Failed to set wait vblank.");
 }
 
 tpl_bool_t
@@ -680,45 +2577,76 @@ __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
 {
+       /* Backend probe: claim this display only when the native handle
+        * is a wl_display. Helper renamed from the old twe_* prefix to a
+        * file-local _check_* helper in this commit. */
        if (!native_dpy) return TPL_FALSE;
 
-       if (twe_check_native_handle_is_wl_display(native_dpy))
+       if (_check_native_handle_is_wl_display(native_dpy))
                return TPL_TRUE;
 
        return TPL_FALSE;
 }
 
+/* Installs the wayland-vulkan (threaded) display backend entry points.
+ * All handlers were renamed from *_wl_vk_wsi_* to the new *_wl_vk_*
+ * naming used throughout this rewritten file. */
 void
-__tpl_display_init_backend_wl_vk_wsi_thread(tpl_display_backend_t *backend)
+__tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
 {
        TPL_ASSERT(backend);
 
        backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
        backend->data = NULL;
 
-       backend->init = __tpl_wl_vk_wsi_display_init;
-       backend->fini = __tpl_wl_vk_wsi_display_fini;
-       backend->query_config = __tpl_wl_vk_wsi_display_query_config;
-       backend->filter_config = __tpl_wl_vk_wsi_display_filter_config;
+       backend->init = __tpl_wl_vk_display_init;
+       backend->fini = __tpl_wl_vk_display_fini;
+       backend->query_config = __tpl_wl_vk_display_query_config;
+       backend->filter_config = __tpl_wl_vk_display_filter_config;
        backend->query_window_supported_buffer_count =
-               __tpl_wl_vk_wsi_display_query_window_supported_buffer_count;
+               __tpl_wl_vk_display_query_window_supported_buffer_count;
        backend->query_window_supported_present_modes =
-               __tpl_wl_vk_wsi_display_query_window_supported_present_modes;
+               __tpl_wl_vk_display_query_window_supported_present_modes;
 }
 
+/* Installs the wayland-vulkan (threaded) surface backend entry points.
+ * Besides the *_wl_vk_wsi_* -> *_wl_vk_* rename, this commit wires up
+ * two hooks the old backend did not provide: cancel_dequeued_buffer
+ * and set_post_interval. */
 void
-__tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend)
+__tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
 {
        TPL_ASSERT(backend);
 
        backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
        backend->data = NULL;
 
-       backend->init = __tpl_wl_vk_wsi_surface_init;
-       backend->fini = __tpl_wl_vk_wsi_surface_fini;
-       backend->validate = __tpl_wl_vk_wsi_surface_validate;
-       backend->dequeue_buffer = __tpl_wl_vk_wsi_surface_dequeue_buffer;
-       backend->enqueue_buffer = __tpl_wl_vk_wsi_surface_enqueue_buffer;
+       backend->init = __tpl_wl_vk_surface_init;
+       backend->fini = __tpl_wl_vk_surface_fini;
+       backend->validate = __tpl_wl_vk_surface_validate;
+       backend->cancel_dequeued_buffer =
+               __tpl_wl_vk_surface_cancel_buffer;
+       backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
+       backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
        backend->get_swapchain_buffers =
-               __tpl_wl_vk_wsi_surface_get_swapchain_buffers;
-       backend->create_swapchain = __tpl_wl_vk_wsi_surface_create_swapchain;
-       backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain;
+               __tpl_wl_vk_surface_get_swapchain_buffers;
+       backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
+       backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
+       backend->set_post_interval =
+               __tpl_wl_vk_surface_set_post_interval;
+}
+
+/* Returns the exported global bo name of the first tbm_bo backing
+ * tbm_surface. Used only for trace/log output in this file. */
+static int
+_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
+{
+       return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+}
+
+/* Debug helper: walks the fixed-size buffers[] array (BUFFER_ARRAY_SIZE
+ * slots) of wl_vk_surface and logs each live wl_vk_buffer entry —
+ * index, pointers, bo name, and its status rendered via
+ * status_to_string[]. The whole walk is done under buffers_mutex so the
+ * snapshot is consistent with concurrent buffer add/remove. */
+static void
+_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+       int idx = 0;
+
+       tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
+       TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
+                        wl_vk_surface, wl_vk_surface->buffer_cnt);
+       for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
+               tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
+               /* NULL slots are simply skipped; the array may be sparse. */
+               if (wl_vk_buffer) {
+                       TPL_INFO("[INFO]",
+                                        "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
+                                        idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
+                                        wl_vk_buffer->bo_name,
+                                        status_to_string[wl_vk_buffer->status]);
+               }
+       }
+       tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
 }