tpl_tbm: Deleted unnecessary code related to vulkan/worker_thread 51/228251/1
author    Joonbum Ko <joonbum.ko@samsung.com>
          Fri, 20 Mar 2020 03:31:46 +0000 (12:31 +0900)
committer Joonbum Ko <joonbum.ko@samsung.com>
          Fri, 20 Mar 2020 03:31:50 +0000 (12:31 +0900)
 - Vulkan's tbm backend is not available.
 - Currently, the tbm backend in libtpl-egl serves as an offscreen
   rendering backend for a wayland-server compositor (a minimal sketch
   of that path follows below).
 - It can be extended later, but remove the unnecessary code before that.
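 - For reference, a minimal sketch of the dequeue/render/enqueue cycle
   that the simplified backend now forwards to. This is not part of the
   patch; "queue" is assumed to be the tbm_surface_queue_h handed to the
   tpl_surface as its native handle, and the rendering step is elided.

       #include <tbm_surface_queue.h>

       tbm_surface_h buffer = NULL;

       /* Only dequeue when the queue reports a free slot, as the
        * backend's dequeue_buffer path does. */
       if (tbm_surface_queue_can_dequeue(queue, 1) == 1 &&
           tbm_surface_queue_dequeue(queue, &buffer) == TBM_SURFACE_QUEUE_ERROR_NONE) {
               /* ... render offscreen into "buffer" ... */

               /* Hand the finished frame back to the queue owner
                * (the compositor side). */
               if (tbm_surface_queue_enqueue(queue, buffer)
                               != TBM_SURFACE_QUEUE_ERROR_NONE) {
                       /* enqueue failed; report the error */
               }
       }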

Change-Id: I69f52f623430ed0bb18da6af5d5424b5712df68b
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_tbm.c

index 43dee96..b38c7c5 100644 (file)
 #include <tbm_surface_queue.h>
 #include <tbm_dummy_display.h>
 
-#include "tpl_worker_thread.h"
-#include <pthread.h>
-#include <time.h>
 
 typedef struct _tpl_tbm_display tpl_tbm_display_t;
 typedef struct _tpl_tbm_surface tpl_tbm_surface_t;
-typedef struct _tpl_tbm_buffer tpl_tbm_buffer_t;
 
 struct _tpl_tbm_display {
        int need_dpy_deinit;
@@ -24,70 +20,9 @@ struct _tpl_tbm_display {
 };
 
 struct _tpl_tbm_surface {
-       /* tbm_surface list */
-       tpl_list_t vblank_list;
-       pthread_mutex_t vblank_list_mutex;
-
-       tpl_list_t draw_waiting_queue;
-       pthread_mutex_t draw_waiting_mutex;
-
-       tpl_bool_t vblank_done;
-
-       tpl_worker_surface_t worker_surface;
-
-       tpl_bool_t need_worker_clear;
-       int present_mode;
-
-       tpl_bool_t need_reset;
-};
-
-struct _tpl_tbm_buffer {
-       tbm_fd wait_sync;
+       tbm_surface_queue_h tbm_queue;
 };
 
-static int tpl_tbm_buffer_key;
-#define KEY_tpl_tbm_buffer  (unsigned long)(&tpl_tbm_buffer_key)
-
-static void
-__tpl_tbm_buffer_free(tpl_tbm_buffer_t *tbm_buffer)
-{
-       TPL_ASSERT(tbm_buffer);
-       if (tbm_buffer->wait_sync != -1)
-               close(tbm_buffer->wait_sync);
-       free(tbm_buffer);
-}
-
-static void
-__tpl_tbm_buffer_remove_from_list(void *data)
-{
-       tbm_surface_h tbm_surface = data;
-       tbm_surface_internal_unref(tbm_surface);
-}
-
-static TPL_INLINE tpl_tbm_buffer_t *
-__tpl_tbm_get_tbm_buffer_from_tbm_surface(tbm_surface_h surface)
-{
-       tpl_tbm_buffer_t *buf = NULL;
-
-       if (!tbm_surface_internal_is_valid(surface))
-               return NULL;
-
-       tbm_surface_internal_get_user_data(surface, KEY_tpl_tbm_buffer,
-                                                                          (void **)&buf);
-       return buf;
-}
-
-static TPL_INLINE void
-__tpl_tbm_set_tbm_buffer_to_tbm_surface(tbm_surface_h surface,
-                                                                               tpl_tbm_buffer_t *buf)
-{
-       tbm_surface_internal_add_user_data(surface,
-                                                                          KEY_tpl_tbm_buffer,
-                                                                          (tbm_data_free)__tpl_tbm_buffer_free);
-       tbm_surface_internal_set_user_data(surface,
-                                                                          KEY_tpl_tbm_buffer, buf);
-}
-
 static tpl_result_t
 __tpl_tbm_display_init(tpl_display_t *display)
 {
@@ -230,223 +165,6 @@ __tpl_tbm_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
        return (tbm_surface_h)pixmap;
 }
 
-static void
-__tpl_tbm_surface_queue_notify_cb(tbm_surface_queue_h surface_queue, void *data)
-{
-       /* Do something */
-}
-
-static void
-__tpl_tbm_draw_done(tpl_surface_t *surface, tbm_surface_h tbm_surface,
-                                       tpl_result_t result)
-{
-       tpl_tbm_surface_t *tpl_tbm_surface = NULL;
-       tpl_tbm_buffer_t *tpl_tbm_buffer = NULL;
-       tbm_surface_queue_h tbm_queue = NULL;
-
-       TPL_ASSERT(surface);
-       TPL_ASSERT(tbm_surface);
-       TPL_ASSERT(tbm_surface_internal_is_valid(tbm_surface));
-
-       tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data;
-       tpl_tbm_buffer = __tpl_tbm_get_tbm_buffer_from_tbm_surface(tbm_surface);
-       tbm_queue = (tbm_surface_queue_h)surface->native_handle;
-
-       TPL_ASSERT(tpl_tbm_surface);
-       TPL_ASSERT(tpl_tbm_buffer);
-       TPL_ASSERT(tbm_queue);
-
-       close(tpl_tbm_buffer->wait_sync);
-       tpl_tbm_buffer->wait_sync = -1;
-
-       /* if server supported current supported mode then just send */
-
-       if (tpl_tbm_surface->present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO) {
-               pthread_mutex_lock(&tpl_tbm_surface->vblank_list_mutex);
-               /* unref in tpl list remove callback
-                  (__tpl_tbm_buffer_remove_from_list) */
-               tbm_surface_internal_ref(tbm_surface);
-               __tpl_list_push_back(&tpl_tbm_surface->vblank_list, tbm_surface);
-               pthread_mutex_unlock(&tpl_tbm_surface->vblank_list_mutex);
-       } else if (tpl_tbm_surface->present_mode ==
-                          TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED &&
-                          tpl_tbm_surface->vblank_done == TPL_FALSE) {
-               /* if can't process previous vblank event, send buffer immediately */
-               pthread_mutex_lock(&tpl_tbm_surface->vblank_list_mutex);
-               /* unref in tpl list remove callback
-                  (__tpl_tbm_buffer_remove_from_list) */
-               tbm_surface_internal_ref(tbm_surface);
-               __tpl_list_push_back(&tpl_tbm_surface->vblank_list, tbm_surface);
-               tpl_tbm_surface->vblank_done = TPL_TRUE;
-               pthread_mutex_unlock(&tpl_tbm_surface->vblank_list_mutex);
-       } else {
-               tbm_surface_internal_unref(tbm_surface);
-               if (tbm_surface_queue_enqueue(tbm_queue,
-                                                                         tbm_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
-                       TPL_ERR("tbm_surface_queue_enqueue failed. tbm_queue(%p) tbm_surface(%p)",
-                                       tbm_queue, tbm_surface);
-               }
-       }
-}
-
-static int
-__tpl_tbm_draw_wait_fd_get(tpl_surface_t *surface, tbm_surface_h tbm_surface)
-{
-       tpl_tbm_buffer_t *tpl_tbm_buffer;
-
-       TPL_ASSERT(tbm_surface);
-       TPL_ASSERT(tbm_surface_internal_is_valid(tbm_surface));
-
-       tpl_tbm_buffer = __tpl_tbm_get_tbm_buffer_from_tbm_surface(tbm_surface);
-       return tpl_tbm_buffer->wait_sync;
-}
-
-static void
-__tpl_tbm_vblank(tpl_surface_t *surface, unsigned int sequence,
-                                unsigned int tv_sec,
-                                unsigned int tv_usec)
-{
-       tpl_tbm_surface_t *tpl_tbm_surface;
-       tbm_surface_h tbm_surface;
-
-       TPL_ASSERT(surface);
-
-       tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data;
-
-       TPL_ASSERT(tpl_tbm_surface);
-
-       if ((tpl_tbm_surface->present_mode &
-                       (TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED)) == 0)
-               return;
-
-       pthread_mutex_lock(&tpl_tbm_surface->vblank_list_mutex);
-       tbm_surface = __tpl_list_pop_front(&tpl_tbm_surface->vblank_list,
-                                                                          __tpl_tbm_buffer_remove_from_list);
-       pthread_mutex_unlock(&tpl_tbm_surface->vblank_list_mutex);
-
-       if (tbm_surface_internal_is_valid(tbm_surface)) {
-               tbm_surface_queue_h tbm_queue = (tbm_surface_queue_h)surface->native_handle;
-               if (tbm_surface_queue_enqueue(tbm_queue,
-                                                                         tbm_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
-                       TPL_ERR("tbm_surface_queue_enqueue failed. tbm_queue(%p) tbm_surface(%p)",
-                                       tbm_queue, tbm_surface);
-               }
-               tpl_tbm_surface->vblank_done = TPL_TRUE;
-       } else {
-               tpl_tbm_surface->vblank_done = TPL_FALSE;
-       }
-
-}
-
-static tbm_surface_h
-__tpl_tbm_draw_wait_buffer_get(tpl_surface_t *surface)
-{
-       tpl_tbm_surface_t *tpl_tbm_surface;
-       tbm_surface_h tbm_surface;
-
-       tpl_tbm_surface = surface->backend.data;
-       pthread_mutex_lock(&tpl_tbm_surface->draw_waiting_mutex);
-       tbm_surface = __tpl_list_pop_front(&tpl_tbm_surface->draw_waiting_queue, NULL);
-       pthread_mutex_unlock(&tpl_tbm_surface->draw_waiting_mutex);
-
-       return tbm_surface;
-}
-
-static tpl_result_t
-__tpl_tbm_surface_create_swapchain(tpl_surface_t *surface,
-                                                                  tbm_format format, int width,
-                                                                  int height, int buffer_count, int present_mode)
-{
-       tpl_tbm_surface_t *tpl_tbm_surface = NULL;
-
-       TPL_ASSERT(surface);
-
-       tpl_tbm_surface = (tpl_tbm_surface_t *) surface->backend.data;
-       TPL_ASSERT(tpl_tbm_surface);
-
-       /* FIXME: vblank has performance problem so replace all present mode to MAILBOX */
-       present_mode = TPL_DISPLAY_PRESENT_MODE_MAILBOX;
-
-       /* TODO: check server supported present modes */
-       switch (present_mode) {
-       case TPL_DISPLAY_PRESENT_MODE_MAILBOX:
-       case TPL_DISPLAY_PRESENT_MODE_IMMEDIATE:
-               break;
-       case TPL_DISPLAY_PRESENT_MODE_FIFO:
-       case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED:
-               if (__tpl_worker_support_vblank() == TPL_FALSE) {
-                       TPL_ERR("Unsupported present mode: %d, worker not support vblank",
-                                       present_mode);
-                       return TPL_ERROR_INVALID_PARAMETER;
-               }
-               break;
-       default:
-               TPL_ERR("Unsupported present mode: %d", present_mode);
-               return TPL_ERROR_INVALID_PARAMETER;
-       }
-
-       tpl_tbm_surface->present_mode = present_mode;
-
-       tpl_tbm_surface->worker_surface.surface = surface;
-       tpl_tbm_surface->worker_surface.draw_done = __tpl_tbm_draw_done;
-       tpl_tbm_surface->worker_surface.draw_wait_fd_get = __tpl_tbm_draw_wait_fd_get;
-       tpl_tbm_surface->worker_surface.vblank = __tpl_tbm_vblank;
-       tpl_tbm_surface->worker_surface.draw_wait_buffer_get =
-               __tpl_tbm_draw_wait_buffer_get;
-
-       __tpl_list_init(&tpl_tbm_surface->vblank_list);
-       __tpl_list_init(&tpl_tbm_surface->draw_waiting_queue);
-       pthread_mutex_init(&tpl_tbm_surface->vblank_list_mutex, NULL);
-       pthread_mutex_init(&tpl_tbm_surface->draw_waiting_mutex, NULL);
-
-       __tpl_worker_surface_list_insert(&tpl_tbm_surface->worker_surface);
-       tpl_tbm_surface->need_worker_clear = TPL_TRUE;
-
-       return TPL_ERROR_NONE;
-}
-
-static tpl_result_t
-__tpl_tbm_surface_destroy_swapchain(tpl_surface_t *surface)
-{
-       tpl_tbm_surface_t *tpl_tbm_surface = NULL;
-
-       TPL_ASSERT(surface);
-
-       tpl_tbm_surface = (tpl_tbm_surface_t *) surface->backend.data;
-       TPL_ASSERT(tpl_tbm_surface);
-
-       __tpl_worker_surface_list_remove(&tpl_tbm_surface->worker_surface);
-
-       pthread_mutex_lock(&tpl_tbm_surface->vblank_list_mutex);
-       __tpl_list_fini(&tpl_tbm_surface->vblank_list, NULL);
-       pthread_mutex_unlock(&tpl_tbm_surface->vblank_list_mutex);
-       pthread_mutex_destroy(&tpl_tbm_surface->vblank_list_mutex);
-
-       pthread_mutex_lock(&tpl_tbm_surface->draw_waiting_mutex);
-       __tpl_list_fini(&tpl_tbm_surface->draw_waiting_queue, NULL);
-       pthread_mutex_unlock(&tpl_tbm_surface->draw_waiting_mutex);
-       pthread_mutex_destroy(&tpl_tbm_surface->draw_waiting_mutex);
-       tpl_tbm_surface->need_worker_clear = TPL_FALSE;
-
-       return TPL_ERROR_NONE;
-}
-
-static void
-__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
-                                                         void *data)
-{
-       tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)data;
-
-       if (!tpl_tbm_surface) {
-               TPL_ERR("Invalid parameter. tpl_tbm_surface(%p)", tpl_tbm_surface);
-               return;
-       }
-
-       TPL_LOG_B("TBM", "tbm_queue(%p) has been reset!", tbm_queue);
-
-       tpl_tbm_surface->need_reset = TPL_TRUE;
-}
-
 static tpl_result_t
 __tpl_tbm_surface_init(tpl_surface_t *surface)
 {
@@ -461,50 +179,12 @@ __tpl_tbm_surface_init(tpl_surface_t *surface)
 
        surface->backend.data = (void *)tpl_tbm_surface;
 
-       tpl_tbm_surface->need_reset = TPL_FALSE;
-
-       if (surface->type == TPL_SURFACE_TYPE_WINDOW) {
-               if (__tpl_tbm_display_get_window_info(surface->display,
-                                                                                         surface->native_handle, &surface->width,
-                                                                                         &surface->height, NULL, 0, 0) != TPL_ERROR_NONE) {
-                       TPL_ERR("Failed to get native window(%p) info.",
-                                       surface->native_handle);
-                       goto error;
-               }
-
-               tbm_surface_queue_add_destroy_cb((tbm_surface_queue_h)surface->native_handle,
-                                                                                __tpl_tbm_surface_queue_notify_cb,
-                                                                                surface);
-
-               /* Set reset_callback to tbm_queue */
-               tbm_surface_queue_add_reset_cb((tbm_surface_queue_h)surface->native_handle,
-                                                                        __cb_tbm_queue_reset_callback,
-                                                                        (void *)tpl_tbm_surface);
-
-               TPL_LOG_B("TBM", "[INIT] tpl_surface(%p) tpl_tbm_surface_t(%p) tbm_surface_queue(%p)",
-                                 surface, tpl_tbm_surface, surface->native_handle);
-
-               return TPL_ERROR_NONE;
-       } else if (surface->type == TPL_SURFACE_TYPE_PIXMAP) {
-               if (__tpl_tbm_display_get_pixmap_info(surface->display,
-                                                                                         surface->native_handle, &surface->width,
-                                                                                         &surface->height, NULL) != TPL_TRUE) {
-                       TPL_ERR("Failed to get native pixmap(%p) info.",
-                                       surface->native_handle);
-
-                       goto error;
-               }
+       tpl_tbm_surface->tbm_queue = (tbm_surface_queue_h)surface->native_handle;
 
-               tbm_surface_internal_ref((tbm_surface_h)surface->native_handle);
-
-               return TPL_ERROR_NONE;
-       }
-
-error:
-       free(tpl_tbm_surface);
-       surface->backend.data = NULL;
+       TPL_LOG_B("TBM", "[INIT] tpl_surface(%p) tpl_tbm_surface_t(%p) tbm_surface_queue(%p)",
+                         surface, tpl_tbm_surface, surface->native_handle);
 
-       return TPL_ERROR_INVALID_OPERATION;
+       return TPL_ERROR_NONE;
 }
 
 static void
@@ -517,25 +197,15 @@ __tpl_tbm_surface_fini(tpl_surface_t *surface)
        tpl_tbm_surface = (tpl_tbm_surface_t *) surface->backend.data;
        TPL_ASSERT(tpl_tbm_surface);
 
-       if (tpl_tbm_surface->need_worker_clear)
-               __tpl_tbm_surface_destroy_swapchain(surface);
-
        TPL_ASSERT(surface);
        TPL_ASSERT(surface->display);
 
        TPL_LOG_B("TBM", "[FINI] tpl_surface(%p) tpl_tbm_surface_t(%p) native_handle(%p)",
                          surface, surface->backend.data, surface->native_handle);
 
-       if (surface->type == TPL_SURFACE_TYPE_PIXMAP)
-               tbm_surface_internal_unref((tbm_surface_h)surface->native_handle);
-       else if (surface->type == TPL_SURFACE_TYPE_WINDOW) {
-               tbm_surface_queue_remove_destroy_cb(
-                       (tbm_surface_queue_h)surface->native_handle,
-                       __tpl_tbm_surface_queue_notify_cb, surface);
-               /*TODO: we need fix for dequeued surface*/
-       }
+       tpl_tbm_surface->tbm_queue = NULL;
+       free(tpl_tbm_surface);
 
-       free(surface->backend.data);
        surface->backend.data = NULL;
 }
 
@@ -544,12 +214,7 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface,
                                                                 tbm_surface_h tbm_surface, int num_rects,
                                                                 const int *rects, tbm_fd sync_fence)
 {
-       tpl_tbm_surface_t *tpl_tbm_surface = NULL;
-       tpl_tbm_buffer_t *tpl_tbm_buffer = NULL;
-       tbm_surface_queue_h tbm_queue = NULL;
-       int ret = 0;
-       int union_x, union_y;
-       int union_w, union_h;
+       tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data;
 
        TPL_ASSERT(surface);
        TPL_ASSERT(surface->display);
@@ -565,91 +230,20 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface,
 
        tbm_surface_internal_unref(tbm_surface);
 
-       if (surface->type == TPL_SURFACE_TYPE_PIXMAP) {
-               TPL_ERR("Pixmap cannot post(%p, %p)", surface,
-                               surface->native_handle);
-               return TPL_ERROR_INVALID_PARAMETER;
+       if (sync_fence != -1) {
+               tbm_sync_fence_wait(sync_fence, -1);
+               close(sync_fence);
        }
 
-       if (surface->backend.type == TPL_BACKEND_TBM_VULKAN_WSI) {
-               tpl_tbm_surface = surface->backend.data;
-
-               tpl_tbm_buffer = __tpl_tbm_get_tbm_buffer_from_tbm_surface(tbm_surface);
-               tpl_tbm_buffer->wait_sync = sync_fence;
-
-               tbm_surface_internal_ref(tbm_surface);
-               pthread_mutex_init(&tpl_tbm_surface->draw_waiting_mutex, NULL);
-               __tpl_list_push_back(&tpl_tbm_surface->draw_waiting_queue, tbm_surface);
-               pthread_mutex_unlock(&tpl_tbm_surface->draw_waiting_mutex);
-               __tpl_worker_new_buffer_notify(&tpl_tbm_surface->worker_surface);
-       } else {
-               tbm_queue = (tbm_surface_queue_h)surface->native_handle;
-
-               if (!tbm_queue) {
-                       TPL_ERR("tbm_surface_queue is invalid.");
-                       return TPL_ERROR_INVALID_PARAMETER;
-               }
-
-               if (sync_fence != -1) {
-                       tbm_sync_fence_wait(sync_fence, -1);
-                       close(sync_fence);
-               }
-
-               /* If there are given damage rects for given tbm_surface, */
-               if (num_rects != 0 && rects != NULL) {
-                       int i;
-                       int left = surface->width;
-                       int bottom = surface->height;
-                       int right = 0, top = 0;
-
-                       /* Carculate the union region of given damage rectangles */
-                       for (i = 0; i < num_rects; i++) {
-                               int rect_i = i * 4;
-                               int x = rects[rect_i];
-                               int y = rects[rect_i + 1];
-                               int w = rects[rect_i + 2];
-                               int h = rects[rect_i + 3];
-
-                               left = (x < left) ? x : left;
-                               bottom = (y < bottom) ? y : bottom;
-                               right = ((x + w) > right) ? (x + w) : right;
-                               top = ((y + h) > top) ? (y + h) : top;
-                       }
-
-                       /* Calibrate so as not to exceed the range. */
-                       left = (left < 0) ? 0 : left;
-                       bottom = (bottom < 0) ? 0 : bottom;
-                       right = (right > surface->width) ? surface->width : right;
-                       top = (top > surface->height) ? surface->height : top;
-
-                       /* And set its union rect to tbm_surface as its damage region. */
-                       union_w = right - left;
-                       union_h = top - bottom;
-                       union_x = left;
-                       union_y = top;
-               } else {
-                       /* If there are no any damage rects,
-                        * set its full size of surface as its damage region. */
-                       union_w = surface->width;
-                       union_h = surface->height;
-                       union_x = 0;
-                       union_y = 0;
-               }
-
-               if (!(ret = tbm_surface_internal_set_damage(tbm_surface, union_x, union_y,
-                                                                                                       union_w, union_h)))
-                       TPL_WARN("Failed to set damage rect to tbm_surface(%p)", tbm_surface);
-
-               if (tbm_surface_queue_enqueue(tbm_queue, tbm_surface)
-                               != TBM_SURFACE_QUEUE_ERROR_NONE) {
-                       TPL_ERR("tbm_surface_queue_enqueue failed. tbm_queue(%p) tbm_surface(%p)",
-                                       tbm_queue, tbm_surface);
-                       return TPL_ERROR_INVALID_OPERATION;
-               }
+       if (tbm_surface_queue_enqueue(tpl_tbm_surface->tbm_queue, tbm_surface)
+                       != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("tbm_surface_queue_enqueue failed. tbm_queue(%p) tbm_surface(%p)",
+                               tpl_tbm_surface->tbm_queue, tbm_surface);
+               return TPL_ERROR_INVALID_OPERATION;
        }
 
        TPL_LOG_B("TBM", "[ENQ] tpl_surface(%p) tbm_queue(%p) tbm_surface(%p) bo(%d)",
-                         surface, tbm_queue, tbm_surface,
+                         surface, tpl_tbm_surface->tbm_queue, tbm_surface,
                          tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
 
        return TPL_ERROR_NONE;
@@ -658,12 +252,7 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface,
 static tpl_bool_t
 __tpl_tbm_surface_validate(tpl_surface_t *surface)
 {
-       tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data;
-       tpl_bool_t ret = TPL_TRUE;
-
-       ret = !tpl_tbm_surface->need_reset;
-
-       return ret;
+       return TPL_TRUE;
 }
 
 static tbm_surface_h
@@ -671,9 +260,7 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
                                                                 tbm_fd *sync_fence)
 {
        tbm_surface_h tbm_surface = NULL;
-       tbm_surface_queue_h tbm_queue = NULL;
        tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
-       tpl_tbm_buffer_t *tpl_tbm_buffer = NULL;
        tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data;
 
        TPL_ASSERT(surface);
@@ -684,11 +271,9 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
        if (sync_fence)
                *sync_fence = -1;
 
-       tbm_queue = (tbm_surface_queue_h)surface->native_handle;
-
        TPL_OBJECT_UNLOCK(surface);
-       if (tbm_surface_queue_can_dequeue(tbm_queue, 1) == 1)
-               tsq_err = tbm_surface_queue_dequeue(tbm_queue, &tbm_surface);
+       if (tbm_surface_queue_can_dequeue(tpl_tbm_surface->tbm_queue, 1) == 1)
+               tsq_err = tbm_surface_queue_dequeue(tpl_tbm_surface->tbm_queue, &tbm_surface);
        TPL_OBJECT_LOCK(surface);
 
        if (!tbm_surface) {
@@ -697,100 +282,20 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
                return NULL;
        }
 
-       if (surface->backend.type == TPL_BACKEND_TBM_VULKAN_WSI) {
-               if ((tpl_tbm_buffer = __tpl_tbm_get_tbm_buffer_from_tbm_surface(
-                                                                 tbm_surface)) == NULL) {
-                       tpl_tbm_buffer = (tpl_tbm_buffer_t *) calloc(1, sizeof(tpl_tbm_buffer_t));
-                       if (!tpl_tbm_buffer) {
-                               TPL_ERR("Mem alloc for tpl_tbm_buffer failed!");
-                               return NULL;
-                       }
-                       __tpl_tbm_set_tbm_buffer_to_tbm_surface(tbm_surface, tpl_tbm_buffer);
-               }
-       }
-
        /* Inc ref count about tbm_surface */
        /* It will be dec when before tbm_surface_queue_enqueue called */
        tbm_surface_internal_ref(tbm_surface);
 
-       tpl_tbm_surface->need_reset = TPL_FALSE;
-
        TPL_LOG_B("TBM", "[DEQ] tpl_surface(%p) tbm_queue(%p) tbm_surface(%p) bo(%d)",
-                         surface, tbm_queue, tbm_surface,
+                         surface, tpl_tbm_surface->tbm_queue, tbm_surface,
                          tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
 
-       TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
+       TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO(%d)",
+                                         tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
 
        return tbm_surface;
 }
 
-static tpl_result_t
-__tpl_tbm_surface_get_swapchain_buffers(tpl_surface_t *surface,
-                                                                               tbm_surface_h **buffers,
-                                                                               int *buffer_count)
-{
-       tbm_surface_h buffer = NULL;
-       tbm_surface_queue_h tbm_queue = NULL;
-       tbm_surface_h *swapchain_buffers = NULL;
-       tbm_surface_queue_error_e tsq_err;
-       tpl_result_t ret = TPL_ERROR_NONE;
-       int i, queue_size, dequeue_count = 0;
-
-       TPL_ASSERT(surface);
-       TPL_ASSERT(buffers);
-       TPL_ASSERT(buffer_count);
-
-       tbm_queue = (tbm_surface_queue_h)surface->native_handle;
-       TPL_ASSERT(tbm_queue);
-
-       queue_size = tbm_surface_queue_get_size(tbm_queue);
-       swapchain_buffers = (tbm_surface_h *)calloc(1,
-                                               sizeof(tbm_surface_h) * queue_size);
-       if (!swapchain_buffers) {
-               TPL_ERR("Failed to allocate memory for buffers.");
-               return TPL_ERROR_OUT_OF_MEMORY;
-       }
-
-       for (i = 0; i < queue_size; i++) {
-               tsq_err = tbm_surface_queue_dequeue(tbm_queue, &buffer);
-               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-                       TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d",
-                                       tsq_err);
-                       dequeue_count = i;
-                       ret = TPL_ERROR_OUT_OF_MEMORY;
-                       goto get_buffer_fail;
-               }
-               swapchain_buffers[i] = buffer;
-       }
-
-       for (i = 0 ; i < queue_size; i++) {
-               tsq_err = tbm_surface_queue_release(tbm_queue, swapchain_buffers[i]);
-               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-                       TPL_ERR("Failed to release tbm_surface. | tsq_err = %d", tsq_err);
-                       ret = TPL_ERROR_INVALID_OPERATION;
-                       goto release_buffer_fail;
-               }
-       }
-
-       *buffers = swapchain_buffers;
-       *buffer_count = queue_size;
-       return TPL_ERROR_NONE;
-
-get_buffer_fail:
-       for (i = 0 ; i < dequeue_count ; i++) {
-               tsq_err = tbm_surface_queue_release(tbm_queue, swapchain_buffers[i]);
-               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-                       TPL_ERR("Failed to release tbm_surface. | tsq_err = %d", tsq_err);
-                       goto release_buffer_fail;
-               }
-       }
-
-release_buffer_fail:
-       free(swapchain_buffers);
-       return ret;
-
-}
-
 tpl_bool_t
 __tpl_display_choose_backend_tbm(tpl_handle_t native_dpy)
 {
@@ -811,40 +316,6 @@ __tpl_display_choose_backend_tbm(tpl_handle_t native_dpy)
        return ret;
 }
 
-static tpl_result_t
-__tpl_tbm_display_query_window_supported_buffer_count(
-       tpl_display_t *display,
-       tpl_handle_t window, int *min, int *max)
-{
-       TPL_ASSERT(display);
-
-       if (!display->backend.data) return TPL_ERROR_INVALID_OPERATION;
-
-       if (min) *min = 0;
-       if (max) *max = 0; /* 0 mean no limit in vulkan */
-
-       return TPL_ERROR_NONE;
-}
-
-static tpl_result_t
-__tpl_tbm_display_query_window_supported_present_modes(
-       tpl_display_t *display,
-       tpl_handle_t window, int *modes)
-{
-       TPL_ASSERT(display);
-
-       if (!display->backend.data) return TPL_ERROR_INVALID_OPERATION;
-
-       if (modes) {
-               *modes = TPL_DISPLAY_PRESENT_MODE_MAILBOX | TPL_DISPLAY_PRESENT_MODE_IMMEDIATE;
-
-               if (__tpl_worker_support_vblank() == TPL_TRUE)
-                       *modes |= TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED;
-       }
-
-
-       return TPL_ERROR_NONE;
-}
 
 void
 __tpl_display_init_backend_tbm(tpl_display_backend_t *backend,
@@ -863,11 +334,6 @@ __tpl_display_init_backend_tbm(tpl_display_backend_t *backend,
        backend->get_pixmap_info = __tpl_tbm_display_get_pixmap_info;
        backend->get_buffer_from_native_pixmap =
                __tpl_tbm_display_get_buffer_from_native_pixmap;
-       backend->query_window_supported_buffer_count =
-               __tpl_tbm_display_query_window_supported_buffer_count;
-       backend->query_window_supported_present_modes =
-               __tpl_tbm_display_query_window_supported_present_modes;
-
 }
 
 void
@@ -884,13 +350,5 @@ __tpl_surface_init_backend_tbm(tpl_surface_backend_t *backend,
        backend->validate = __tpl_tbm_surface_validate;
        backend->dequeue_buffer = __tpl_tbm_surface_dequeue_buffer;
        backend->enqueue_buffer = __tpl_tbm_surface_enqueue_buffer;
-       backend->create_swapchain = __tpl_tbm_surface_create_swapchain;
-
-       if (type == TPL_BACKEND_TBM_VULKAN_WSI) {
-               backend->destroy_swapchain = __tpl_tbm_surface_destroy_swapchain;
-
-               backend->get_swapchain_buffers =
-                       __tpl_tbm_surface_get_swapchain_buffers;
-       }
 }