/*
 * src/tpl_wl_vk_thread.c — platform/core/uifw/libtpl-egl.git
 * (page header: "Implementation for SwapInterval is zero")
 */
1 #define inline __inline__
2 #undef inline
3
4 #include "tpl_internal.h"
5
6 #include <string.h>
7 #include <fcntl.h>
8 #include <unistd.h>
9 #include <sys/eventfd.h>
10
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
15
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
19
20 #include <tdm_client.h>
21
22 #ifndef TIZEN_FEATURE_ENABLE
23 #define TIZEN_FEATURE_ENABLE 1
24 #endif
25
26 #if TIZEN_FEATURE_ENABLE
27 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
28 #endif
29
30 #include "tpl_utils_gthread.h"
31
32 #define BUFFER_ARRAY_SIZE 10
33 #define VK_CLIENT_QUEUE_SIZE 3
34
35 static int wl_vk_buffer_key;
36 #define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
37
38 typedef struct _tpl_wl_vk_display       tpl_wl_vk_display_t;
39 typedef struct _tpl_wl_vk_surface       tpl_wl_vk_surface_t;
40 typedef struct _tpl_wl_vk_swapchain     tpl_wl_vk_swapchain_t;
41 typedef struct _tpl_wl_vk_buffer        tpl_wl_vk_buffer_t;
42
/* Per-display state owned by the dedicated "wl_vk_thread".
 * Bundles the wayland client objects (private event queue, tbm client,
 * optional explicit-sync global) with the tdm client used for vblank
 * waiting, plus the synchronization primitives that let other threads
 * wait for gsource finalization. */
struct _tpl_wl_vk_display {
	tpl_gsource                  *disp_source;    /* gsource polling the wl_display fd */
	tpl_gthread                  *thread;         /* dedicated event thread ("wl_vk_thread") */
	tpl_gmutex                    wl_event_mutex; /* serializes dispatch on ev_queue */

	struct wl_display            *wl_display;     /* native display handle from the client */
	struct wl_event_queue        *ev_queue;       /* private queue for tpl-owned proxies */
	struct wayland_tbm_client    *wl_tbm_client;
	int                           last_error; /* errno of the last wl_display error*/

	tpl_bool_t                    wl_initialized; /* TRUE after _thread_wl_display_init() succeeds */

	struct {
		tdm_client                   *tdm_client;
		tpl_gsource                  *tdm_source;     /* gsource polling tdm_display_fd */
		int                           tdm_display_fd;
		tpl_bool_t                    tdm_initialized;
		/* To make sure that tpl_gsource has been successfully finalized. */
		tpl_bool_t                    gsource_finalized;
		tpl_gmutex                    tdm_mutex;
		tpl_gcond                     tdm_cond;
	} tdm;

	tpl_bool_t                    use_wait_vblank;   /* default TRUE; disabled via TPL_WAIT_VBLANK=0 */
	tpl_bool_t                    use_explicit_sync; /* TRUE when explicit-sync global bound (see TPL_EFS) */
	tpl_bool_t                    prepared;          /* between wl_display_prepare_read_queue and read/cancel */

	/* To make sure that tpl_gsource has been successfully finalized. */
	tpl_bool_t                    gsource_finalized;
	tpl_gmutex                    disp_mutex;
	tpl_gcond                     disp_cond;

	/* device surface capabilities */
	int                           min_buffer;
	int                           max_buffer;
	int                           present_modes;
#if TIZEN_FEATURE_ENABLE
	struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
#endif
};
83
/* Swapchain state for a vulkan WSI surface: wraps the backing
 * tbm_surface_queue and the buffer array exported to the client.
 * Carries a ref count (ref_cnt), presumably because the tpl thread and
 * the client side can hold it concurrently — confirm against the
 * acquire/release sites (not in view). */
struct _tpl_wl_vk_swapchain {
	tpl_wl_vk_surface_t          *wl_vk_surface;  /* owning surface (back pointer) */

	tbm_surface_queue_h           tbm_queue;      /* backing buffer queue */
	tpl_result_t                  result;         /* result of the thread-side queue operation */

	tpl_bool_t                    create_done;    /* TRUE once queue creation has completed */

	/* properties requested at swapchain creation */
	struct {
		int                       width;
		int                       height;
		tbm_format                format;
		int                       buffer_count;
		int                       present_mode;
	} properties;

	tbm_surface_h                *swapchain_buffers;

	/* [TEMP] To fix dEQP-VK.wsi.wayland.swapchain.modify.resize crash issue 
	 * It will be fixed properly using old_swapchain handle */
	tbm_surface_h                *old_swapchain_buffers;

	tpl_util_atomic_uint          ref_cnt;
};
108
/* Request messages delivered to the surface gsource.
 * Values are distinct powers of two (bit flags), so they can be
 * combined/tested individually in sent_message. */
typedef enum surf_message {
	NONE_MESSAGE = 0,
	INIT_SURFACE = 1,
	ACQUIRABLE = 2,
	CREATE_QUEUE = 4,
	DESTROY_QUEUE = 8,
} surf_message;
116
/* Per-surface state driven by the tpl thread. Owns the swapchain, the
 * wl_vk_buffer tracking array and the per-surface vblank object used
 * to pace FIFO-style present modes. */
struct _tpl_wl_vk_surface {
	tpl_gsource                  *surf_source;   /* gsource processing surf_message requests */

	tpl_wl_vk_swapchain_t        *swapchain;

	struct wl_surface            *wl_surface;    /* target wayland surface */
#if TIZEN_FEATURE_ENABLE
	struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
#endif
	tdm_client_vblank            *vblank;        /* per-surface vblank handle */

	/* surface information */
	int                           render_done_cnt;

	tpl_wl_vk_display_t          *wl_vk_display; /* owning display (back pointer) */
	tpl_surface_t                *tpl_surface;   /* frontend surface (back pointer) */

	/* wl_vk_buffer array for buffer tracing */
	tpl_wl_vk_buffer_t           *buffers[BUFFER_ARRAY_SIZE];
	int                           buffer_cnt; /* the number of using wl_vk_buffers */
	tpl_gmutex                    buffers_mutex; /* guards buffers[] and buffer_cnt */

	tpl_list_t                   *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */

	tpl_gmutex                    surf_mutex;
	tpl_gcond                     surf_cond;

	/* for waiting draw done */
	tpl_bool_t                    is_activated;
	tpl_bool_t                    reset; /* TRUE if queue reseted by external  */
	tpl_bool_t                    vblank_done;   /* NOTE(review): looks like "awaited vblank fired" — confirm at vblank cb */
	tpl_bool_t                    vblank_enable;
	tpl_bool_t                    initialized_in_thread;

	/* To make sure that tpl_gsource has been successfully finalized. */
	tpl_bool_t                    gsource_finalized;

	surf_message                  sent_message;  /* pending request bits (see surf_message) */

	int                           post_interval; /* swap interval requested by the client */
};
158
/* Lifecycle stages of a wl_vk_buffer, in the order a buffer normally
 * progresses: released -> dequeued by client -> enqueued -> acquired by
 * the tpl thread -> waiting for fence/vblank -> committed to the server.
 * Values must stay in sync with status_to_string[] below. */
typedef enum buffer_status {
	RELEASED = 0,             // 0
	DEQUEUED,                 // 1
	ENQUEUED,                 // 2
	ACQUIRED,                 // 3
	WAITING_SIGNALED,         // 4
	WAITING_VBLANK,           // 5
	COMMITTED,                // 6
} buffer_status_t;
168
/* Human-readable names for buffer_status_t, indexed by status value.
 * Must stay in sync with the enum above. */
static const char *status_to_string[7] = {
	"RELEASED", "DEQUEUED", "ENQUEUED", "ACQUIRED",
	"WAITING_SIGNALED", "WAITING_VBLANK", "COMMITTED",
};
178
/* Per-buffer tracking record attached to a tbm_surface (via
 * KEY_WL_VK_BUFFER). Traces the buffer through the buffer_status_t
 * lifecycle and carries the fence fds used for explicit sync. */
struct _tpl_wl_vk_buffer {
	tbm_surface_h                 tbm_surface;
	int                           bo_name;       /* bo name, used for tracing/logging */

	struct wl_buffer             *wl_buffer;
	int                           dx, dy; /* position to attach to wl_surface */
	int                           width, height; /* size to attach to wl_surface */

	buffer_status_t               status; /* for tracing buffer status */
	int                           idx; /* position index in buffers array of wl_vk_surface */

	/* for damage region */
	int                           num_rects;
	int                          *rects;

	/* for checking need_to_commit (frontbuffer mode) */
	tpl_bool_t                    need_to_commit;

#if TIZEN_FEATURE_ENABLE
	/* to get release event via zwp_linux_buffer_release_v1 */
	struct zwp_linux_buffer_release_v1 *buffer_release;
#endif

	/* each buffers own its release_fence_fd, until it passes ownership
	 * to it to EGL */
	int32_t                       release_fence_fd;

	/* each buffers own its acquire_fence_fd.
	 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
	 * will be passed to display server
	 * Otherwise it will be used as a fence waiting for render done
	 * on tpl thread */
	int32_t                       acquire_fence_fd;

	tpl_gmutex                    mutex;  /* per-buffer lock; paired with cond below */
	tpl_gcond                     cond;

	tpl_wl_vk_surface_t          *wl_vk_surface; /* owner surface (back pointer) */
};
218
219 static void
220 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
221 static int
222 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
223 static void
224 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
225 static void
226 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
227 static tpl_result_t
228 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
229 static void
230 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
231 static tpl_result_t
232 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
233 static void
234 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
235                                                   tpl_wl_vk_buffer_t *wl_vk_buffer);
236
237 static tpl_bool_t
238 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
239 {
240         struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
241
242         if (!wl_vk_native_dpy) {
243                 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
244                 return TPL_FALSE;
245         }
246
247         /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
248            is a memory address pointing the structure of wl_display_interface. */
249         if (wl_vk_native_dpy == &wl_display_interface)
250                 return TPL_TRUE;
251
252         if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
253                                 strlen(wl_display_interface.name)) == 0) {
254                 return TPL_TRUE;
255         }
256
257         return TPL_FALSE;
258 }
259
260 static tpl_bool_t
261 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
262 {
263         tpl_wl_vk_display_t        *wl_vk_display = NULL;
264         tdm_error                   tdm_err = TDM_ERROR_NONE;
265
266         TPL_IGNORE(message);
267
268         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
269         if (!wl_vk_display) {
270                 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
271                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
272                 return TPL_FALSE;
273         }
274
275         tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client);
276
277         /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
278          * When tdm_source is no longer available due to an unexpected situation,
279          * wl_vk_thread must remove it from the thread and destroy it.
280          * In that case, tdm_vblank can no longer be used for surfaces and displays
281          * that used this tdm_source. */
282         if (tdm_err != TDM_ERROR_NONE) {
283                 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
284                                 tdm_err);
285                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
286
287                 tpl_gsource_destroy(gsource, TPL_FALSE);
288
289                 wl_vk_display->tdm.tdm_source = NULL;
290
291                 return TPL_FALSE;
292         }
293
294         return TPL_TRUE;
295 }
296
297 static void
298 __thread_func_tdm_finalize(tpl_gsource *gsource)
299 {
300         tpl_wl_vk_display_t *wl_vk_display = NULL;
301
302         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
303
304         tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
305
306         TPL_INFO("[TDM_CLIENT_FINI]",
307                          "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
308                          wl_vk_display, wl_vk_display->tdm.tdm_client,
309                          wl_vk_display->tdm.tdm_display_fd);
310
311         if (wl_vk_display->tdm.tdm_client) {
312                 tdm_client_destroy(wl_vk_display->tdm.tdm_client);
313                 wl_vk_display->tdm.tdm_client = NULL;
314                 wl_vk_display->tdm.tdm_display_fd = -1;
315         }
316
317         wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
318         wl_vk_display->tdm.gsource_finalized = TPL_TRUE;
319
320         tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond);
321         tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
322 }
323
/* gsource callbacks for the tdm_client fd: plain fd polling, so no
 * prepare/check hooks are needed — dispatch drains tdm events. */
static tpl_gsource_functions tdm_funcs = {
	.prepare  = NULL,
	.check    = NULL,
	.dispatch = __thread_func_tdm_dispatch,
	.finalize = __thread_func_tdm_finalize,
};
330
331 static tpl_result_t
332 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
333 {
334         tdm_client       *tdm_client = NULL;
335         int               tdm_display_fd = -1;
336         tdm_error         tdm_err = TDM_ERROR_NONE;
337
338         tdm_client = tdm_client_create(&tdm_err);
339         if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
340                 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
341                 return TPL_ERROR_INVALID_OPERATION;
342         }
343
344         tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
345         if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
346                 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
347                 tdm_client_destroy(tdm_client);
348                 return TPL_ERROR_INVALID_OPERATION;
349         }
350
351         wl_vk_display->tdm.tdm_display_fd  = tdm_display_fd;
352         wl_vk_display->tdm.tdm_client      = tdm_client;
353         wl_vk_display->tdm.tdm_source      = NULL;
354         wl_vk_display->tdm.tdm_initialized = TPL_TRUE;
355
356         TPL_INFO("[TDM_CLIENT_INIT]",
357                          "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
358                          wl_vk_display, tdm_client, tdm_display_fd);
359
360         return TPL_ERROR_NONE;
361 }
362
/* wl_registry "global" listener: binds the explicit-synchronization
 * global when advertised, unless disabled via TPL_EFS=0. */
static void
__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
								 uint32_t name, const char *interface,
								 uint32_t version)
{
#if TIZEN_FEATURE_ENABLE
	tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
	char *env;

	if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") != 0)
		return;

	/* TPL_EFS=0 opts out of explicit fence sync. */
	env = tpl_getenv("TPL_EFS");
	if (env && !atoi(env)) {
		wl_vk_display->use_explicit_sync = TPL_FALSE;
		return;
	}

	wl_vk_display->explicit_sync =
		wl_registry_bind(wl_registry, name,
						 &zwp_linux_explicit_synchronization_v1_interface, 1);
	wl_vk_display->use_explicit_sync = TPL_TRUE;
	TPL_LOG_D("[REGISTRY_BIND]",
			  "wl_vk_display(%p) bind zwp_linux_explicit_synchronization_v1_interface",
			  wl_vk_display);
#endif
}
387
/* wl_registry "global_remove" listener: nothing to do, but the slot is
 * required by the listener struct. */
static void
__cb_wl_resistry_global_remove_callback(void *data,
										struct wl_registry *wl_registry,
										uint32_t name)
{
	TPL_IGNORE(data);
	TPL_IGNORE(wl_registry);
	TPL_IGNORE(name);
}
394
/* Registry listener pair: bind interesting globals, ignore removals. */
static const struct wl_registry_listener registry_listener = {
	__cb_wl_resistry_global_callback,
	__cb_wl_resistry_global_remove_callback
};
399
400 static void
401 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
402                                           const char *func_name)
403 {
404         int dpy_err;
405         char buf[1024];
406         strerror_r(errno, buf, sizeof(buf));
407
408         if (wl_vk_display->last_error == errno)
409                 return;
410
411         TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
412
413         dpy_err = wl_display_get_error(wl_vk_display->wl_display);
414         if (dpy_err == EPROTO) {
415                 const struct wl_interface *err_interface;
416                 uint32_t err_proxy_id, err_code;
417                 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
418                                                                                                  &err_interface,
419                                                                                                  &err_proxy_id);
420                 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
421                                 err_interface->name, err_code, err_proxy_id);
422         }
423
424         wl_vk_display->last_error = errno;
425 }
426
427 static tpl_result_t
428 _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
429 {
430         struct wl_registry *registry                = NULL;
431         struct wl_event_queue *queue                = NULL;
432         struct wl_display *display_wrapper          = NULL;
433         struct wl_proxy *wl_tbm                     = NULL;
434         struct wayland_tbm_client *wl_tbm_client    = NULL;
435         int ret;
436         tpl_result_t result = TPL_ERROR_NONE;
437
438         queue = wl_display_create_queue(wl_vk_display->wl_display);
439         if (!queue) {
440                 TPL_ERR("Failed to create wl_queue wl_display(%p)",
441                                 wl_vk_display->wl_display);
442                 result = TPL_ERROR_INVALID_OPERATION;
443                 goto fini;
444         }
445
446         wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
447         if (!wl_vk_display->ev_queue) {
448                 TPL_ERR("Failed to create wl_queue wl_display(%p)",
449                                 wl_vk_display->wl_display);
450                 result = TPL_ERROR_INVALID_OPERATION;
451                 goto fini;
452         }
453
454         display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
455         if (!display_wrapper) {
456                 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
457                                 wl_vk_display->wl_display);
458                 result = TPL_ERROR_INVALID_OPERATION;
459                 goto fini;
460         }
461
462         wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
463
464         registry = wl_display_get_registry(display_wrapper);
465         if (!registry) {
466                 TPL_ERR("Failed to create wl_registry");
467                 result = TPL_ERROR_INVALID_OPERATION;
468                 goto fini;
469         }
470
471         wl_proxy_wrapper_destroy(display_wrapper);
472         display_wrapper = NULL;
473
474         wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
475         if (!wl_tbm_client) {
476                 TPL_ERR("Failed to initialize wl_tbm_client.");
477                 result = TPL_ERROR_INVALID_CONNECTION;
478                 goto fini;
479         }
480
481         wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
482         if (!wl_tbm) {
483                 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
484                 result = TPL_ERROR_INVALID_CONNECTION;
485                 goto fini;
486         }
487
488         wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
489         wl_vk_display->wl_tbm_client = wl_tbm_client;
490
491         if (wl_registry_add_listener(registry, &registry_listener,
492                                                                  wl_vk_display)) {
493                 TPL_ERR("Failed to wl_registry_add_listener");
494                 result = TPL_ERROR_INVALID_OPERATION;
495                 goto fini;
496         }
497
498         ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
499         if (ret == -1) {
500                 _wl_display_print_err(wl_vk_display, "roundtrip_queue");
501                 result = TPL_ERROR_INVALID_OPERATION;
502                 goto fini;
503         }
504
505 #if TIZEN_FEATURE_ENABLE
506         if (wl_vk_display->explicit_sync) {
507                 wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
508                                                    wl_vk_display->ev_queue);
509                 TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
510                                   wl_vk_display->explicit_sync);
511         }
512 #endif
513
514         wl_vk_display->wl_initialized = TPL_TRUE;
515
516         TPL_INFO("[WAYLAND_INIT]",
517                          "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
518                          wl_vk_display, wl_vk_display->wl_display,
519                          wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
520 #if TIZEN_FEATURE_ENABLE
521         TPL_INFO("[WAYLAND_INIT]",
522                          "explicit_sync(%p)",
523                          wl_vk_display->explicit_sync);
524 #endif
525 fini:
526         if (display_wrapper)
527                 wl_proxy_wrapper_destroy(display_wrapper);
528         if (registry)
529                 wl_registry_destroy(registry);
530         if (queue)
531                 wl_event_queue_destroy(queue);
532
533         return result;
534 }
535
/* Tears down wayland client state on the tpl thread, reversing
 * _thread_wl_display_init(). Order matters: a pending prepare_read
 * must be cancelled first, remaining events are dispatched next, and
 * protocol objects are destroyed before ev_queue itself. */
static void
_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
{
	/* If wl_vk_display is in prepared state, cancel it */
	if (wl_vk_display->prepared) {
		wl_display_cancel_read(wl_vk_display->wl_display);
		wl_vk_display->prepared = TPL_FALSE;
	}

	/* Drain events already queued for this thread before teardown. */
	if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
										  wl_vk_display->ev_queue) == -1) {
		_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->explicit_sync) {
		TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
				 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
				 wl_vk_display, wl_vk_display->explicit_sync);
		zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
		wl_vk_display->explicit_sync = NULL;
	}
#endif

	if (wl_vk_display->wl_tbm_client) {
		struct wl_proxy *wl_tbm = NULL;

		wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
										wl_vk_display->wl_tbm_client);
		/* Detach wl_tbm from ev_queue before the queue is destroyed. */
		if (wl_tbm) {
			wl_proxy_set_queue(wl_tbm, NULL);
		}

		TPL_INFO("[WL_TBM_DEINIT]",
				 "wl_vk_display(%p) wl_tbm_client(%p)",
				 wl_vk_display, wl_vk_display->wl_tbm_client);
		wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
		wl_vk_display->wl_tbm_client = NULL;
	}

	wl_event_queue_destroy(wl_vk_display->ev_queue);

	wl_vk_display->wl_initialized = TPL_FALSE;

	TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
			 wl_vk_display, wl_vk_display->wl_display);
}
583
584 static void*
585 _thread_init(void *data)
586 {
587         tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
588
589         if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
590                 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
591                                 wl_vk_display, wl_vk_display->wl_display);
592         }
593
594         if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
595                 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
596         }
597
598         return wl_vk_display;
599 }
600
/* gsource prepare callback for the wl_display fd.
 * Implements the libwayland multi-thread read protocol: acquire a read
 * slot with wl_display_prepare_read_queue() (dispatching any locally
 * queued events first), then poll; check() later completes with
 * read_events() or cancel_read(). Returns TPL_TRUE to skip polling and
 * go straight to dispatch. */
static tpl_bool_t
__thread_func_disp_prepare(tpl_gsource *gsource)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	/* If this wl_vk_display is already prepared,
	 * do nothing in this function. */
	if (wl_vk_display->prepared)
		return TPL_FALSE;

	/* If there is a last_error, there is no need to poll,
	 * so skip directly to dispatch.
	 * prepare -> dispatch */
	if (wl_vk_display->last_error)
		return TPL_TRUE;

	/* Dispatch pending events until the read slot is acquired. */
	while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
										 wl_vk_display->ev_queue) != 0) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
		}
	}

	wl_vk_display->prepared = TPL_TRUE;

	/* Flush buffered requests before this thread goes to sleep in poll. */
	wl_display_flush(wl_vk_display->wl_display);

	return TPL_FALSE;
}
632
633 static tpl_bool_t
634 __thread_func_disp_check(tpl_gsource *gsource)
635 {
636         tpl_wl_vk_display_t *wl_vk_display =
637                 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
638         tpl_bool_t ret = TPL_FALSE;
639
640         if (!wl_vk_display->prepared)
641                 return ret;
642
643         /* If prepared, but last_error is set,
644          * cancel_read is executed and FALSE is returned.
645          * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
646          * and skipping disp_check from prepare to disp_dispatch.
647          * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
648         if (wl_vk_display->prepared && wl_vk_display->last_error) {
649                 wl_display_cancel_read(wl_vk_display->wl_display);
650                 return ret;
651         }
652
653         if (tpl_gsource_check_io_condition(gsource)) {
654                 if (wl_display_read_events(wl_vk_display->wl_display) == -1)
655                         _wl_display_print_err(wl_vk_display, "read_event");
656                 ret = TPL_TRUE;
657         } else {
658                 wl_display_cancel_read(wl_vk_display->wl_display);
659                 ret = TPL_FALSE;
660         }
661
662         wl_vk_display->prepared = TPL_FALSE;
663
664         return ret;
665 }
666
/* gsource dispatch callback for the wl_display fd.
 * Dispatches the events read in check() under wl_event_mutex and
 * flushes outgoing requests. Returns TPL_FALSE (source removal) once
 * last_error is set, TPL_TRUE to keep the source alive. */
static tpl_bool_t
__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	TPL_IGNORE(message);

	/* If there is last_error, SOURCE_REMOVE should be returned
	 * to remove the gsource from the main loop.
	 * This is because wl_vk_display is not valid since last_error was set.*/
	if (wl_vk_display->last_error) {
		return TPL_FALSE;
	}

	/* wl_event_mutex serializes event dispatch against other threads
	 * touching the same queue. */
	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
		}
	}

	wl_display_flush(wl_vk_display->wl_display);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return TPL_TRUE;
}
695
696 static void
697 __thread_func_disp_finalize(tpl_gsource *gsource)
698 {
699         tpl_wl_vk_display_t *wl_vk_display =
700                 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
701
702         tpl_gmutex_lock(&wl_vk_display->disp_mutex);
703         TPL_LOG_D("[D_FINALIZE]", "wl_vk_display(%p) tpl_gsource(%p)",
704                           wl_vk_display, gsource);
705
706         if (wl_vk_display->wl_initialized)
707                 _thread_wl_display_fini(wl_vk_display);
708
709         wl_vk_display->gsource_finalized = TPL_TRUE;
710
711         tpl_gcond_signal(&wl_vk_display->disp_cond);
712         tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
713
714         return;
715 }
716
717
/* gsource callbacks driving the wl_display fd with the standard
 * libwayland prepare/read(check)/dispatch pattern. */
static tpl_gsource_functions disp_funcs = {
	.prepare  = __thread_func_disp_prepare,
	.check    = __thread_func_disp_check,
	.dispatch = __thread_func_disp_dispatch,
	.finalize = __thread_func_disp_finalize,
};
724
725 static tpl_result_t
726 __tpl_wl_vk_display_init(tpl_display_t *display)
727 {
728         TPL_ASSERT(display);
729
730         tpl_wl_vk_display_t *wl_vk_display = NULL;
731
732         /* Do not allow default display in wayland */
733         if (!display->native_handle) {
734                 TPL_ERR("Invalid native handle for display.");
735                 return TPL_ERROR_INVALID_PARAMETER;
736         }
737
738         if (!_check_native_handle_is_wl_display(display->native_handle)) {
739                 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
740                 return TPL_ERROR_INVALID_PARAMETER;
741         }
742
743         wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
744                                                         sizeof(tpl_wl_vk_display_t));
745         if (!wl_vk_display) {
746                 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
747                 return TPL_ERROR_OUT_OF_MEMORY;
748         }
749
750         display->backend.data             = wl_vk_display;
751         display->bufmgr_fd                = -1;
752
753         wl_vk_display->tdm.tdm_initialized    = TPL_FALSE;
754         wl_vk_display->wl_initialized     = TPL_FALSE;
755
756         wl_vk_display->ev_queue           = NULL;
757         wl_vk_display->wl_display         = (struct wl_display *)display->native_handle;
758         wl_vk_display->last_error         = 0;
759         wl_vk_display->use_explicit_sync  = TPL_FALSE;   // default disabled
760         wl_vk_display->prepared           = TPL_FALSE;
761
762         /* Wayland Interfaces */
763 #if TIZEN_FEATURE_ENABLE
764         wl_vk_display->explicit_sync      = NULL;
765 #endif
766         wl_vk_display->wl_tbm_client      = NULL;
767
768         /* Vulkan specific surface capabilities */
769         wl_vk_display->min_buffer         = 2;
770         wl_vk_display->max_buffer         = VK_CLIENT_QUEUE_SIZE;
771         wl_vk_display->present_modes      = TPL_DISPLAY_PRESENT_MODE_FIFO;
772
773         wl_vk_display->use_wait_vblank    = TPL_TRUE;   // default enabled
774         {
775                 char *env = tpl_getenv("TPL_WAIT_VBLANK");
776                 if (env && !atoi(env)) {
777                         wl_vk_display->use_wait_vblank = TPL_FALSE;
778                 }
779         }
780
781         tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
782
783         tpl_gmutex_init(&wl_vk_display->disp_mutex);
784         tpl_gcond_init(&wl_vk_display->disp_cond);
785
786         /* Create gthread */
787         wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
788                                                                                            (tpl_gthread_func)_thread_init,
789                                                                                            (void *)wl_vk_display);
790         if (!wl_vk_display->thread) {
791                 TPL_ERR("Failed to create wl_vk_thread");
792                 goto free_display;
793         }
794
795         wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
796                                                                                                         (void *)wl_vk_display,
797                                                                                                         wl_display_get_fd(wl_vk_display->wl_display),
798                                                                                                         FD_TYPE_SOCKET,
799                                                                                                         &disp_funcs, SOURCE_TYPE_NORMAL);
800         if (!wl_vk_display->disp_source) {
801                 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
802                                 display->native_handle,
803                                 wl_vk_display->thread);
804                 goto free_display;
805         }
806
807         tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex);
808         tpl_gcond_init(&wl_vk_display->tdm.tdm_cond);
809
810         wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread,
811                                                                                                    (void *)wl_vk_display,
812                                                                                                    wl_vk_display->tdm.tdm_display_fd,
813                                                                                                    FD_TYPE_SOCKET,
814                                                                                                    &tdm_funcs, SOURCE_TYPE_NORMAL);
815         if (!wl_vk_display->tdm.tdm_source) {
816                 TPL_ERR("Failed to create tdm_gsource\n");
817                 goto free_display;
818         }
819
820         TPL_INFO("[DISPLAY_INIT]",
821                          "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
822                          wl_vk_display,
823                          wl_vk_display->thread,
824                          wl_vk_display->wl_display);
825
826         TPL_INFO("[DISPLAY_INIT]",
827                          "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
828                          wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
829                          wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
830
831         return TPL_ERROR_NONE;
832
833 free_display:
834         if (wl_vk_display->tdm.tdm_source) {
835                 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
836                 while (!wl_vk_display->tdm.gsource_finalized) {
837                         tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
838                         tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
839                 }
840                 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
841         }
842
843         if (wl_vk_display->disp_source) {
844                 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
845                 while (!wl_vk_display->gsource_finalized) {
846                         tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
847                         tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
848                 }
849                 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
850         }
851
852         if (wl_vk_display->thread) {
853                 tpl_gthread_destroy(wl_vk_display->thread);
854         }
855
856         tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
857         tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
858         tpl_gcond_clear(&wl_vk_display->disp_cond);
859         tpl_gmutex_clear(&wl_vk_display->disp_mutex);
860
861         wl_vk_display->thread = NULL;
862         free(wl_vk_display);
863
864         display->backend.data = NULL;
865         return TPL_ERROR_INVALID_OPERATION;
866 }
867
/* Finalize the wl_vk display backend.
 *
 * Tears down the per-display worker thread state in strict order:
 * tdm gsource -> display gsource -> gthread -> mutex/cond objects.
 * Safe to call when backend.data is already NULL (no-op except for
 * clearing backend.data).
 */
static void
__tpl_wl_vk_display_fini(tpl_display_t *display)
{
	tpl_wl_vk_display_t *wl_vk_display;

	TPL_ASSERT(display);

	wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
	if (wl_vk_display) {
		TPL_INFO("[DISPLAY_FINI]",
				 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
				 wl_vk_display,
				 wl_vk_display->thread,
				 wl_vk_display->wl_display);

		if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) {
			/* This is a protection to prevent problems that arise in unexpected situations
			 * that g_cond_wait cannot work normally.
			 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
			 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
			 * */
			tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
			while (!wl_vk_display->tdm.gsource_finalized) {
				tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
				tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
			}
			wl_vk_display->tdm.tdm_source = NULL;
			tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
		}

		/* This is a protection to prevent problems that arise in unexpected situations
		 * that g_cond_wait cannot work normally.
		 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
		 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
		 * */
		tpl_gmutex_lock(&wl_vk_display->disp_mutex);
		while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) {
			tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
			tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
		}
		wl_vk_display->disp_source = NULL;
		tpl_gmutex_unlock(&wl_vk_display->disp_mutex);

		/* Both gsources are finalized; the thread can now be joined. */
		if (wl_vk_display->thread) {
			tpl_gthread_destroy(wl_vk_display->thread);
			wl_vk_display->thread = NULL;
		}

		/* Clear sync primitives only after the thread is gone so no one
		 * can still be waiting on them. */
		tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
		tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
		tpl_gcond_clear(&wl_vk_display->disp_cond);
		tpl_gmutex_clear(&wl_vk_display->disp_mutex);

		tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);

		free(wl_vk_display);
	}

	display->backend.data = NULL;
}
928
929 static tpl_result_t
930 __tpl_wl_vk_display_query_config(tpl_display_t *display,
931                 tpl_surface_type_t surface_type,
932                 int red_size, int green_size,
933                 int blue_size, int alpha_size,
934                 int color_depth, int *native_visual_id,
935                 tpl_bool_t *is_slow)
936 {
937         TPL_ASSERT(display);
938
939         if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
940                         green_size == 8 && blue_size == 8 &&
941                         (color_depth == 32 || color_depth == 24)) {
942
943                 if (alpha_size == 8) {
944                         if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
945                         if (is_slow) *is_slow = TPL_FALSE;
946                         return TPL_ERROR_NONE;
947                 }
948                 if (alpha_size == 0) {
949                         if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
950                         if (is_slow) *is_slow = TPL_FALSE;
951                         return TPL_ERROR_NONE;
952                 }
953         }
954
955         return TPL_ERROR_INVALID_PARAMETER;
956 }
957
958 static tpl_result_t
959 __tpl_wl_vk_display_filter_config(tpl_display_t *display,
960                                                                           int *visual_id,
961                                                                           int alpha_size)
962 {
963         TPL_IGNORE(display);
964         TPL_IGNORE(visual_id);
965         TPL_IGNORE(alpha_size);
966         return TPL_ERROR_NONE;
967 }
968
969 static tpl_result_t
970 __tpl_wl_vk_display_query_window_supported_buffer_count(
971         tpl_display_t *display,
972         tpl_handle_t window, int *min, int *max)
973 {
974         tpl_wl_vk_display_t *wl_vk_display = NULL;
975
976         TPL_ASSERT(display);
977         TPL_ASSERT(window);
978
979         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
980         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
981
982         if (min) *min = wl_vk_display->min_buffer;
983         if (max) *max = wl_vk_display->max_buffer;
984
985         return TPL_ERROR_NONE;
986 }
987
988 static tpl_result_t
989 __tpl_wl_vk_display_query_window_supported_present_modes(
990         tpl_display_t *display,
991         tpl_handle_t window, int *present_modes)
992 {
993         tpl_wl_vk_display_t *wl_vk_display = NULL;
994
995         TPL_ASSERT(display);
996         TPL_ASSERT(window);
997
998         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
999         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1000
1001         if (present_modes) {
1002                 *present_modes = wl_vk_display->present_modes;
1003         }
1004
1005         return TPL_ERROR_NONE;
1006 }
1007
/* Drain and release every buffer still tracked in wl_vk_surface->buffers.
 *
 * For each occupied slot the buffer's status decides the action:
 *  - ACQUIRED..COMMITTED : release back to the tbm_surface_queue
 *  - DEQUEUED            : cancel the dequeue
 * Enqueued buffers that have not yet reached the safe point for the
 * current sync mode (WAITING_VBLANK without explicit sync, COMMITTED
 * with it) are waited on for up to 16ms first.
 *
 * Lock order: wl_event_mutex -> buffers_mutex -> wl_vk_buffer->mutex.
 */
static void
_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display      = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain        = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer        = NULL;
	tpl_bool_t need_to_release              = TPL_FALSE;
	tpl_bool_t need_to_cancel               = TPL_FALSE;
	buffer_status_t status                  = RELEASED;
	int idx                                 = 0;

	while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
		tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		wl_vk_buffer = wl_vk_surface->buffers[idx];

		/* Detach the slot from the tracking array before processing. */
		if (wl_vk_buffer) {
			wl_vk_surface->buffers[idx] = NULL;
			wl_vk_surface->buffer_cnt--;
		} else {
			/* Empty slot: skip to the next index. */
			tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
			tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
			idx++;
			continue;
		}

		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		status = wl_vk_buffer->status;

		TPL_INFO("[BUFFER_CLEAR]",
				 "[%d] wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
				 idx, wl_vk_surface, wl_vk_buffer,
				 wl_vk_buffer->tbm_surface,
				 status_to_string[status]);

		if (status >= ENQUEUED) {
			tpl_bool_t need_to_wait  = TPL_FALSE;
			tpl_result_t wait_result = TPL_ERROR_NONE;

			/* Without explicit sync the buffer is safe once it reached
			 * WAITING_VBLANK; with explicit sync it must reach COMMITTED. */
			if (!wl_vk_display->use_explicit_sync &&
				status < WAITING_VBLANK)
				need_to_wait = TPL_TRUE;

			if (wl_vk_display->use_explicit_sync &&
				status < COMMITTED)
				need_to_wait = TPL_TRUE;

			if (need_to_wait) {
				/* Drop wl_event_mutex while waiting so the worker thread
				 * can progress and signal this buffer's cond. */
				tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
				wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond,
												  &wl_vk_buffer->mutex,
												  16); /* 16ms */
				tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

				status = wl_vk_buffer->status;

				if (wait_result == TPL_ERROR_TIME_OUT)
					TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
							 wl_vk_buffer);
			}
		}

		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
		/* It has been acquired but has not yet been released, so this
		 * buffer must be released. */
		need_to_release = (status >= ACQUIRED && status <= COMMITTED);

		/* After dequeue, it has not been enqueued yet
		 * so cancel_dequeue must be performed. */
		need_to_cancel = (status == DEQUEUED);

		if (swapchain && swapchain->tbm_queue) {
			if (need_to_release) {
				tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
													wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}

			if (need_to_cancel) {
				tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
														   wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}
		}

		wl_vk_buffer->status = RELEASED;

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		/* Drop the extra tbm_surface reference — presumably taken when the
		 * buffer was dequeued/acquired; confirm against the dequeue path. */
		if (need_to_release || need_to_cancel)
			tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);

		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

		idx++;
	}
}
1113
1114 static tdm_client_vblank*
1115 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1116 {
1117         tdm_client_vblank *vblank = NULL;
1118         tdm_client_output *tdm_output = NULL;
1119         tdm_error tdm_err = TDM_ERROR_NONE;
1120
1121         if (!tdm_client) {
1122                 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1123                 return NULL;
1124         }
1125
1126         tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1127         if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1128                 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1129                 return NULL;
1130         }
1131
1132         vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1133         if (!vblank || tdm_err != TDM_ERROR_NONE) {
1134                 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1135                 return NULL;
1136         }
1137
1138         tdm_err = tdm_client_handle_pending_events(tdm_client);
1139         if (tdm_err != TDM_ERROR_NONE) {
1140                 TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err);
1141         }
1142
1143         tdm_client_vblank_set_enable_fake(vblank, 1);
1144         tdm_client_vblank_set_sync(vblank, 0);
1145
1146         return vblank;
1147 }
1148
/* Per-surface initialization, executed on the wl_vk thread in response to
 * the INIT_SURFACE message sent from __tpl_wl_vk_surface_init().
 * Sets up the optional tdm vblank object and (when the display advertises
 * it) the wayland explicit-synchronization surface object.
 */
static void
_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	/* tbm_surface_queue will be created at swapchain_create */

	if (wl_vk_display->use_wait_vblank) {
		wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
									wl_vk_display->tdm.tdm_client);
		if (wl_vk_surface->vblank) {
			TPL_INFO("[VBLANK_INIT]",
					"wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
					wl_vk_surface, wl_vk_display->tdm.tdm_client,
					wl_vk_surface->vblank);

			/* Buffers committed while a vblank is pending are queued in
			 * this list. If it cannot be allocated, disable vblank
			 * waiting entirely instead of running without the queue. */
			wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
			if (!wl_vk_surface->vblank_waiting_buffers) {
				tdm_client_vblank_destroy(wl_vk_surface->vblank);
				wl_vk_surface->vblank = NULL;
			}
		}
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
		wl_vk_surface->surface_sync =
			zwp_linux_explicit_synchronization_v1_get_synchronization(
					wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
		if (wl_vk_surface->surface_sync) {
			TPL_INFO("[EXPLICIT_SYNC_INIT]",
					 "wl_vk_surface(%p) surface_sync(%p)",
					 wl_vk_surface, wl_vk_surface->surface_sync);
		} else {
			TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
					 wl_vk_surface);
			/* Fall back to implicit sync for the whole display. */
			wl_vk_display->use_explicit_sync = TPL_FALSE;
		}
	}
#endif

	/* Vblank waiting requires both a vblank object and a positive post
	 * interval; an interval of 0 means present without waiting. */
	wl_vk_surface->vblank_enable = (wl_vk_surface->vblank != NULL &&
		wl_vk_surface->post_interval > 0);
}
1193
/* Per-surface finalization, executed on the wl_vk thread from the surf
 * gsource finalize callback. Releases resources created by
 * _thread_wl_vk_surface_init(): the vblank waiting list, the explicit-sync
 * surface object, and the tdm vblank object.
 */
static void
_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
{
	TPL_INFO("[SURFACE_FINI]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->vblank_waiting_buffers) {
		__tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
		wl_vk_surface->vblank_waiting_buffers = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_surface->surface_sync) {
		TPL_INFO("[SURFACE_SYNC_DESTROY]",
				 "wl_vk_surface(%p) surface_sync(%p)",
				  wl_vk_surface, wl_vk_surface->surface_sync);
		zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
		wl_vk_surface->surface_sync = NULL;
	}
#endif

	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_DESTROY]",
				 "wl_vk_surface(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_surface->vblank);
		tdm_client_vblank_destroy(wl_vk_surface->vblank);
		wl_vk_surface->vblank = NULL;
	}
}
1224
1225 static tpl_bool_t
1226 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1227 {
1228         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1229
1230         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1231
1232         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1233         if (message & INIT_SURFACE) { /* Initialize surface */
1234                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) initialize message received!",
1235                                   wl_vk_surface);
1236                 _thread_wl_vk_surface_init(wl_vk_surface);
1237                 wl_vk_surface->initialized_in_thread = TPL_TRUE;
1238                 tpl_gcond_signal(&wl_vk_surface->surf_cond);    
1239         }
1240         
1241         if (message & ACQUIRABLE) { /* Acquirable message */
1242                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) acquirable message received!",
1243                                   wl_vk_surface);
1244                 if (_thread_surface_queue_acquire(wl_vk_surface)
1245                         != TPL_ERROR_NONE) {
1246                         TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1247                                         wl_vk_surface);
1248                 }
1249         }
1250
1251         if (message & CREATE_QUEUE) { /* Create tbm_surface_queue */
1252                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) queue creation message received!",
1253                                   wl_vk_surface);
1254                 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1255                         != TPL_ERROR_NONE) {
1256                         TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1257                                         wl_vk_surface);
1258                 }
1259                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1260         }
1261         
1262         if (message & DESTROY_QUEUE) { /* swapchain destroy */
1263                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) swapchain destroy message received!",
1264                                   wl_vk_surface);
1265                 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1266                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1267         }
1268
1269         /* init to NONE_MESSAGE */
1270         wl_vk_surface->sent_message = NONE_MESSAGE;
1271
1272         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1273
1274         return TPL_TRUE;
1275 }
1276
1277 static void
1278 __thread_func_surf_finalize(tpl_gsource *gsource)
1279 {
1280         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1281
1282         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1283         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1284
1285         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1286         TPL_LOG_D("[S_FINALIZE]", "wl_vk_surface(%p) tpl_gsource(%p)",
1287                           wl_vk_surface, gsource);
1288
1289         _thread_wl_vk_surface_fini(wl_vk_surface);
1290
1291         wl_vk_surface->gsource_finalized = TPL_TRUE;
1292
1293         tpl_gcond_signal(&wl_vk_surface->surf_cond);
1294         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1295 }
1296
/* gsource callbacks for the per-surface event source on the wl_vk thread.
 * The source has no fd to poll, so only dispatch (message handling) and
 * finalize are provided. */
static tpl_gsource_functions surf_funcs = {
	.prepare = NULL,
	.check = NULL,
	.dispatch = __thread_func_surf_dispatch,
	.finalize = __thread_func_surf_finalize,
};
1303
1304
/* Initialize the wl_vk backend data for a window surface.
 *
 * Allocates a tpl_wl_vk_surface_t, creates a message-driven gsource on
 * the display's wl_vk thread, then sends INIT_SURFACE and blocks until
 * the thread finishes _thread_wl_vk_surface_init() (handshake via
 * surf_cond / initialized_in_thread).
 *
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_OUT_OF_MEMORY or
 * TPL_ERROR_INVALID_OPERATION on failure (backend.data left NULL).
 */
static tpl_result_t
__tpl_wl_vk_surface_init(tpl_surface_t *surface)
{
	tpl_wl_vk_surface_t *wl_vk_surface      = NULL;
	tpl_wl_vk_display_t *wl_vk_display      = NULL;
	tpl_gsource *surf_source                = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
	TPL_ASSERT(surface->native_handle);

	wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
							 sizeof(tpl_wl_vk_surface_t));
	if (!wl_vk_surface) {
		TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
		return TPL_ERROR_OUT_OF_MEMORY;
	}

	/* fd -1 / FD_TYPE_NONE: this source is driven purely by messages. */
	surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
									 -1, FD_TYPE_NONE, &surf_funcs, SOURCE_TYPE_NORMAL);
	if (!surf_source) {
		TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
				wl_vk_surface);
		free(wl_vk_surface);
		surface->backend.data = NULL;
		return TPL_ERROR_INVALID_OPERATION;
	}

	surface->backend.data                  = (void *)wl_vk_surface;
	/* Actual size is learned from the tbm_queue/swapchain later. */
	surface->width                                 = -1;
	surface->height                        = -1;

	wl_vk_surface->surf_source             = surf_source;
	wl_vk_surface->swapchain               = NULL;

	wl_vk_surface->wl_vk_display           = wl_vk_display;
	wl_vk_surface->wl_surface              = (struct wl_surface *)surface->native_handle;
	wl_vk_surface->tpl_surface             = surface;

	wl_vk_surface->reset                   = TPL_FALSE;
	wl_vk_surface->is_activated            = TPL_FALSE;
	wl_vk_surface->vblank_done             = TPL_TRUE;
	wl_vk_surface->initialized_in_thread   = TPL_FALSE;

	wl_vk_surface->render_done_cnt         = 0;

	wl_vk_surface->vblank                  = NULL;
	wl_vk_surface->vblank_enable           = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
	wl_vk_surface->surface_sync            = NULL;
#endif

	wl_vk_surface->sent_message            = NONE_MESSAGE;

	wl_vk_surface->post_interval           = surface->post_interval;

	{
		int i = 0;
		for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
			wl_vk_surface->buffers[i]     = NULL;
		wl_vk_surface->buffer_cnt         = 0;
	}

	tpl_gmutex_init(&wl_vk_surface->surf_mutex);
	tpl_gcond_init(&wl_vk_surface->surf_cond);

	tpl_gmutex_init(&wl_vk_surface->buffers_mutex);

	/* Initialize in thread */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->sent_message = INIT_SURFACE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	/* Block until the wl_vk thread has run _thread_wl_vk_surface_init(). */
	while (!wl_vk_surface->initialized_in_thread)
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	TPL_INFO("[SURFACE_INIT]",
			  "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
			  surface, wl_vk_surface, wl_vk_surface->surf_source);

	return TPL_ERROR_NONE;
}
1392
/* Finalize the wl_vk backend data of a window surface.
 *
 * Destroys the surface's gsource on the wl_vk thread (waiting for its
 * finalize callback via surf_cond / gsource_finalized), then tears down
 * local state and frees the backend data.
 */
static void
__tpl_wl_vk_surface_fini(tpl_surface_t *surface)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	wl_vk_display = (tpl_wl_vk_display_t *)
							 surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

	TPL_INFO("[SURFACE_FINI][BEGIN]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
		/* finalize swapchain */

	}

	wl_vk_surface->swapchain        = NULL;

	/* This is a protection to prevent problems that arise in unexpected situations
	 * that g_cond_wait cannot work normally.
	 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
	 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
	 * */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) {
		tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	}
	wl_vk_surface->surf_source = NULL;
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	_print_buffer_lists(wl_vk_surface);

	wl_vk_surface->wl_surface       = NULL;
	wl_vk_surface->wl_vk_display    = NULL;
	wl_vk_surface->tpl_surface      = NULL;

	/* Empty lock/unlock: make sure no other thread still holds
	 * surf_mutex before it is cleared. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
	tpl_gcond_clear(&wl_vk_surface->surf_cond);

	TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);

	free(wl_vk_surface);
	surface->backend.data = NULL;
}
1449
1450 static tpl_result_t
1451 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1452                                                                                   int post_interval)
1453 {
1454         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1455
1456         TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1457
1458         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1459
1460         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1461
1462         TPL_INFO("[SET_POST_INTERVAL]",
1463                          "wl_vk_surface(%p) post_interval(%d -> %d)",
1464                          wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1465
1466         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1467         wl_vk_surface->post_interval = post_interval;
1468         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1469
1470         return TPL_ERROR_NONE;
1471 }
1472
1473 static tpl_bool_t
1474 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1475 {
1476         TPL_ASSERT(surface);
1477         TPL_ASSERT(surface->backend.data);
1478
1479         tpl_wl_vk_surface_t *wl_vk_surface =
1480                 (tpl_wl_vk_surface_t *)surface->backend.data;
1481
1482         return !(wl_vk_surface->reset);
1483 }
1484
/* tbm_surface_queue reset callback.
 * Invoked when the tbm_queue backing this surface is reset -- e.g. on a
 * window resize or when the server toggles the queue between ACTIVATED
 * and DEACTIVATED. Sets wl_vk_surface->reset so that
 * __tpl_wl_vk_surface_validate() returns TPL_FALSE until the swapchain
 * is re-created, then forwards the event to the frontend reset_cb. */
static void
__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
							  void *data)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;
	tpl_wl_vk_swapchain_t *swapchain   = NULL;
	tpl_surface_t *surface             = NULL;
	tpl_bool_t is_activated            = TPL_FALSE;
	int width, height;

	wl_vk_surface = (tpl_wl_vk_surface_t *)data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	wl_vk_display = wl_vk_surface->wl_vk_display;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

	surface = wl_vk_surface->tpl_surface;
	TPL_CHECK_ON_NULL_RETURN(surface);

	swapchain = wl_vk_surface->swapchain;
	TPL_CHECK_ON_NULL_RETURN(swapchain);

	/* When the queue is resized, change the reset flag to TPL_TRUE to reflect
	 * the changed window size at the next frame. */
	width = tbm_surface_queue_get_width(tbm_queue);
	height = tbm_surface_queue_get_height(tbm_queue);
	if (surface->width != width || surface->height != height) {
		TPL_INFO("[QUEUE_RESIZE]",
				 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
				 wl_vk_surface, tbm_queue,
				 surface->width, surface->height, width, height);
	}

	/* When queue_reset_callback is called, if is_activated is different from
	 * its previous state change the reset flag to TPL_TRUE to get a new buffer
	 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame.
	 * NOTE(review): wl_vk_surface->is_activated is only compared and logged
	 * here, never updated -- confirm the cached state is refreshed elsewhere
	 * (e.g. in the dequeue path), otherwise this logs on every reset. */
	is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
														   swapchain->tbm_queue);
	if (wl_vk_surface->is_activated != is_activated) {
		if (is_activated) {
			TPL_INFO("[ACTIVATED]",
					  "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
					  wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
		} else {
			TPL_INFO("[DEACTIVATED]",
					 " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
					 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
		}
	}

	/* Marked unconditionally: any queue reset invalidates the swapchain. */
	wl_vk_surface->reset = TPL_TRUE;

	if (surface->reset_cb)
		surface->reset_cb(surface->reset_data);
}
1541
1542 static void
1543 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1544                                                                    void *data)
1545 {
1546         TPL_IGNORE(tbm_queue);
1547
1548         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1549         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1550
1551         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1552         if (wl_vk_surface->sent_message == NONE_MESSAGE) {
1553                 wl_vk_surface->sent_message = ACQUIRABLE;
1554                 tpl_gsource_send_message(wl_vk_surface->surf_source,
1555                                                                  wl_vk_surface->sent_message);
1556         }
1557         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1558 }
1559
/* Worker-thread handler for the CREATE_QUEUE message.
 * Validates the requested swapchain properties, then either reuses the
 * existing tbm_surface_queue (resizing it if needed) or creates a new one
 * and registers the reset/acquirable callbacks.
 * On failure the error is stored in swapchain->result AND returned, so the
 * caller blocked in __tpl_wl_vk_surface_create_swapchain() can observe it. */
static tpl_result_t
_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
{
	TPL_ASSERT (wl_vk_surface);

	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tbm_surface_queue_h tbm_queue      = NULL;
	tbm_bufmgr bufmgr = NULL;
	unsigned int capability;

	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

	/* Requested buffer_count must lie within the display's advertised
	 * [min_buffer, max_buffer] range. */
	if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
		TPL_ERR("buffer count(%d) must be higher than (%d)",
				swapchain->properties.buffer_count,
				wl_vk_display->min_buffer);
		swapchain->result = TPL_ERROR_INVALID_PARAMETER;
		return TPL_ERROR_INVALID_PARAMETER;
	}

	if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
		TPL_ERR("buffer count(%d) must be lower than (%d)",
				swapchain->properties.buffer_count,
				wl_vk_display->max_buffer);
		swapchain->result = TPL_ERROR_INVALID_PARAMETER;
		return TPL_ERROR_INVALID_PARAMETER;
	}

	/* present_mode is a bitmask test against the modes the display supports. */
	if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
		TPL_ERR("Unsupported present_mode(%d)",
				swapchain->properties.present_mode);
		swapchain->result = TPL_ERROR_INVALID_PARAMETER;
		return TPL_ERROR_INVALID_PARAMETER;
	}

	/* Re-creating over a pending old swapchain is a state error. */
	if (swapchain->old_swapchain_buffers) {
		TPL_ERR("Should be destroy old_swapchain before create");
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Reuse path: a tbm_queue already exists (vkCreateSwapchainKHR with
	 * oldSwapchain). Park the currently tracked buffers as
	 * old_swapchain_buffers and resize the queue if the extent changed. */
	if (swapchain->tbm_queue) {
		int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
		int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);

		if (swapchain->swapchain_buffers) {
			swapchain->old_swapchain_buffers = swapchain->swapchain_buffers;
			swapchain->swapchain_buffers = NULL;
		}

		if (old_width != swapchain->properties.width ||
			old_height != swapchain->properties.height) {
			tbm_surface_queue_reset(swapchain->tbm_queue,
									swapchain->properties.width,
									swapchain->properties.height,
									TBM_FORMAT_ARGB8888);
			TPL_INFO("[RESIZE]",
					 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
					 wl_vk_surface, swapchain, swapchain->tbm_queue,
					 old_width, old_height,
					 swapchain->properties.width,
					 swapchain->properties.height);
		}

		/* The reused queue keeps its original size, which may differ from
		 * the newly requested buffer_count; report the real size back. */
		swapchain->properties.buffer_count =
			tbm_surface_queue_get_size(swapchain->tbm_queue);

		wl_vk_surface->reset = TPL_FALSE;

		__tpl_util_atomic_inc(&swapchain->ref_cnt);
		swapchain->create_done = TPL_TRUE;

		TPL_INFO("[SWAPCHAIN_REUSE]",
				 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
				 wl_vk_surface, swapchain, swapchain->tbm_queue,
				 swapchain->properties.buffer_count);

		return TPL_ERROR_NONE;
	}

	/* Probe the tbm capability to choose tiled vs linear queue creation.
	 * NOTE(review): tbm_bufmgr_init() result is not NULL-checked before
	 * tbm_bufmgr_get_capability() -- confirm tbm tolerates a NULL bufmgr. */
	bufmgr = tbm_bufmgr_init(-1);
	capability = tbm_bufmgr_get_capability(bufmgr);
	tbm_bufmgr_deinit(bufmgr);

	if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
		tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
									wl_vk_display->wl_tbm_client,
									wl_vk_surface->wl_surface,
									swapchain->properties.buffer_count,
									swapchain->properties.width,
									swapchain->properties.height,
									TBM_FORMAT_ARGB8888);
	} else {
		tbm_queue = wayland_tbm_client_create_surface_queue(
									wl_vk_display->wl_tbm_client,
									wl_vk_surface->wl_surface,
									swapchain->properties.buffer_count,
									swapchain->properties.width,
									swapchain->properties.height,
									TBM_FORMAT_ARGB8888);
	}

	if (!tbm_queue) {
		TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
				wl_vk_surface);
		swapchain->result = TPL_ERROR_OUT_OF_MEMORY;
		return TPL_ERROR_OUT_OF_MEMORY;
	}

	/* GUARANTEE_CYCLE keeps dequeue order deterministic, which Vulkan's
	 * image-index contract relies on. */
	if (tbm_surface_queue_set_modes(
			tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
			TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
				tbm_queue);
		tbm_surface_queue_destroy(tbm_queue);
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	if (tbm_surface_queue_add_reset_cb(
			tbm_queue,
			__cb_tbm_queue_reset_callback,
			(void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
				tbm_queue);
		tbm_surface_queue_destroy(tbm_queue);
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	if (tbm_surface_queue_add_acquirable_cb(
			tbm_queue,
			__cb_tbm_queue_acquirable_callback,
			(void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
				tbm_queue);
		tbm_surface_queue_destroy(tbm_queue);
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	swapchain->tbm_queue = tbm_queue;
	swapchain->create_done = TPL_TRUE;

	TPL_INFO("[TBM_QUEUE_CREATED]",
			 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
			 wl_vk_surface, swapchain, tbm_queue);

	return TPL_ERROR_NONE;
}
1712
1713 static tpl_result_t
1714 __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
1715                 tbm_format format, int width,
1716                 int height, int buffer_count, int present_mode)
1717 {
1718         tpl_wl_vk_surface_t *wl_vk_surface              = NULL;
1719         tpl_wl_vk_display_t *wl_vk_display      = NULL;
1720         tpl_wl_vk_swapchain_t *swapchain  = NULL;
1721
1722         TPL_ASSERT(surface);
1723         TPL_ASSERT(surface->display);
1724
1725         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1726         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1727
1728         wl_vk_display = (tpl_wl_vk_display_t *)
1729                                                          surface->display->backend.data;
1730         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1731
1732         swapchain = wl_vk_surface->swapchain;
1733
1734         if (swapchain == NULL) {
1735                 swapchain =
1736                         (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
1737                         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
1738                 swapchain->tbm_queue           = NULL;
1739         }
1740
1741         swapchain->properties.buffer_count = buffer_count;
1742         swapchain->properties.width        = width;
1743         swapchain->properties.height       = height;
1744         swapchain->properties.present_mode = present_mode;
1745         swapchain->wl_vk_surface           = wl_vk_surface;
1746         swapchain->properties.format       = format;
1747         swapchain->swapchain_buffers       = NULL;
1748         swapchain->old_swapchain_buffers   = NULL;
1749
1750         swapchain->result                  = TPL_ERROR_NONE;
1751         swapchain->create_done             = TPL_FALSE;
1752
1753         wl_vk_surface->swapchain           = swapchain;
1754
1755         __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
1756
1757         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1758         /* send swapchain create tbm_queue message */
1759         wl_vk_surface->sent_message = CREATE_QUEUE;
1760         tpl_gsource_send_message(wl_vk_surface->surf_source,
1761                                                          wl_vk_surface->sent_message);
1762         while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE)
1763                 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1764         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1765
1766         TPL_CHECK_ON_FALSE_ASSERT_FAIL(
1767                 swapchain->tbm_queue != NULL,
1768                 "[CRITICAL FAIL] Failed to create tbm_surface_queue");
1769
1770         wl_vk_surface->reset = TPL_FALSE;
1771
1772         return TPL_ERROR_NONE;
1773 }
1774
1775 static void
1776 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1777 {
1778         TPL_ASSERT(wl_vk_surface);
1779
1780         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1781
1782         TPL_CHECK_ON_NULL_RETURN(swapchain);
1783
1784         if (swapchain->tbm_queue) {
1785                 TPL_INFO("[TBM_QUEUE_DESTROY]",
1786                                  "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1787                                  wl_vk_surface, swapchain, swapchain->tbm_queue);
1788                 tbm_surface_queue_destroy(swapchain->tbm_queue);
1789                 swapchain->tbm_queue = NULL;
1790         }
1791 }
1792
1793 void __untrack_swapchain_buffers(tpl_wl_vk_surface_t *wl_vk_surface, tbm_surface_h *sc_buffers)
1794 {
1795         tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1796
1797         for (int i = 0; i < swapchain->properties.buffer_count; i++) {
1798                 if (sc_buffers[i]) {
1799                         TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
1800                                          i, wl_vk_surface, swapchain, sc_buffers[i],
1801                                          _get_tbm_surface_bo_name(sc_buffers[i]));
1802                         tbm_surface_internal_unref(sc_buffers[i]);
1803                         sc_buffers[i] = NULL;
1804                 }               
1805         }
1806 }
1807
/* Destroys the surface's swapchain.
 * Decrements the swapchain refcount; while other holders remain, only the
 * parked old_swapchain_buffers are released. When the count reaches zero,
 * all tracked buffers are untracked, the worker thread is asked (via
 * DESTROY_QUEUE) to tear down the tbm_queue, and the swapchain struct is
 * freed once the thread signals completion.
 * @return TPL_ERROR_NONE, or TPL_ERROR_INVALID_OPERATION /
 *         TPL_ERROR_INVALID_PARAMETER when there is nothing to destroy. */
static tpl_result_t
__tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
{
	tpl_wl_vk_swapchain_t *swapchain   = NULL;
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	swapchain = wl_vk_surface->swapchain;
	if (!swapchain) {
		TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
				wl_vk_surface);
		return TPL_ERROR_INVALID_OPERATION;
	}

	if (!swapchain->tbm_queue) {
		TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.",
				wl_vk_surface, wl_vk_surface->swapchain);
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Still referenced elsewhere (swapchain reuse): only release the
	 * buffers parked by the reuse path and keep the swapchain alive. */
	if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
		TPL_INFO("[DESTROY_SWAPCHAIN]",
				 "wl_vk_surface(%p) swapchain(%p) still valid.",
				 wl_vk_surface, swapchain);
		if (swapchain->old_swapchain_buffers) {
			__untrack_swapchain_buffers(wl_vk_surface, swapchain->old_swapchain_buffers);
			free(swapchain->old_swapchain_buffers);
			swapchain->old_swapchain_buffers = NULL;
		}
		return TPL_ERROR_NONE;
	}

	TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
			 "wl_vk_surface(%p) swapchain(%p)",
			 wl_vk_surface, wl_vk_surface->swapchain);

	/* Last reference: drop the handed-out buffer refs first ... */
	if (swapchain->swapchain_buffers) {
		__untrack_swapchain_buffers(wl_vk_surface, swapchain->swapchain_buffers);
		free(swapchain->swapchain_buffers);
		swapchain->swapchain_buffers = NULL;
	}

	_tpl_wl_vk_surface_buffer_clear(wl_vk_surface);

	/* ... then ask the worker thread to destroy the tbm_queue and wait
	 * until it clears swapchain->tbm_queue under surf_mutex. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->sent_message = DESTROY_QUEUE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	while (swapchain->tbm_queue)
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	_print_buffer_lists(wl_vk_surface);

	free(swapchain);
	wl_vk_surface->swapchain = NULL;

	return TPL_ERROR_NONE;
}
1876
/* Returns the swapchain's buffers to the caller (vkGetSwapchainImagesKHR
 * path). With buffers == NULL only *buffer_count is filled in. Otherwise an
 * array of tbm_surface handles is fetched from wayland-tbm, each surface is
 * ref'd (released later via __untrack_swapchain_buffers), and ownership of
 * the array stays with the swapchain (swapchain->swapchain_buffers). */
static tpl_result_t
__tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
										  tbm_surface_h **buffers,
										  int *buffer_count)
{
	TPL_ASSERT(surface);
	TPL_ASSERT(surface->backend.data);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->display->backend.data);

	tpl_wl_vk_surface_t *wl_vk_surface =
		(tpl_wl_vk_surface_t *)surface->backend.data;
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)surface->display->backend.data;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tpl_result_t ret                   = TPL_ERROR_NONE;
	int i;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
	TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);

	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

	/* Query-only mode: report the queue size without fetching surfaces. */
	if (!buffers) {
		*buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return TPL_ERROR_NONE;
	}

	/* NOTE(review): if swapchain->swapchain_buffers was already set, the
	 * previous array is overwritten here without being freed -- confirm
	 * callers always destroy/untrack before re-querying. */
	swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
										*buffer_count,
										sizeof(tbm_surface_h));
	if (!swapchain->swapchain_buffers) {
		TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
				*buffer_count);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return TPL_ERROR_OUT_OF_MEMORY;
	}

	/* NOTE(review): 'ret' is declared tpl_result_t but is treated as a
	 * boolean success flag of wayland_tbm_client_queue_get_surfaces() --
	 * verify that API's return convention (non-zero on success assumed). */
	ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
												swapchain->tbm_queue,
												swapchain->swapchain_buffers,
												buffer_count);
	if (!ret) {
		TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
				wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
		free(swapchain->swapchain_buffers);
		swapchain->swapchain_buffers = NULL;
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Hold a reference on every surface handed out so tbm cannot free them
	 * while the Vulkan client still owns the images. */
	for (i = 0; i < *buffer_count; i++) {
		if (swapchain->swapchain_buffers[i]) {
			TPL_INFO("[TRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
					  i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
					  _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
			tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
		}
	}

	*buffers = swapchain->swapchain_buffers;

	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return TPL_ERROR_NONE;
}
1944
/* tbm user-data free callback for a wl_vk_buffer.
 * Runs when the underlying tbm_surface is destroyed: unlinks the buffer
 * from the surface's tracking array, destroys its wl_buffer and any
 * pending explicit-sync release object, closes fence fds, and frees the
 * struct. Statement order matters: the display is flushed before the
 * wl_buffer is destroyed. */
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
			 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);

	/* Remove this buffer from the surface's tracking slots (idx == -1
	 * means it was already evicted or never tracked). */
	tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
	if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
		wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
		wl_vk_surface->buffer_cnt--;

		wl_vk_buffer->idx = -1;
	}
	tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

	/* Flush pending protocol before destroying the wl_buffer proxy. */
	wl_display_flush(wl_vk_display->wl_display);

	if (wl_vk_buffer->wl_buffer) {
		wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
										  wl_vk_buffer->wl_buffer);
		wl_vk_buffer->wl_buffer = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	/* Explicit-sync release object from zwp_linux_explicit_synchronization. */
	if (wl_vk_buffer->buffer_release) {
		zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
		wl_vk_buffer->buffer_release = NULL;
	}
#endif

	if (wl_vk_buffer->release_fence_fd != -1) {
		close(wl_vk_buffer->release_fence_fd);
		wl_vk_buffer->release_fence_fd = -1;
	}

	if (wl_vk_buffer->rects) {
		free(wl_vk_buffer->rects);
		wl_vk_buffer->rects = NULL;
		wl_vk_buffer->num_rects = 0;
	}

	wl_vk_buffer->tbm_surface = NULL;
	wl_vk_buffer->bo_name = -1;

	free(wl_vk_buffer);
}
1994
1995 static tpl_wl_vk_buffer_t *
1996 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1997 {
1998         tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1999         tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
2000                                                                            (void **)&wl_vk_buffer);
2001         return wl_vk_buffer;
2002 }
2003
/* Returns the wl_vk_buffer attached to tbm_surface, creating and tracking
 * a new one on first use. A new buffer is registered as tbm user data (so
 * __cb_wl_vk_buffer_free runs when the tbm_surface dies) and inserted into
 * the surface's fixed-size tracking array (BUFFER_ARRAY_SIZE slots).
 * Returns NULL only on allocation failure. */
static tpl_wl_vk_buffer_t *
_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
					  tbm_surface_h tbm_surface)
{
	tpl_wl_vk_buffer_t  *wl_vk_buffer  = NULL;

	wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);

	if (!wl_vk_buffer) {
		wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
		TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);

		/* Tie the struct's lifetime to the tbm_surface via user data;
		 * __cb_wl_vk_buffer_free is invoked when tbm drops the surface. */
		tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   (tbm_data_free)__cb_wl_vk_buffer_free);
		tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   wl_vk_buffer);

		wl_vk_buffer->wl_buffer                = NULL;
		wl_vk_buffer->tbm_surface              = tbm_surface;
		wl_vk_buffer->bo_name                  = _get_tbm_surface_bo_name(tbm_surface);
		wl_vk_buffer->wl_vk_surface            = wl_vk_surface;

		wl_vk_buffer->status                   = RELEASED;

		/* -1 marks "no fence" for both directions of explicit sync. */
		wl_vk_buffer->acquire_fence_fd         = -1;
		wl_vk_buffer->release_fence_fd         = -1;

		wl_vk_buffer->dx                       = 0;
		wl_vk_buffer->dy                       = 0;
		wl_vk_buffer->width                    = tbm_surface_get_width(tbm_surface);
		wl_vk_buffer->height                   = tbm_surface_get_height(tbm_surface);

		wl_vk_buffer->rects                    = NULL;
		wl_vk_buffer->num_rects                = 0;

		wl_vk_buffer->need_to_commit = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
		wl_vk_buffer->buffer_release = NULL;
#endif
		tpl_gmutex_init(&wl_vk_buffer->mutex);
		tpl_gcond_init(&wl_vk_buffer->cond);

		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		{
			int i;
			/* Find the first free tracking slot. */
			for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
				if (wl_vk_surface->buffers[i] == NULL) break;

			/* If this exception is reached,
			 * it may be a critical memory leak problem. */
			if (i == BUFFER_ARRAY_SIZE) {
				tpl_wl_vk_buffer_t *evicted_buffer = NULL;
				int evicted_idx = 0; /* evict the frontmost buffer */

				evicted_buffer = wl_vk_surface->buffers[evicted_idx];

				TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
						 wl_vk_surface);
				TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
						 evicted_buffer, evicted_buffer->tbm_surface,
						 status_to_string[evicted_buffer->status]);

				/* [TODO] need to think about whether there will be
				 * better modifications */
				/* NOTE(review): eviction only drops tracking here; the
				 * evicted struct is presumably reclaimed later through its
				 * tbm user-data free callback -- confirm nothing leaks. */
				wl_vk_surface->buffer_cnt--;
				wl_vk_surface->buffers[evicted_idx]      = NULL;

				i = evicted_idx;
			}

			wl_vk_surface->buffer_cnt++;
			wl_vk_surface->buffers[i]          = wl_vk_buffer;
			wl_vk_buffer->idx                  = i;
		}
		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		TPL_INFO("[WL_VK_BUFFER_CREATE]",
				 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				 wl_vk_surface, wl_vk_buffer, tbm_surface,
				 wl_vk_buffer->bo_name);
	}

	return wl_vk_buffer;
}
2088
/* Dequeues a renderable tbm_surface from the swapchain's tbm_queue.
 *
 * Blocks until the queue becomes dequeueable (up to timeout_ns, or
 * indefinitely when timeout_ns == UINT64_MAX), then dequeues one buffer,
 * wraps it in a tpl_wl_vk_buffer_t and marks it DEQUEUED.
 *
 * @param surface       validated tpl_surface with a wl_vk backend.
 * @param timeout_ns    wait budget in nanoseconds (UINT64_MAX = forever).
 * @param release_fence optional out param; receives the buffer's explicit
 *                      sync release fence fd (ownership transferred) or -1.
 * @return the dequeued tbm_surface (with an extra internal ref) or NULL on
 *         timeout, queue error, or pending swapchain reset.
 */
static tbm_surface_h
__tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
								   uint64_t timeout_ns,
								   int32_t *release_fence)
{
	TPL_ASSERT(surface);
	TPL_ASSERT(surface->backend.data);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->display->backend.data);
	TPL_OBJECT_CHECK_RETURN(surface, NULL);

	tpl_wl_vk_surface_t *wl_vk_surface =
		(tpl_wl_vk_surface_t *)surface->backend.data;
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)surface->display->backend.data;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;

	tbm_surface_h tbm_surface          = NULL;
	tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_NONE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);

	/* Release the surface object lock while blocking on dequeueability so
	 * other threads can keep using the surface meanwhile. */
	TPL_OBJECT_UNLOCK(surface);
	TRACE_BEGIN("WAIT_DEQUEUEABLE");
	if (timeout_ns != UINT64_MAX) {
		/* timeout is taken in milliseconds by tbm, hence /1000 (ns->us per
		 * tbm's convention for this API). */
		tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
						swapchain->tbm_queue, timeout_ns/1000);
	} else {
		/* NOTE(review): the return value is discarded here, so tsq_err
		 * stays TBM_SURFACE_QUEUE_ERROR_NONE on this path — confirm this
		 * infinite-wait variant cannot fail in a way callers care about. */
		tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
	}
	TRACE_END();
	TPL_OBJECT_LOCK(surface);

	if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
		TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
				timeout_ns);
		return NULL;
	} else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
				wl_vk_surface, swapchain->tbm_queue, tsq_err);
		return NULL;
	}

	/* Serialize against wayland event dispatch; a queue reset may have been
	 * signalled from the event thread while we were waiting above. */
	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

	if (wl_vk_surface->reset) {
		TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
				  swapchain, swapchain->tbm_queue);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
										&tbm_surface);
	if (!tbm_surface) {
		TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
				swapchain->tbm_queue, wl_vk_surface, tsq_err);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	/* Extra ref balanced by the unref in enqueue/cancel/release paths. */
	tbm_surface_internal_ref(tbm_surface);

	/* Creates a new wrapper or returns the one already attached to this
	 * tbm_surface. */
	wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");

	tpl_gmutex_lock(&wl_vk_buffer->mutex);
	wl_vk_buffer->status = DEQUEUED;

	if (release_fence) {
#if TIZEN_FEATURE_ENABLE
		if (wl_vk_surface->surface_sync) {
			/* Hand the stored release fence fd to the caller and give up
			 * ownership so it is not closed/reused here. */
			*release_fence = wl_vk_buffer->release_fence_fd;
			TPL_LOG_D("[EXPLICIT_FENCE]", "wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
					  wl_vk_surface, wl_vk_buffer, *release_fence);
			wl_vk_buffer->release_fence_fd = -1;
		} else
#endif
		{
			*release_fence = -1;
		}
	}

	wl_vk_surface->reset = TPL_FALSE;

	TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
			  wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
			  release_fence ? *release_fence : -1);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return tbm_surface;
}
2185
2186 static tpl_result_t
2187 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
2188                                                                           tbm_surface_h tbm_surface)
2189 {
2190         TPL_ASSERT(surface);
2191         TPL_ASSERT(surface->backend.data);
2192
2193         tpl_wl_vk_surface_t *wl_vk_surface  =
2194                 (tpl_wl_vk_surface_t *)surface->backend.data;
2195         tpl_wl_vk_swapchain_t *swapchain    = NULL;
2196         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2197         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2198
2199         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2200                                                                   TPL_ERROR_INVALID_PARAMETER);
2201
2202         swapchain = wl_vk_surface->swapchain;
2203         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2204         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2205                                                                  TPL_ERROR_INVALID_PARAMETER);
2206
2207         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2208         if (wl_vk_buffer) {
2209                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2210                 wl_vk_buffer->status = RELEASED;
2211                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2212         }
2213
2214         tbm_surface_internal_unref(tbm_surface);
2215
2216         TPL_INFO("[CANCEL BUFFER]",
2217                          "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2218                           wl_vk_surface, swapchain, tbm_surface,
2219                           _get_tbm_surface_bo_name(tbm_surface));
2220
2221         tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2222                                                                                            tbm_surface);
2223         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2224                 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2225                 return TPL_ERROR_INVALID_OPERATION;
2226         }
2227
2228         return TPL_ERROR_NONE;
2229 }
2230
2231 static tpl_result_t
2232 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2233                                                                            tbm_surface_h tbm_surface,
2234                                                                            int num_rects, const int *rects,
2235                                                                            int32_t acquire_fence)
2236 {
2237         TPL_ASSERT(surface);
2238         TPL_ASSERT(surface->display);
2239         TPL_ASSERT(surface->backend.data);
2240         TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2241
2242         tpl_wl_vk_surface_t *wl_vk_surface  =
2243                 (tpl_wl_vk_surface_t *) surface->backend.data;
2244         tpl_wl_vk_swapchain_t *swapchain    = wl_vk_surface->swapchain;
2245         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2246         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2247         int bo_name                         = -1;
2248
2249         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2250         TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2251         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2252                                                                   TPL_ERROR_INVALID_PARAMETER);
2253
2254         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2255         if (!wl_vk_buffer) {
2256                 TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
2257                 return TPL_ERROR_INVALID_PARAMETER;
2258         }
2259
2260         bo_name = wl_vk_buffer->bo_name;
2261
2262         tpl_gmutex_lock(&wl_vk_buffer->mutex);
2263
2264         /* If there are received region information, save it to wl_vk_buffer */
2265         if (num_rects && rects) {
2266                 if (wl_vk_buffer->rects != NULL) {
2267                         free(wl_vk_buffer->rects);
2268                         wl_vk_buffer->rects = NULL;
2269                         wl_vk_buffer->num_rects = 0;
2270                 }
2271
2272                 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2273                 wl_vk_buffer->num_rects = num_rects;
2274
2275                 if (wl_vk_buffer->rects) {
2276                         memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2277                                    sizeof(int) * 4 * num_rects);
2278                 } else {
2279                         TPL_ERR("Failed to allocate memory for rects info.");
2280                 }
2281         }
2282
2283         if (wl_vk_buffer->acquire_fence_fd != -1)
2284                 close(wl_vk_buffer->acquire_fence_fd);
2285
2286         wl_vk_buffer->acquire_fence_fd = acquire_fence;
2287
2288         wl_vk_buffer->status = ENQUEUED;
2289         TPL_LOG_T("WL_VK",
2290                           "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2291                           wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2292
2293         tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2294
2295         tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2296                                                                                 tbm_surface);
2297         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2298                 tbm_surface_internal_unref(tbm_surface);
2299                 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2300                                 tbm_surface, wl_vk_surface, tsq_err);
2301                 return TPL_ERROR_INVALID_OPERATION;
2302         }
2303
2304         tbm_surface_internal_unref(tbm_surface);
2305
2306         return TPL_ERROR_NONE;
2307 }
2308
/* wl_buffer.release handler. The cast is required because
 * __cb_wl_buffer_release is declared with struct wl_proxy * rather than
 * struct wl_buffer * as its second parameter. */
static const struct wl_buffer_listener wl_buffer_release_listener = {
	(void *)__cb_wl_buffer_release,
};
2312
2313 static tpl_result_t
2314 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
2315 {
2316         tbm_surface_h tbm_surface            = NULL;
2317         tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
2318         tpl_wl_vk_display_t *wl_vk_display   = wl_vk_surface->wl_vk_display;
2319         tpl_wl_vk_swapchain_t *swapchain     = wl_vk_surface->swapchain;
2320         tpl_wl_vk_buffer_t *wl_vk_buffer     = NULL;
2321         tpl_bool_t ready_to_commit           = TPL_TRUE;
2322
2323         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2324
2325         while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
2326                 tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
2327                                                                                         &tbm_surface);
2328                 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2329                         TPL_ERR("Failed to acquire from tbm_queue(%p)",
2330                                         swapchain->tbm_queue);
2331                         return TPL_ERROR_INVALID_OPERATION;
2332                 }
2333
2334                 tbm_surface_internal_ref(tbm_surface);
2335
2336                 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2337                 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2338                                                                            "wl_vk_buffer sould be not NULL");
2339
2340                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2341
2342                 wl_vk_buffer->status = ACQUIRED;
2343
2344                 TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2345                                   wl_vk_buffer, tbm_surface,
2346                                   _get_tbm_surface_bo_name(tbm_surface));
2347
2348                 if (wl_vk_buffer->wl_buffer == NULL) {
2349                         wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2350                                                 wl_vk_display->wl_tbm_client, tbm_surface);
2351
2352                         if (!wl_vk_buffer->wl_buffer) {
2353                                 TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
2354                                                  wl_vk_display->wl_tbm_client, tbm_surface);
2355                         } else {
2356                                 if (wl_vk_buffer->acquire_fence_fd == -1 ||
2357                                         wl_vk_display->use_explicit_sync == TPL_FALSE) {
2358                                         wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2359                                                                                    &wl_buffer_release_listener, wl_vk_buffer);
2360                                 }
2361
2362                                 TPL_LOG_T("WL_VK",
2363                                                   "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
2364                                                   wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
2365                         }
2366                 }
2367
2368                 if (!wl_vk_surface->vblank_enable || wl_vk_surface->vblank_done)
2369                         ready_to_commit = TPL_TRUE;
2370                 else {
2371                         wl_vk_buffer->status = WAITING_VBLANK;
2372                         __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
2373                         ready_to_commit = TPL_FALSE;
2374                 }
2375
2376                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2377
2378                 if (ready_to_commit)
2379                         _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2380         }
2381
2382         return TPL_ERROR_NONE;
2383 }
2384
2385 #if TIZEN_FEATURE_ENABLE
2386 static void
2387 __cb_buffer_fenced_release(void *data,
2388                                                    struct zwp_linux_buffer_release_v1 *release,
2389                                                    int32_t fence)
2390 {
2391         tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
2392         tbm_surface_h tbm_surface         = NULL;
2393
2394         TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2395
2396         tbm_surface = wl_vk_buffer->tbm_surface;
2397
2398         if (tbm_surface_internal_is_valid(tbm_surface)) {
2399                 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2400                 tpl_wl_vk_swapchain_t *swapchain   = NULL;
2401
2402                 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2403                         TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2404                         tbm_surface_internal_unref(tbm_surface);
2405                         return;
2406                 }
2407
2408                 swapchain = wl_vk_surface->swapchain;
2409
2410                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2411                 if (wl_vk_buffer->status == COMMITTED) {
2412                         tbm_surface_queue_error_e tsq_err;
2413
2414                         zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2415                         wl_vk_buffer->buffer_release = NULL;
2416
2417                         wl_vk_buffer->release_fence_fd = fence;
2418                         wl_vk_buffer->status = RELEASED;
2419
2420                         TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2421                                            wl_vk_buffer->bo_name,
2422                                            fence);
2423                         TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2424                                                         wl_vk_buffer->bo_name);
2425
2426                         TPL_LOG_T("WL_VK",
2427                                           "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2428                                           wl_vk_buffer, tbm_surface,
2429                                           wl_vk_buffer->bo_name,
2430                                           fence);
2431
2432                         tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2433                                                                                                 tbm_surface);
2434                         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2435                                 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2436
2437                         tbm_surface_internal_unref(tbm_surface);
2438                 }
2439
2440                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2441
2442         } else {
2443                 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2444         }
2445 }
2446
/* zwp_linux_buffer_release_v1.immediate_release handler.
 *
 * The compositor is done with the buffer and no fence is provided, so a
 * COMMITTED buffer is released back to the tbm_queue right away with
 * release_fence_fd = -1. Counterpart of __cb_buffer_fenced_release. */
static void
__cb_buffer_immediate_release(void *data,
							  struct zwp_linux_buffer_release_v1 *release)
{
	tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface           = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain   = NULL;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);
			return;
		}

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);
		/* Only COMMITTED buffers are returned to the queue; any other
		 * status means the buffer was already handled elsewhere. */
		if (wl_vk_buffer->status == COMMITTED) {
			tbm_surface_queue_error_e tsq_err;

			/* The per-commit release object is single-use. */
			zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
			wl_vk_buffer->buffer_release = NULL;

			/* Immediate release carries no fence. */
			wl_vk_buffer->release_fence_fd = -1;
			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
					   _get_tbm_surface_bo_name(tbm_surface));
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							_get_tbm_surface_bo_name(tbm_surface));

			TPL_LOG_T("WL_VK",
					  "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
					  wl_vk_buffer, tbm_surface,
					  _get_tbm_surface_bo_name(tbm_surface));

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
												tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			/* Balance the ref taken when the buffer was acquired. */
			tbm_surface_internal_unref(tbm_surface);
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	} else {
		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
2504
/* Listener for per-commit zwp_linux_buffer_release_v1 objects; entry order
 * must match the protocol (fenced_release, then immediate_release).
 * NOTE: "listner" spelling is kept — the symbol is referenced elsewhere. */
static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
	__cb_buffer_fenced_release,
	__cb_buffer_immediate_release,
};
2509 #endif
2510
2511 static void
2512 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
2513 {
2514         tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2515         tbm_surface_h tbm_surface = NULL;
2516
2517         TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)
2518
2519         tbm_surface = wl_vk_buffer->tbm_surface;
2520
2521         if (tbm_surface_internal_is_valid(tbm_surface)) {
2522                 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2523                 tpl_wl_vk_swapchain_t *swapchain   = NULL;
2524                 tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
2525
2526                 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2527                         TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2528                         tbm_surface_internal_unref(tbm_surface);
2529                         return;
2530                 }
2531
2532                 swapchain = wl_vk_surface->swapchain;
2533
2534                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2535
2536                 if (wl_vk_buffer->status == COMMITTED) {
2537
2538                         tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2539                                                                                                 tbm_surface);
2540                         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2541                                 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2542
2543                         wl_vk_buffer->status = RELEASED;
2544
2545                         TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
2546                         TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2547                                                         wl_vk_buffer->bo_name);
2548
2549                         TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2550                                           wl_vk_buffer->wl_buffer, tbm_surface,
2551                                           wl_vk_buffer->bo_name);
2552
2553                         tbm_surface_internal_unref(tbm_surface);
2554                 }
2555
2556                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2557         } else {
2558                 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2559         }
2560 }
2561
2562 static void
2563 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2564                                            unsigned int sequence, unsigned int tv_sec,
2565                                            unsigned int tv_usec, void *user_data)
2566 {
2567         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
2568         tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;
2569
2570         TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2571         TPL_LOG_D("[VBLANK_DONE]", "wl_vk_surface(%p)", wl_vk_surface);
2572
2573         if (error == TDM_ERROR_TIMEOUT)
2574                 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
2575                                  wl_vk_surface);
2576
2577         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
2578         wl_vk_surface->vblank_done = TPL_TRUE;
2579
2580         if (wl_vk_surface->vblank && wl_vk_surface->vblank_waiting_buffers) {
2581                 tpl_bool_t is_empty = TPL_TRUE;
2582                 do {
2583                         tpl_wl_vk_buffer_t* wl_vk_buffer =(tpl_wl_vk_buffer_t *)
2584                                 __tpl_list_pop_front(wl_vk_surface->vblank_waiting_buffers, NULL);
2585                         is_empty = __tpl_list_is_empty(wl_vk_surface->vblank_waiting_buffers);
2586
2587                         if (!wl_vk_buffer) break;
2588
2589                         _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2590
2591                         /* If tdm error such as TIMEOUT occured,
2592                          * flush all vblank waiting buffers of its wl_vk_surface.
2593                          * Otherwise, only one wl_vk_buffer will be commited per one vblank event.
2594                          */
2595                         if (error == TDM_ERROR_NONE && wl_vk_surface->post_interval > 0)
2596                                 break;
2597                 } while (!is_empty);
2598
2599                 wl_vk_surface->vblank_enable = (wl_vk_surface->post_interval > 0);
2600         }
2601         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
2602 }
2603
2604 static tpl_result_t
2605 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2606 {
2607         tdm_error tdm_err                     = TDM_ERROR_NONE;
2608         tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
2609
2610         if (wl_vk_surface->vblank == NULL) {
2611                 wl_vk_surface->vblank =
2612                         _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client);
2613                 if (!wl_vk_surface->vblank) {
2614                         TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2615                                          wl_vk_surface);
2616                         return TPL_ERROR_OUT_OF_MEMORY;
2617                 } else {
2618                         wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
2619                         if (!wl_vk_surface->vblank_waiting_buffers) {
2620                                 tdm_client_vblank_destroy(wl_vk_surface->vblank);
2621                                 wl_vk_surface->vblank = NULL;
2622                         }
2623                 }
2624         }
2625
2626         tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2627                         wl_vk_surface->post_interval,
2628                         __cb_tdm_client_vblank,
2629                         (void *)wl_vk_surface);
2630
2631         if (tdm_err == TDM_ERROR_NONE) {
2632                 wl_vk_surface->vblank_done = TPL_FALSE;
2633                 TRACE_ASYNC_BEGIN((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2634         } else {
2635                 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2636                 return TPL_ERROR_INVALID_OPERATION;
2637         }
2638
2639         return TPL_ERROR_NONE;
2640 }
2641
/* Attaches wl_vk_buffer's wl_buffer to the wayland surface, posts damage,
 * sets up explicit-sync fences when enabled, and commits the surface.
 * Runs on the tpl worker thread. Marks the buffer COMMITTED and signals
 * any thread waiting on its condition variable; arms a vblank wait when
 * post_interval > 0 (SwapInterval 0 skips vblank pacing entirely). */
static void
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
						  tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
	struct wl_surface *wl_surface         = wl_vk_surface->wl_surface;
	uint32_t version;

	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
								   "wl_vk_buffer sould be not NULL");

	/* Lazily create the wl_buffer in case the acquire path did not. */
	if (wl_vk_buffer->wl_buffer == NULL) {
		wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
											wl_vk_display->wl_tbm_client,
											wl_vk_buffer->tbm_surface);
		/* Listen for plain wl_buffer.release only when explicit sync will
		 * not deliver release events for this buffer. */
		if (wl_vk_buffer->wl_buffer &&
			(wl_vk_buffer->acquire_fence_fd == -1 ||
			 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
				wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
									   &wl_buffer_release_listener, wl_vk_buffer);
		}
	}
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
								   "[FATAL] Failed to create wl_buffer");

	/* wl_surface_damage_buffer requires wl_surface version >= 4. */
	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);

	wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
					  wl_vk_buffer->dx, wl_vk_buffer->dy);

	if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
		/* No damage rects were provided: damage the entire buffer. */
		if (version < 4) {
			wl_surface_damage(wl_surface,
							  wl_vk_buffer->dx, wl_vk_buffer->dy,
							  wl_vk_buffer->width, wl_vk_buffer->height);
		} else {
			wl_surface_damage_buffer(wl_surface,
									 0, 0,
									 wl_vk_buffer->width, wl_vk_buffer->height);
		}
	} else {
		int i;
		for (i = 0; i < wl_vk_buffer->num_rects; i++) {
			/* Rects arrive bottom-left-origin (Vulkan/GL convention);
			 * flip y into wayland's top-left-origin space. */
			int inverted_y =
				wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
						wl_vk_buffer->rects[i * 4 + 3]);
			if (version < 4) {
				wl_surface_damage(wl_surface,
								  wl_vk_buffer->rects[i * 4 + 0],
								  inverted_y,
								  wl_vk_buffer->rects[i * 4 + 2],
								  wl_vk_buffer->rects[i * 4 + 3]);
			} else {
				wl_surface_damage_buffer(wl_surface,
										 wl_vk_buffer->rects[i * 4 + 0],
										 inverted_y,
										 wl_vk_buffer->rects[i * 4 + 2],
										 wl_vk_buffer->rects[i * 4 + 3]);
			}
		}
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->use_explicit_sync &&
		wl_vk_surface->surface_sync &&
		wl_vk_buffer->acquire_fence_fd != -1) {

		/* The compositor dups the fence during this request, so our fd
		 * can be closed immediately afterwards. */
		zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
															   wl_vk_buffer->acquire_fence_fd);
		TPL_LOG_D("[SET_ACQUIRE_FENCE][1/2]", "wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
				  wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
		close(wl_vk_buffer->acquire_fence_fd);
		wl_vk_buffer->acquire_fence_fd = -1;

		/* Per-commit release object; destroyed again in the fenced/
		 * immediate release callbacks. */
		wl_vk_buffer->buffer_release =
			zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
		if (!wl_vk_buffer->buffer_release) {
			TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
		} else {
			zwp_linux_buffer_release_v1_add_listener(
				wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
			TPL_LOG_D("[SET_ACQUIRE_FENCE][2/2]", "add explicit_sync_release_listener.");
		}
	}
#endif

	wl_surface_commit(wl_surface);

	wl_display_flush(wl_vk_display->wl_display);

	TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
					  wl_vk_buffer->bo_name);

	tpl_gmutex_lock(&wl_vk_buffer->mutex);

	wl_vk_buffer->need_to_commit   = TPL_FALSE;
	wl_vk_buffer->status           = COMMITTED;

	/* Wake any thread blocked waiting for this buffer to be committed. */
	tpl_gcond_signal(&wl_vk_buffer->cond);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	TPL_LOG_T("WL_VK",
			  "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
			  wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
			  wl_vk_buffer->bo_name);

	/* post_interval == 0 (SwapInterval 0) skips vblank pacing. */
	if (wl_vk_surface->post_interval > 0 && wl_vk_surface->vblank != NULL) {
		wl_vk_surface->vblank_enable = TPL_TRUE;
		if (_thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
			TPL_ERR("Failed to set wait vblank.");
	}
}
2755
2756 tpl_bool_t
2757 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2758 {
2759         if (!native_dpy) return TPL_FALSE;
2760
2761         if (_check_native_handle_is_wl_display(native_dpy))
2762                 return TPL_TRUE;
2763
2764         return TPL_FALSE;
2765 }
2766
2767 void
2768 __tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
2769 {
2770         TPL_ASSERT(backend);
2771
2772         backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2773         backend->data = NULL;
2774
2775         backend->init = __tpl_wl_vk_display_init;
2776         backend->fini = __tpl_wl_vk_display_fini;
2777         backend->query_config = __tpl_wl_vk_display_query_config;
2778         backend->filter_config = __tpl_wl_vk_display_filter_config;
2779         backend->query_window_supported_buffer_count =
2780                 __tpl_wl_vk_display_query_window_supported_buffer_count;
2781         backend->query_window_supported_present_modes =
2782                 __tpl_wl_vk_display_query_window_supported_present_modes;
2783 }
2784
2785 void
2786 __tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
2787 {
2788         TPL_ASSERT(backend);
2789
2790         backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2791         backend->data = NULL;
2792
2793         backend->init = __tpl_wl_vk_surface_init;
2794         backend->fini = __tpl_wl_vk_surface_fini;
2795         backend->validate = __tpl_wl_vk_surface_validate;
2796         backend->cancel_dequeued_buffer =
2797                 __tpl_wl_vk_surface_cancel_buffer;
2798         backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
2799         backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
2800         backend->get_swapchain_buffers =
2801                 __tpl_wl_vk_surface_get_swapchain_buffers;
2802         backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
2803         backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
2804         backend->set_post_interval =
2805                 __tpl_wl_vk_surface_set_post_interval;
2806 }
2807
2808 static int
2809 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2810 {
2811         return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
2812 }
2813
2814 static void
2815 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2816 {
2817         int idx = 0;
2818
2819         tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2820         TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2821                          wl_vk_surface, wl_vk_surface->buffer_cnt);
2822         for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2823                 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2824                 if (wl_vk_buffer) {
2825                         TPL_INFO("[INFO]",
2826                                          "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2827                                          idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2828                                          wl_vk_buffer->bo_name,
2829                                          status_to_string[wl_vk_buffer->status]);
2830                 }
2831         }
2832         tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2833 }