wl_vk: change the event message processing
[platform/core/uifw/libtpl-egl.git] / src / tpl_wl_vk_thread.c
1 #define inline __inline__
2 #undef inline
3
4 #include "tpl_internal.h"
5
6 #include <string.h>
7 #include <fcntl.h>
8 #include <unistd.h>
9 #include <sys/eventfd.h>
10
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
15
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
19
20 #include <tdm_client.h>
21
22 #ifndef TIZEN_FEATURE_ENABLE
23 #define TIZEN_FEATURE_ENABLE 1
24 #endif
25
26 #if TIZEN_FEATURE_ENABLE
27 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
28 #endif
29
30 #include "tpl_utils_gthread.h"
31
32 #define BUFFER_ARRAY_SIZE 10
33 #define VK_CLIENT_QUEUE_SIZE 3
34
35 static int wl_vk_buffer_key;
36 #define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
37
38 typedef struct _tpl_wl_vk_display       tpl_wl_vk_display_t;
39 typedef struct _tpl_wl_vk_surface       tpl_wl_vk_surface_t;
40 typedef struct _tpl_wl_vk_swapchain     tpl_wl_vk_swapchain_t;
41 typedef struct _tpl_wl_vk_buffer        tpl_wl_vk_buffer_t;
42
/* Per-display backend state for the Vulkan-WSI wayland backend.
 * One instance is allocated per tpl_display and owns the dedicated
 * "wl_vk_thread" on which all wayland/tdm event handling runs. */
struct _tpl_wl_vk_display {
        tpl_gsource                  *disp_source; /* gsource polling the wl_display fd on wl_vk_thread */
        tpl_gthread                  *thread;      /* the "wl_vk_thread" event thread */
        tpl_gmutex                    wl_event_mutex; /* serializes ev_queue dispatching (see disp_dispatch) */

        struct wl_display            *wl_display;  /* native wl_display passed by the application */
        struct wl_event_queue        *ev_queue;    /* event queue for proxies handled on wl_vk_thread */
        struct wayland_tbm_client    *wl_tbm_client;
        int                           last_error; /* errno of the last wl_display error */

        tpl_bool_t                    wl_initialized;

        struct {
                tdm_client                   *tdm_client;
                tpl_gsource                  *tdm_source;    /* gsource polling tdm_display_fd */
                int                           tdm_display_fd;
                tpl_bool_t                    tdm_initialized;
                /* To make sure that tpl_gsource has been successfully finalized. */
                tpl_bool_t                    gsource_finalized;
                tpl_gmutex                    tdm_mutex;
                tpl_gcond                     tdm_cond;
        } tdm;

        tpl_bool_t                    use_wait_vblank;   /* default TRUE; disabled via TPL_WAIT_VBLANK env */
        tpl_bool_t                    use_explicit_sync; /* TRUE when explicit_sync global was bound */
        tpl_bool_t                    prepared;          /* TRUE between prepare_read_queue and read/cancel */

        /* To make sure that tpl_gsource has been successfully finalized. */
        tpl_bool_t                    gsource_finalized;
        tpl_gmutex                    disp_mutex;
        tpl_gcond                     disp_cond;

        /* device surface capabilities */
        int                           min_buffer;
        int                           max_buffer;
        int                           present_modes;
#if TIZEN_FEATURE_ENABLE
        struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
#endif
};
83
/* Swapchain state backing a VkSwapchainKHR: a tbm_surface_queue plus the
 * properties it was created with. Reference-counted via ref_cnt. */
struct _tpl_wl_vk_swapchain {
        tpl_wl_vk_surface_t          *wl_vk_surface; /* owning surface */

        tbm_surface_queue_h           tbm_queue;     /* buffer queue shared with the compositor */
        tpl_result_t                  result;        /* result of the queue create/destroy request */

        tpl_bool_t                    create_done;

        /* properties requested at swapchain creation time */
        struct {
                int                       width;
                int                       height;
                tbm_format                format;
                int                       buffer_count;
                int                       present_mode;
        } properties;

        /* array of buffers exposed to the Vulkan client */
        tbm_surface_h                *swapchain_buffers;

        tpl_util_atomic_uint          ref_cnt;
};
104
/* Request codes delivered to the surface gsource via sent_message.
 * NOTE(review): values are distinct bits (1, 2, 4, 8), presumably so that
 * multiple pending requests can be OR-ed together — confirm against the
 * surf gsource dispatch handler (not visible in this chunk). */
typedef enum surf_message {
        NONE_MESSAGE = 0,
        INIT_SURFACE = 1,
        ACQUIRABLE = 2,
        CREATE_QUEUE = 4,
        DESTROY_QUEUE = 8,
} surf_message;
112
/* Per-surface backend state: links a tpl_surface/wl_surface pair to its
 * swapchain and tracks the wl_vk_buffers currently in flight. */
struct _tpl_wl_vk_surface {
        tpl_gsource                  *surf_source; /* gsource handling surf_message requests on wl_vk_thread */

        tpl_wl_vk_swapchain_t        *swapchain;   /* currently active swapchain (may be NULL) */

        struct wl_surface            *wl_surface;
#if TIZEN_FEATURE_ENABLE
        struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
#endif
        tdm_client_vblank            *vblank;      /* per-surface vblank object for commit pacing */

        /* surface information */
        int                           render_done_cnt;

        tpl_wl_vk_display_t          *wl_vk_display;
        tpl_surface_t                *tpl_surface;

        /* wl_vk_buffer array for buffer tracing */
        tpl_wl_vk_buffer_t           *buffers[BUFFER_ARRAY_SIZE];
        int                           buffer_cnt; /* the number of using wl_vk_buffers */
        tpl_gmutex                    buffers_mutex; /* guards buffers[] and buffer_cnt */

        tpl_list_t                   *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */

        tpl_gmutex                    surf_mutex;
        tpl_gcond                     surf_cond;

        /* for waiting draw done */
        tpl_bool_t                    is_activated;
        tpl_bool_t                    reset; /* TRUE if queue was reset by external entity */
        tpl_bool_t                    vblank_done;
        tpl_bool_t                    initialized_in_thread;

        /* To make sure that tpl_gsource has been successfully finalized. */
        tpl_bool_t                    gsource_finalized;

        /* last request posted to surf_source (see surf_message) */
        surf_message                  sent_message;

        int                           post_interval;
};
153
/* Lifecycle states of a wl_vk_buffer, in the order a buffer normally
 * progresses. Keep in sync with status_to_string[] below. */
typedef enum buffer_status {
        RELEASED = 0,             // 0
        DEQUEUED,                 // 1
        ENQUEUED,                 // 2
        ACQUIRED,                 // 3
        WAITING_SIGNALED,         // 4
        WAITING_VBLANK,           // 5
        COMMITTED,                // 6
} buffer_status_t;
163
/* Printable names for buffer_status_t, indexed by the enum value.
 * Must stay in sync with buffer_status_t above. */
static const char *status_to_string[7] = {
        "RELEASED",         /* 0 */
        "DEQUEUED",         /* 1 */
        "ENQUEUED",         /* 2 */
        "ACQUIRED",         /* 3 */
        "WAITING_SIGNALED", /* 4 */
        "WAITING_VBLANK",   /* 5 */
        "COMMITTED",        /* 6 */
};
173
/* Per-buffer tracking record pairing a tbm_surface with its wl_buffer and
 * synchronization state. Stored in wl_vk_surface->buffers[] at index idx. */
struct _tpl_wl_vk_buffer {
        tbm_surface_h                 tbm_surface;
        int                           bo_name; /* tbm bo name, used for buffer tracing/logging */

        struct wl_buffer             *wl_buffer;
        int                           dx, dy; /* position to attach to wl_surface */
        int                           width, height; /* size to attach to wl_surface */

        buffer_status_t               status; /* for tracing buffer status */
        int                           idx; /* position index in buffers array of wl_vk_surface */

        /* for damage region */
        int                           num_rects;
        int                          *rects;

        /* for checking need_to_commit (frontbuffer mode) */
        tpl_bool_t                    need_to_commit;

#if TIZEN_FEATURE_ENABLE
        /* to get release event via zwp_linux_buffer_release_v1 */
        struct zwp_linux_buffer_release_v1 *buffer_release;
#endif

        /* each buffer owns its release_fence_fd until it passes
         * ownership to EGL */
        int32_t                       release_fence_fd;

        /* each buffer owns its acquire_fence_fd.
         * If zwp_linux_buffer_release_v1 is used, ownership of this fd
         * will be passed to the display server.
         * Otherwise it will be used as a fence waiting for render done
         * on tpl thread */
        int32_t                       acquire_fence_fd;

        tpl_gmutex                    mutex;
        tpl_gcond                     cond;

        tpl_wl_vk_surface_t          *wl_vk_surface; /* back-pointer to the owning surface */
};
213
214 static void
215 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
216 static int
217 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
218 static void
219 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
220 static void
221 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
222 static tpl_result_t
223 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
224 static void
225 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
226 static tpl_result_t
227 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
228 static void
229 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
230                                                   tpl_wl_vk_buffer_t *wl_vk_buffer);
231
232 static tpl_bool_t
233 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
234 {
235         struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
236
237         if (!wl_vk_native_dpy) {
238                 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
239                 return TPL_FALSE;
240         }
241
242         /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
243            is a memory address pointing the structure of wl_display_interface. */
244         if (wl_vk_native_dpy == &wl_display_interface)
245                 return TPL_TRUE;
246
247         if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
248                                 strlen(wl_display_interface.name)) == 0) {
249                 return TPL_TRUE;
250         }
251
252         return TPL_FALSE;
253 }
254
255 static tpl_bool_t
256 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
257 {
258         tpl_wl_vk_display_t        *wl_vk_display = NULL;
259         tdm_error                   tdm_err = TDM_ERROR_NONE;
260
261         TPL_IGNORE(message);
262
263         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
264         if (!wl_vk_display) {
265                 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
266                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
267                 return TPL_FALSE;
268         }
269
270         tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client);
271
272         /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
273          * When tdm_source is no longer available due to an unexpected situation,
274          * wl_vk_thread must remove it from the thread and destroy it.
275          * In that case, tdm_vblank can no longer be used for surfaces and displays
276          * that used this tdm_source. */
277         if (tdm_err != TDM_ERROR_NONE) {
278                 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
279                                 tdm_err);
280                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
281
282                 tpl_gsource_destroy(gsource, TPL_FALSE);
283
284                 wl_vk_display->tdm.tdm_source = NULL;
285
286                 return TPL_FALSE;
287         }
288
289         return TPL_TRUE;
290 }
291
292 static void
293 __thread_func_tdm_finalize(tpl_gsource *gsource)
294 {
295         tpl_wl_vk_display_t *wl_vk_display = NULL;
296
297         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
298
299         tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
300
301         TPL_INFO("[TDM_CLIENT_FINI]",
302                          "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
303                          wl_vk_display, wl_vk_display->tdm.tdm_client,
304                          wl_vk_display->tdm.tdm_display_fd);
305
306         if (wl_vk_display->tdm.tdm_client) {
307                 tdm_client_destroy(wl_vk_display->tdm.tdm_client);
308                 wl_vk_display->tdm.tdm_client = NULL;
309                 wl_vk_display->tdm.tdm_display_fd = -1;
310         }
311
312         wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
313         wl_vk_display->tdm.gsource_finalized = TPL_TRUE;
314
315         tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond);
316         tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
317 }
318
/* gsource callbacks for the tdm_client fd: no prepare/check hooks are
 * needed — events are consumed in dispatch and the client is torn down
 * in finalize. */
static tpl_gsource_functions tdm_funcs = {
        .prepare  = NULL,
        .check    = NULL,
        .dispatch = __thread_func_tdm_dispatch,
        .finalize = __thread_func_tdm_finalize,
};
325
326 static tpl_result_t
327 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
328 {
329         tdm_client       *tdm_client = NULL;
330         int               tdm_display_fd = -1;
331         tdm_error         tdm_err = TDM_ERROR_NONE;
332
333         tdm_client = tdm_client_create(&tdm_err);
334         if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
335                 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
336                 return TPL_ERROR_INVALID_OPERATION;
337         }
338
339         tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
340         if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
341                 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
342                 tdm_client_destroy(tdm_client);
343                 return TPL_ERROR_INVALID_OPERATION;
344         }
345
346         wl_vk_display->tdm.tdm_display_fd  = tdm_display_fd;
347         wl_vk_display->tdm.tdm_client      = tdm_client;
348         wl_vk_display->tdm.tdm_source      = NULL;
349         wl_vk_display->tdm.tdm_initialized = TPL_TRUE;
350
351         TPL_INFO("[TDM_CLIENT_INIT]",
352                          "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
353                          wl_vk_display, tdm_client, tdm_display_fd);
354
355         return TPL_ERROR_NONE;
356 }
357
/* wl_registry global handler: binds zwp_linux_explicit_synchronization_v1
 * when advertised, unless disabled via the TPL_EFS environment variable. */
static void
__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
                                                          uint32_t name, const char *interface,
                                                          uint32_t version)
{
#if TIZEN_FEATURE_ENABLE
        tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
        char *env;

        if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") != 0)
                return;

        /* TPL_EFS=0 disables explicit fence sync entirely. */
        env = tpl_getenv("TPL_EFS");
        if (env && !atoi(env)) {
                wl_vk_display->use_explicit_sync = TPL_FALSE;
                return;
        }

        wl_vk_display->explicit_sync =
                        wl_registry_bind(wl_registry, name,
                                                         &zwp_linux_explicit_synchronization_v1_interface, 1);
        wl_vk_display->use_explicit_sync = TPL_TRUE;
        TPL_LOG_D("[REGISTRY_BIND]",
                          "wl_vk_display(%p) bind zwp_linux_explicit_synchronization_v1_interface",
                          wl_vk_display);
#endif
}
382
/* wl_registry global_remove handler: intentionally empty — none of the
 * globals bound by this backend need removal handling. */
static void
__cb_wl_resistry_global_remove_callback(void *data,
                                                                                struct wl_registry *wl_registry,
                                                                                uint32_t name)
{
}
389
390 static const struct wl_registry_listener registry_listener = {
391         __cb_wl_resistry_global_callback,
392         __cb_wl_resistry_global_remove_callback
393 };
394
395 static void
396 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
397                                           const char *func_name)
398 {
399         int dpy_err;
400         char buf[1024];
401         strerror_r(errno, buf, sizeof(buf));
402
403         if (wl_vk_display->last_error == errno)
404                 return;
405
406         TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
407
408         dpy_err = wl_display_get_error(wl_vk_display->wl_display);
409         if (dpy_err == EPROTO) {
410                 const struct wl_interface *err_interface;
411                 uint32_t err_proxy_id, err_code;
412                 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
413                                                                                                  &err_interface,
414                                                                                                  &err_proxy_id);
415                 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
416                                 err_interface->name, err_code, err_proxy_id);
417         }
418
419         wl_vk_display->last_error = errno;
420 }
421
422 static tpl_result_t
423 _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
424 {
425         struct wl_registry *registry                = NULL;
426         struct wl_event_queue *queue                = NULL;
427         struct wl_display *display_wrapper          = NULL;
428         struct wl_proxy *wl_tbm                     = NULL;
429         struct wayland_tbm_client *wl_tbm_client    = NULL;
430         int ret;
431         tpl_result_t result = TPL_ERROR_NONE;
432
433         queue = wl_display_create_queue(wl_vk_display->wl_display);
434         if (!queue) {
435                 TPL_ERR("Failed to create wl_queue wl_display(%p)",
436                                 wl_vk_display->wl_display);
437                 result = TPL_ERROR_INVALID_OPERATION;
438                 goto fini;
439         }
440
441         wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
442         if (!wl_vk_display->ev_queue) {
443                 TPL_ERR("Failed to create wl_queue wl_display(%p)",
444                                 wl_vk_display->wl_display);
445                 result = TPL_ERROR_INVALID_OPERATION;
446                 goto fini;
447         }
448
449         display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
450         if (!display_wrapper) {
451                 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
452                                 wl_vk_display->wl_display);
453                 result = TPL_ERROR_INVALID_OPERATION;
454                 goto fini;
455         }
456
457         wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
458
459         registry = wl_display_get_registry(display_wrapper);
460         if (!registry) {
461                 TPL_ERR("Failed to create wl_registry");
462                 result = TPL_ERROR_INVALID_OPERATION;
463                 goto fini;
464         }
465
466         wl_proxy_wrapper_destroy(display_wrapper);
467         display_wrapper = NULL;
468
469         wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
470         if (!wl_tbm_client) {
471                 TPL_ERR("Failed to initialize wl_tbm_client.");
472                 result = TPL_ERROR_INVALID_CONNECTION;
473                 goto fini;
474         }
475
476         wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
477         if (!wl_tbm) {
478                 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
479                 result = TPL_ERROR_INVALID_CONNECTION;
480                 goto fini;
481         }
482
483         wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
484         wl_vk_display->wl_tbm_client = wl_tbm_client;
485
486         if (wl_registry_add_listener(registry, &registry_listener,
487                                                                  wl_vk_display)) {
488                 TPL_ERR("Failed to wl_registry_add_listener");
489                 result = TPL_ERROR_INVALID_OPERATION;
490                 goto fini;
491         }
492
493         ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
494         if (ret == -1) {
495                 _wl_display_print_err(wl_vk_display, "roundtrip_queue");
496                 result = TPL_ERROR_INVALID_OPERATION;
497                 goto fini;
498         }
499
500 #if TIZEN_FEATURE_ENABLE
501         if (wl_vk_display->explicit_sync) {
502                 wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
503                                                    wl_vk_display->ev_queue);
504                 TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
505                                   wl_vk_display->explicit_sync);
506         }
507 #endif
508
509         wl_vk_display->wl_initialized = TPL_TRUE;
510
511         TPL_INFO("[WAYLAND_INIT]",
512                          "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
513                          wl_vk_display, wl_vk_display->wl_display,
514                          wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
515 #if TIZEN_FEATURE_ENABLE
516         TPL_INFO("[WAYLAND_INIT]",
517                          "explicit_sync(%p)",
518                          wl_vk_display->explicit_sync);
519 #endif
520 fini:
521         if (display_wrapper)
522                 wl_proxy_wrapper_destroy(display_wrapper);
523         if (registry)
524                 wl_registry_destroy(registry);
525         if (queue)
526                 wl_event_queue_destroy(queue);
527
528         return result;
529 }
530
/* Tears down the wayland objects created by _thread_wl_display_init().
 * Runs on wl_vk_thread from the display gsource finalize path.
 * The ordering below is deliberate: cancel a pending read first, flush any
 * queued events, then destroy protocol objects before the event queue. */
static void
_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
{
        /* If wl_vk_display is in prepared state, cancel it */
        if (wl_vk_display->prepared) {
                wl_display_cancel_read(wl_vk_display->wl_display);
                wl_vk_display->prepared = TPL_FALSE;
        }

        /* Dispatch any events still queued so their handlers run before the
         * objects they reference are destroyed. */
        if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
                                                                                  wl_vk_display->ev_queue) == -1) {
                _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
        }

#if TIZEN_FEATURE_ENABLE
        if (wl_vk_display->explicit_sync) {
                TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
                                 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
                                 wl_vk_display, wl_vk_display->explicit_sync);
                zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
                wl_vk_display->explicit_sync = NULL;
        }
#endif

        if (wl_vk_display->wl_tbm_client) {
                struct wl_proxy *wl_tbm = NULL;

                /* Detach the wl_tbm proxy from ev_queue before the queue is
                 * destroyed below. */
                wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
                                                                                wl_vk_display->wl_tbm_client);
                if (wl_tbm) {
                        wl_proxy_set_queue(wl_tbm, NULL);
                }

                TPL_INFO("[WL_TBM_DEINIT]",
                                 "wl_vk_display(%p) wl_tbm_client(%p)",
                                 wl_vk_display, wl_vk_display->wl_tbm_client);
                wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
                wl_vk_display->wl_tbm_client = NULL;
        }

        wl_event_queue_destroy(wl_vk_display->ev_queue);

        wl_vk_display->wl_initialized = TPL_FALSE;

        TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
                         wl_vk_display, wl_vk_display->wl_display);
}
578
579 static void*
580 _thread_init(void *data)
581 {
582         tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
583
584         if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
585                 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
586                                 wl_vk_display, wl_vk_display->wl_display);
587         }
588
589         if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
590                 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
591         }
592
593         return wl_vk_display;
594 }
595
/* gsource prepare callback for the display fd.
 * Implements the wayland multi-thread read protocol: acquire the read
 * intention with wl_display_prepare_read_queue(), dispatching any already
 * queued events until it succeeds. Returns TPL_TRUE to skip polling and go
 * straight to dispatch, TPL_FALSE to poll normally. */
static tpl_bool_t
__thread_func_disp_prepare(tpl_gsource *gsource)
{
        tpl_wl_vk_display_t *wl_vk_display =
                (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

        /* If this wl_vk_display is already prepared,
         * do nothing in this function. */
        if (wl_vk_display->prepared)
                return TPL_FALSE;

        /* If there is a last_error, there is no need to poll,
         * so skip directly to dispatch.
         * prepare -> dispatch */
        if (wl_vk_display->last_error)
                return TPL_TRUE;

        /* prepare_read_queue fails while events are still queued; drain them
         * and retry until the read intention is acquired. */
        while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
                                                                                 wl_vk_display->ev_queue) != 0) {
                if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
                                                                                          wl_vk_display->ev_queue) == -1) {
                        _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
                }
        }

        wl_vk_display->prepared = TPL_TRUE;

        /* Send out any buffered requests before blocking in poll. */
        wl_display_flush(wl_vk_display->wl_display);

        return TPL_FALSE;
}
627
/* gsource check callback for the display fd.
 * Completes the read intention taken in prepare: either read_events() when
 * the fd is readable, or cancel_read() otherwise — exactly one of the two
 * must follow every successful prepare_read_queue(). Returns TPL_TRUE when
 * dispatch should run. */
static tpl_bool_t
__thread_func_disp_check(tpl_gsource *gsource)
{
        tpl_wl_vk_display_t *wl_vk_display =
                (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
        tpl_bool_t ret = TPL_FALSE;

        /* Nothing to complete if prepare did not acquire the read intention. */
        if (!wl_vk_display->prepared)
                return ret;

        /* If prepared, but last_error is set,
         * cancel_read is executed and FALSE is returned.
         * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
         * and skipping disp_check from prepare to disp_dispatch.
         * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
        if (wl_vk_display->prepared && wl_vk_display->last_error) {
                wl_display_cancel_read(wl_vk_display->wl_display);
                return ret;
        }

        if (tpl_gsource_check_io_condition(gsource)) {
                if (wl_display_read_events(wl_vk_display->wl_display) == -1)
                        _wl_display_print_err(wl_vk_display, "read_event");
                ret = TPL_TRUE;
        } else {
                wl_display_cancel_read(wl_vk_display->wl_display);
                ret = TPL_FALSE;
        }

        /* The read intention is consumed either way. */
        wl_vk_display->prepared = TPL_FALSE;

        return ret;
}
661
/* gsource dispatch callback for the display fd.
 * Dispatches events queued on ev_queue under wl_event_mutex, then flushes
 * outgoing requests. Returns TPL_FALSE (source removal) once last_error is
 * set, TPL_TRUE otherwise. */
static tpl_bool_t
__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
{
        tpl_wl_vk_display_t *wl_vk_display =
                (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

        TPL_IGNORE(message);

        /* If there is last_error, SOURCE_REMOVE should be returned
         * to remove the gsource from the main loop.
         * This is because wl_vk_display is not valid since last_error was set.*/
        if (wl_vk_display->last_error) {
                return TPL_FALSE;
        }

        /* wl_event_mutex serializes this dispatch against other users of
         * ev_queue. */
        tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
        if (tpl_gsource_check_io_condition(gsource)) {
                if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
                                                                                          wl_vk_display->ev_queue) == -1) {
                        _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
                }
        }

        wl_display_flush(wl_vk_display->wl_display);
        tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

        return TPL_TRUE;
}
690
691 static void
692 __thread_func_disp_finalize(tpl_gsource *gsource)
693 {
694         tpl_wl_vk_display_t *wl_vk_display =
695                 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
696
697         tpl_gmutex_lock(&wl_vk_display->disp_mutex);
698         TPL_LOG_D("[D_FINALIZE]", "wl_vk_display(%p) tpl_gsource(%p)",
699                           wl_vk_display, gsource);
700
701         if (wl_vk_display->wl_initialized)
702                 _thread_wl_display_fini(wl_vk_display);
703
704         wl_vk_display->gsource_finalized = TPL_TRUE;
705
706         tpl_gcond_signal(&wl_vk_display->disp_cond);
707         tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
708
709         return;
710 }
711
712
/* gsource callbacks for the wl_display fd, wiring the wayland
 * prepare-read / read-or-cancel / dispatch protocol into the gsource
 * prepare / check / dispatch phases. */
static tpl_gsource_functions disp_funcs = {
        .prepare  = __thread_func_disp_prepare,
        .check    = __thread_func_disp_check,
        .dispatch = __thread_func_disp_dispatch,
        .finalize = __thread_func_disp_finalize,
};
719
720 static tpl_result_t
721 __tpl_wl_vk_display_init(tpl_display_t *display)
722 {
723         TPL_ASSERT(display);
724
725         tpl_wl_vk_display_t *wl_vk_display = NULL;
726
727         /* Do not allow default display in wayland */
728         if (!display->native_handle) {
729                 TPL_ERR("Invalid native handle for display.");
730                 return TPL_ERROR_INVALID_PARAMETER;
731         }
732
733         if (!_check_native_handle_is_wl_display(display->native_handle)) {
734                 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
735                 return TPL_ERROR_INVALID_PARAMETER;
736         }
737
738         wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
739                                                         sizeof(tpl_wl_vk_display_t));
740         if (!wl_vk_display) {
741                 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
742                 return TPL_ERROR_OUT_OF_MEMORY;
743         }
744
745         display->backend.data             = wl_vk_display;
746         display->bufmgr_fd                = -1;
747
748         wl_vk_display->tdm.tdm_initialized    = TPL_FALSE;
749         wl_vk_display->wl_initialized     = TPL_FALSE;
750
751         wl_vk_display->ev_queue           = NULL;
752         wl_vk_display->wl_display         = (struct wl_display *)display->native_handle;
753         wl_vk_display->last_error         = 0;
754         wl_vk_display->use_explicit_sync  = TPL_FALSE;   // default disabled
755         wl_vk_display->prepared           = TPL_FALSE;
756
757         /* Wayland Interfaces */
758 #if TIZEN_FEATURE_ENABLE
759         wl_vk_display->explicit_sync      = NULL;
760 #endif
761         wl_vk_display->wl_tbm_client      = NULL;
762
763         /* Vulkan specific surface capabilities */
764         wl_vk_display->min_buffer         = 2;
765         wl_vk_display->max_buffer         = VK_CLIENT_QUEUE_SIZE;
766         wl_vk_display->present_modes      = TPL_DISPLAY_PRESENT_MODE_FIFO;
767
768         wl_vk_display->use_wait_vblank    = TPL_TRUE;   // default enabled
769         {
770                 char *env = tpl_getenv("TPL_WAIT_VBLANK");
771                 if (env && !atoi(env)) {
772                         wl_vk_display->use_wait_vblank = TPL_FALSE;
773                 }
774         }
775
776         tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
777
778         tpl_gmutex_init(&wl_vk_display->disp_mutex);
779         tpl_gcond_init(&wl_vk_display->disp_cond);
780
781         /* Create gthread */
782         wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
783                                                                                            (tpl_gthread_func)_thread_init,
784                                                                                            (void *)wl_vk_display);
785         if (!wl_vk_display->thread) {
786                 TPL_ERR("Failed to create wl_vk_thread");
787                 goto free_display;
788         }
789
790         wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
791                                                                                                         (void *)wl_vk_display,
792                                                                                                         wl_display_get_fd(wl_vk_display->wl_display),
793                                                                                                         &disp_funcs, SOURCE_TYPE_NORMAL);
794         if (!wl_vk_display->disp_source) {
795                 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
796                                 display->native_handle,
797                                 wl_vk_display->thread);
798                 goto free_display;
799         }
800
801         tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex);
802         tpl_gcond_init(&wl_vk_display->tdm.tdm_cond);
803
804         wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread,
805                                                                                                    (void *)wl_vk_display,
806                                                                                                    wl_vk_display->tdm.tdm_display_fd,
807                                                                                                    &tdm_funcs, SOURCE_TYPE_NORMAL);
808         if (!wl_vk_display->tdm.tdm_source) {
809                 TPL_ERR("Failed to create tdm_gsource\n");
810                 goto free_display;
811         }
812
813         TPL_INFO("[DISPLAY_INIT]",
814                          "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
815                          wl_vk_display,
816                          wl_vk_display->thread,
817                          wl_vk_display->wl_display);
818
819         TPL_INFO("[DISPLAY_INIT]",
820                          "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
821                          wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
822                          wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
823
824         return TPL_ERROR_NONE;
825
826 free_display:
827         if (wl_vk_display->tdm.tdm_source) {
828                 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
829                 while (!wl_vk_display->tdm.gsource_finalized) {
830                         tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
831                         tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
832                 }
833                 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
834         }
835
836         if (wl_vk_display->disp_source) {
837                 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
838                 while (!wl_vk_display->gsource_finalized) {
839                         tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
840                         tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
841                 }
842                 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
843         }
844
845         if (wl_vk_display->thread) {
846                 tpl_gthread_destroy(wl_vk_display->thread);
847         }
848
849         tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
850         tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
851         tpl_gcond_clear(&wl_vk_display->disp_cond);
852         tpl_gmutex_clear(&wl_vk_display->disp_mutex);
853
854         wl_vk_display->thread = NULL;
855         free(wl_vk_display);
856
857         display->backend.data = NULL;
858         return TPL_ERROR_INVALID_OPERATION;
859 }
860
/* Tear down the wl_vk backend display created by __tpl_wl_vk_display_init().
 *
 * Destroys the tdm and display gsources inside the wl_vk thread (waiting
 * until each reports finalized), stops the thread, clears all sync
 * primitives, and frees the backend data. Safe when backend.data is NULL. */
static void
__tpl_wl_vk_display_fini(tpl_display_t *display)
{
	tpl_wl_vk_display_t *wl_vk_display;

	TPL_ASSERT(display);

	wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
	if (wl_vk_display) {
		TPL_INFO("[DISPLAY_FINI]",
				 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
				 wl_vk_display,
				 wl_vk_display->thread,
				 wl_vk_display->wl_display);

		if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) {
			/* This is a protection to prevent problems that arise in unexpected situations
			 * that g_cond_wait cannot work normally.
			 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
			 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
			 * */
			tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
			while (!wl_vk_display->tdm.gsource_finalized) {
				tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
				tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
			}
			wl_vk_display->tdm.tdm_source = NULL;
			tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
		}

		/* This is a protection to prevent problems that arise in unexpected situations
		 * that g_cond_wait cannot work normally.
		 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
		 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
		 * */
		tpl_gmutex_lock(&wl_vk_display->disp_mutex);
		while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) {
			tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
			tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
		}
		wl_vk_display->disp_source = NULL;
		tpl_gmutex_unlock(&wl_vk_display->disp_mutex);

		/* Both gsources are finalized; the thread can now be joined. */
		if (wl_vk_display->thread) {
			tpl_gthread_destroy(wl_vk_display->thread);
			wl_vk_display->thread = NULL;
		}

		/* Clear sync primitives only after the thread is gone so nobody
		 * can still be waiting on them. */
		tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
		tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
		tpl_gcond_clear(&wl_vk_display->disp_cond);
		tpl_gmutex_clear(&wl_vk_display->disp_mutex);

		tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);

		free(wl_vk_display);
	}

	display->backend.data = NULL;
}
921
922 static tpl_result_t
923 __tpl_wl_vk_display_query_config(tpl_display_t *display,
924                 tpl_surface_type_t surface_type,
925                 int red_size, int green_size,
926                 int blue_size, int alpha_size,
927                 int color_depth, int *native_visual_id,
928                 tpl_bool_t *is_slow)
929 {
930         TPL_ASSERT(display);
931
932         if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
933                         green_size == 8 && blue_size == 8 &&
934                         (color_depth == 32 || color_depth == 24)) {
935
936                 if (alpha_size == 8) {
937                         if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
938                         if (is_slow) *is_slow = TPL_FALSE;
939                         return TPL_ERROR_NONE;
940                 }
941                 if (alpha_size == 0) {
942                         if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
943                         if (is_slow) *is_slow = TPL_FALSE;
944                         return TPL_ERROR_NONE;
945                 }
946         }
947
948         return TPL_ERROR_INVALID_PARAMETER;
949 }
950
951 static tpl_result_t
952 __tpl_wl_vk_display_filter_config(tpl_display_t *display,
953                                                                           int *visual_id,
954                                                                           int alpha_size)
955 {
956         TPL_IGNORE(display);
957         TPL_IGNORE(visual_id);
958         TPL_IGNORE(alpha_size);
959         return TPL_ERROR_NONE;
960 }
961
962 static tpl_result_t
963 __tpl_wl_vk_display_query_window_supported_buffer_count(
964         tpl_display_t *display,
965         tpl_handle_t window, int *min, int *max)
966 {
967         tpl_wl_vk_display_t *wl_vk_display = NULL;
968
969         TPL_ASSERT(display);
970         TPL_ASSERT(window);
971
972         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
973         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
974
975         if (min) *min = wl_vk_display->min_buffer;
976         if (max) *max = wl_vk_display->max_buffer;
977
978         return TPL_ERROR_NONE;
979 }
980
981 static tpl_result_t
982 __tpl_wl_vk_display_query_window_supported_present_modes(
983         tpl_display_t *display,
984         tpl_handle_t window, int *present_modes)
985 {
986         tpl_wl_vk_display_t *wl_vk_display = NULL;
987
988         TPL_ASSERT(display);
989         TPL_ASSERT(window);
990
991         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
992         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
993
994         if (present_modes) {
995                 *present_modes = wl_vk_display->present_modes;
996         }
997
998         return TPL_ERROR_NONE;
999 }
1000
/* Force-release every buffer still tracked in wl_vk_surface->buffers[].
 *
 * For each buffer: detach it from the array, then depending on its
 * lifecycle status wait up to 16ms for an in-flight commit to settle,
 * release it (ACQUIRED..COMMITTED) or cancel its dequeue (DEQUEUED) on
 * the swapchain's tbm_queue, and drop the internal tbm ref.
 * Used during surface/swapchain teardown. */
static void
_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display      = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain        = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer        = NULL;
	tpl_bool_t need_to_release              = TPL_FALSE;
	tpl_bool_t need_to_cancel               = TPL_FALSE;
	buffer_status_t status                  = RELEASED;
	int idx                                 = 0;

	while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
		/* NOTE(review): lock order here is wl_event_mutex then
		 * buffers_mutex — presumably matching the event thread; verify
		 * against the other users of these mutexes. */
		tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		wl_vk_buffer = wl_vk_surface->buffers[idx];

		if (wl_vk_buffer) {
			/* Detach from the tracking array before touching the buffer. */
			wl_vk_surface->buffers[idx] = NULL;
			wl_vk_surface->buffer_cnt--;
		} else {
			tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
			tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
			idx++;
			continue;
		}

		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		status = wl_vk_buffer->status;

		TPL_INFO("[BUFFER_CLEAR]",
				 "[%d] wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
				 idx, wl_vk_surface, wl_vk_buffer,
				 wl_vk_buffer->tbm_surface,
				 status_to_string[status]);

		if (status >= ENQUEUED) {
			tpl_bool_t need_to_wait  = TPL_FALSE;
			tpl_result_t wait_result = TPL_ERROR_NONE;

			/* Without explicit sync, the buffer settles at WAITING_VBLANK;
			 * with explicit sync it must reach COMMITTED before release. */
			if (!wl_vk_display->use_explicit_sync &&
				status < WAITING_VBLANK)
				need_to_wait = TPL_TRUE;

			if (wl_vk_display->use_explicit_sync &&
				status < COMMITTED)
				need_to_wait = TPL_TRUE;

			if (need_to_wait) {
				/* Drop wl_event_mutex while waiting so the event thread
				 * can progress and signal this buffer's cond. */
				tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
				wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond,
												  &wl_vk_buffer->mutex,
												  16); /* 16ms */
				tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

				status = wl_vk_buffer->status;

				if (wait_result == TPL_ERROR_TIME_OUT)
					TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
							 wl_vk_buffer);
			}
		}

		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
		/* It has been acquired but has not yet been released, so this
		 * buffer must be released. */
		need_to_release = (status >= ACQUIRED && status <= COMMITTED);

		/* After dequeue, it has not been enqueued yet
		 * so cancel_dequeue must be performed. */
		need_to_cancel = (status == DEQUEUED);

		if (swapchain && swapchain->tbm_queue) {
			if (need_to_release) {
				tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
													wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}

			if (need_to_cancel) {
				tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
														   wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}
		}

		wl_vk_buffer->status = RELEASED;

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		/* Drop the internal ref held for a released/cancelled buffer. */
		if (need_to_release || need_to_cancel)
			tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);

		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

		idx++;
	}
}
1106
1107 static tdm_client_vblank*
1108 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1109 {
1110         tdm_client_vblank *vblank = NULL;
1111         tdm_client_output *tdm_output = NULL;
1112         tdm_error tdm_err = TDM_ERROR_NONE;
1113
1114         if (!tdm_client) {
1115                 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1116                 return NULL;
1117         }
1118
1119         tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1120         if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1121                 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1122                 return NULL;
1123         }
1124
1125         vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1126         if (!vblank || tdm_err != TDM_ERROR_NONE) {
1127                 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1128                 return NULL;
1129         }
1130
1131         tdm_err = tdm_client_handle_pending_events(tdm_client);
1132         if (tdm_err != TDM_ERROR_NONE) {
1133                 TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err);
1134         }
1135
1136         tdm_client_vblank_set_enable_fake(vblank, 1);
1137         tdm_client_vblank_set_sync(vblank, 0);
1138
1139         return vblank;
1140 }
1141
/* Per-surface initialization that runs on the wl_vk thread
 * (triggered by the INIT_SURFACE message in __thread_func_surf_dispatch).
 * Creates the tdm vblank object, optionally the explicit-sync surface
 * wrapper, and allocates the vblank-waiting buffer list. */
static void
_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	/* tbm_surface_queue will be created at swapchain_create */

	/* A NULL vblank is tolerated: only logged below when it succeeds. */
	wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
								wl_vk_display->tdm.tdm_client);
	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_INIT]",
				 "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_display->tdm.tdm_client,
				 wl_vk_surface->vblank);
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
		wl_vk_surface->surface_sync =
			zwp_linux_explicit_synchronization_v1_get_synchronization(
					wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
		if (wl_vk_surface->surface_sync) {
			TPL_INFO("[EXPLICIT_SYNC_INIT]",
					 "wl_vk_surface(%p) surface_sync(%p)",
					 wl_vk_surface, wl_vk_surface->surface_sync);
		} else {
			TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
					 wl_vk_surface);
			/* Disable explicit sync display-wide and fall back to
			 * implicit synchronization. */
			wl_vk_display->use_explicit_sync = TPL_FALSE;
		}
	}
#endif
	wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
}
1176
/* Per-surface teardown that runs on the wl_vk thread (from the surface
 * gsource finalize). Releases the resources created in
 * _thread_wl_vk_surface_init(): the vblank-waiting list, the
 * explicit-sync wrapper, and the tdm vblank object. */
static void
_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
{
	TPL_INFO("[SURFACE_FINI]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->vblank_waiting_buffers) {
		__tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
		wl_vk_surface->vblank_waiting_buffers = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_surface->surface_sync) {
		TPL_INFO("[SURFACE_SYNC_DESTROY]",
				 "wl_vk_surface(%p) surface_sync(%p)",
				  wl_vk_surface, wl_vk_surface->surface_sync);
		zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
		wl_vk_surface->surface_sync = NULL;
	}
#endif

	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_DESTROY]",
				 "wl_vk_surface(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_surface->vblank);
		tdm_client_vblank_destroy(wl_vk_surface->vblank);
		wl_vk_surface->vblank = NULL;
	}
}
1207
1208 static tpl_bool_t
1209 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1210 {
1211         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1212
1213         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1214
1215         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1216         if (message & INIT_SURFACE) { /* Initialize surface */
1217                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) initialize message received!",
1218                                   wl_vk_surface);
1219                 _thread_wl_vk_surface_init(wl_vk_surface);
1220                 wl_vk_surface->initialized_in_thread = TPL_TRUE;
1221                 tpl_gcond_signal(&wl_vk_surface->surf_cond);    
1222         }
1223         
1224         if (message & ACQUIRABLE) { /* Acquirable message */
1225                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) acquirable message received!",
1226                                   wl_vk_surface);
1227                 if (_thread_surface_queue_acquire(wl_vk_surface)
1228                         != TPL_ERROR_NONE) {
1229                         TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1230                                         wl_vk_surface);
1231                 }
1232         }
1233
1234         if (message & CREATE_QUEUE) { /* Create tbm_surface_queue */
1235                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) queue creation message received!",
1236                                   wl_vk_surface);
1237                 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1238                         != TPL_ERROR_NONE) {
1239                         TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1240                                         wl_vk_surface);
1241                 }
1242                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1243         }
1244         
1245         if (message & DESTROY_QUEUE) { /* swapchain destroy */
1246                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) swapchain destroy message received!",
1247                                   wl_vk_surface);
1248                 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1249                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1250         }
1251
1252         /* init to NONE_MESSAGE */
1253         wl_vk_surface->sent_message = NONE_MESSAGE;
1254
1255         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1256
1257         return TPL_TRUE;
1258 }
1259
1260 static void
1261 __thread_func_surf_finalize(tpl_gsource *gsource)
1262 {
1263         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1264
1265         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1266         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1267
1268         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1269         TPL_LOG_D("[S_FINALIZE]", "wl_vk_surface(%p) tpl_gsource(%p)",
1270                           wl_vk_surface, gsource);
1271
1272         _thread_wl_vk_surface_fini(wl_vk_surface);
1273
1274         wl_vk_surface->gsource_finalized = TPL_TRUE;
1275
1276         tpl_gcond_signal(&wl_vk_surface->surf_cond);
1277         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1278 }
1279
/* gsource callbacks for the per-surface event source on the wl_vk thread.
 * The source is message-driven, so prepare/check are unused. */
static tpl_gsource_functions surf_funcs = {
	.prepare = NULL,
	.check = NULL,
	.dispatch = __thread_func_surf_dispatch,
	.finalize = __thread_func_surf_finalize,
};
1286
1287
/* Backend surface init entry point.
 *
 * Allocates the backend surface data, creates a message-driven gsource
 * on the wl_vk thread, then sends INIT_SURFACE and blocks on surf_cond
 * until the thread finishes _thread_wl_vk_surface_init().
 *
 * Returns TPL_ERROR_NONE on success,
 *         TPL_ERROR_OUT_OF_MEMORY on allocation failure,
 *         TPL_ERROR_INVALID_OPERATION if the gsource cannot be created. */
static tpl_result_t
__tpl_wl_vk_surface_init(tpl_surface_t *surface)
{
	tpl_wl_vk_surface_t *wl_vk_surface      = NULL;
	tpl_wl_vk_display_t *wl_vk_display      = NULL;
	tpl_gsource *surf_source                = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
	TPL_ASSERT(surface->native_handle);

	wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	/* calloc zero-initializes all flags (e.g. gsource_finalized). */
	wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
						 sizeof(tpl_wl_vk_surface_t));
	if (!wl_vk_surface) {
		TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
		return TPL_ERROR_OUT_OF_MEMORY;
	}

	/* fd -1: this gsource is driven purely by messages, not an fd. */
	surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
									 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
	if (!surf_source) {
		TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
				wl_vk_surface);
		free(wl_vk_surface);
		surface->backend.data = NULL;
		return TPL_ERROR_INVALID_OPERATION;
	}

	surface->backend.data                  = (void *)wl_vk_surface;
	/* Real size is learned later from the tbm_queue / window. */
	surface->width                                 = -1;
	surface->height                        = -1;

	wl_vk_surface->surf_source             = surf_source;
	wl_vk_surface->swapchain               = NULL;

	wl_vk_surface->wl_vk_display           = wl_vk_display;
	wl_vk_surface->wl_surface              = (struct wl_surface *)surface->native_handle;
	wl_vk_surface->tpl_surface             = surface;

	wl_vk_surface->reset                   = TPL_FALSE;
	wl_vk_surface->is_activated            = TPL_FALSE;
	wl_vk_surface->vblank_done             = TPL_TRUE;
	wl_vk_surface->initialized_in_thread   = TPL_FALSE;

	wl_vk_surface->render_done_cnt         = 0;

	wl_vk_surface->vblank                  = NULL;
#if TIZEN_FEATURE_ENABLE
	wl_vk_surface->surface_sync            = NULL;
#endif

	wl_vk_surface->sent_message            = NONE_MESSAGE;

	wl_vk_surface->post_interval           = surface->post_interval;

	{
		int i = 0;
		for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
			wl_vk_surface->buffers[i]     = NULL;
		wl_vk_surface->buffer_cnt         = 0;
	}

	tpl_gmutex_init(&wl_vk_surface->surf_mutex);
	tpl_gcond_init(&wl_vk_surface->surf_cond);

	tpl_gmutex_init(&wl_vk_surface->buffers_mutex);

	/* Initialize in thread: send INIT_SURFACE and wait until the wl_vk
	 * thread sets initialized_in_thread and signals surf_cond. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->sent_message = INIT_SURFACE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	while (!wl_vk_surface->initialized_in_thread)
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	TPL_INFO("[SURFACE_INIT]",
			  "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
			  surface, wl_vk_surface, wl_vk_surface->surf_source);

	return TPL_ERROR_NONE;
}
1374
/* Backend surface fini entry point.
 *
 * Destroys the per-surface gsource in the wl_vk thread (waiting on
 * gsource_finalized), then clears sync primitives and frees the backend
 * surface data. The swapchain is expected to be torn down separately. */
static void
__tpl_wl_vk_surface_fini(tpl_surface_t *surface)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	wl_vk_display = (tpl_wl_vk_display_t *)
							 surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

	TPL_INFO("[SURFACE_FINI][BEGIN]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
		/* finalize swapchain */

	}

	wl_vk_surface->swapchain        = NULL;

	/* This is a protection to prevent problems that arise in unexpected situations
	 * that g_cond_wait cannot work normally.
	 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
	 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
	 * */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) {
		tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	}
	wl_vk_surface->surf_source = NULL;
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	_print_buffer_lists(wl_vk_surface);

	wl_vk_surface->wl_surface       = NULL;
	wl_vk_surface->wl_vk_display    = NULL;
	wl_vk_surface->tpl_surface      = NULL;

	/* Lock/unlock pair acts as a barrier: make sure no thread still
	 * holds surf_mutex before it is cleared below. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
	tpl_gcond_clear(&wl_vk_surface->surf_cond);

	TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);

	free(wl_vk_surface);
	surface->backend.data = NULL;
}
1431
1432 static tpl_result_t
1433 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1434                                                                                   int post_interval)
1435 {
1436         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1437
1438         TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1439
1440         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1441
1442         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1443
1444         TPL_INFO("[SET_POST_INTERVAL]",
1445                          "wl_vk_surface(%p) post_interval(%d -> %d)",
1446                          wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1447
1448         wl_vk_surface->post_interval = post_interval;
1449
1450         return TPL_ERROR_NONE;
1451 }
1452
1453 static tpl_bool_t
1454 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1455 {
1456         TPL_ASSERT(surface);
1457         TPL_ASSERT(surface->backend.data);
1458
1459         tpl_wl_vk_surface_t *wl_vk_surface =
1460                 (tpl_wl_vk_surface_t *)surface->backend.data;
1461
1462         return !(wl_vk_surface->reset);
1463 }
1464
1465 static void
1466 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1467                                                           void *data)
1468 {
1469         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1470         tpl_wl_vk_display_t *wl_vk_display = NULL;
1471         tpl_wl_vk_swapchain_t *swapchain   = NULL;
1472         tpl_surface_t *surface             = NULL;
1473         tpl_bool_t is_activated            = TPL_FALSE;
1474         int width, height;
1475
1476         wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1477         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1478
1479         wl_vk_display = wl_vk_surface->wl_vk_display;
1480         TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1481
1482         surface = wl_vk_surface->tpl_surface;
1483         TPL_CHECK_ON_NULL_RETURN(surface);
1484
1485         swapchain = wl_vk_surface->swapchain;
1486         TPL_CHECK_ON_NULL_RETURN(swapchain);
1487
1488         /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1489          * the changed window size at the next frame. */
1490         width = tbm_surface_queue_get_width(tbm_queue);
1491         height = tbm_surface_queue_get_height(tbm_queue);
1492         if (surface->width != width || surface->height != height) {
1493                 TPL_INFO("[QUEUE_RESIZE]",
1494                                  "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1495                                  wl_vk_surface, tbm_queue,
1496                                  surface->width, surface->height, width, height);
1497         }
1498
1499         /* When queue_reset_callback is called, if is_activated is different from
1500          * its previous state change the reset flag to TPL_TRUE to get a new buffer
1501          * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1502         is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
1503                                                                                                                    swapchain->tbm_queue);
1504         if (wl_vk_surface->is_activated != is_activated) {
1505                 if (is_activated) {
1506                         TPL_INFO("[ACTIVATED]",
1507                                           "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1508                                           wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1509                 } else {
1510                         TPL_INFO("[DEACTIVATED]",
1511                                          " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1512                                          wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1513                 }
1514         }
1515
1516         wl_vk_surface->reset = TPL_TRUE;
1517
1518         if (surface->reset_cb)
1519                 surface->reset_cb(surface->reset_data);
1520 }
1521
1522 static void
1523 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1524                                                                    void *data)
1525 {
1526         TPL_IGNORE(tbm_queue);
1527
1528         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1529         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1530
1531         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1532         if (wl_vk_surface->sent_message == NONE_MESSAGE) {
1533                 wl_vk_surface->sent_message = ACQUIRABLE;
1534                 tpl_gsource_send_message(wl_vk_surface->surf_source,
1535                                                                  wl_vk_surface->sent_message);
1536         }
1537         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1538 }
1539
/* Create (or reuse) the tbm_surface_queue backing the surface's swapchain.
 *
 * Runs on the worker thread in response to the CREATE_QUEUE message sent by
 * __tpl_wl_vk_surface_create_swapchain(). Validates the requested swapchain
 * properties against the display limits, then either reuses/resizes the
 * existing tbm_queue or creates a new one through wayland-tbm.
 * On failure the error code is additionally stored in swapchain->result so
 * the caller blocked on surf_cond can observe it; on success
 * swapchain->create_done is set.
 */
static tpl_result_t
_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
{
	TPL_ASSERT (wl_vk_surface);

	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tbm_surface_queue_h tbm_queue      = NULL;
	tbm_bufmgr bufmgr = NULL;
	unsigned int capability;

	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

	/* The requested buffer count must be within [min_buffer, max_buffer]. */
	if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
		TPL_ERR("buffer count(%d) must be higher than (%d)",
				swapchain->properties.buffer_count,
				wl_vk_display->min_buffer);
		swapchain->result = TPL_ERROR_INVALID_PARAMETER;
		return TPL_ERROR_INVALID_PARAMETER;
	}

	if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
		TPL_ERR("buffer count(%d) must be lower than (%d)",
				swapchain->properties.buffer_count,
				wl_vk_display->max_buffer);
		swapchain->result = TPL_ERROR_INVALID_PARAMETER;
		return TPL_ERROR_INVALID_PARAMETER;
	}

	/* The requested present mode must be one the display advertises. */
	if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
		TPL_ERR("Unsupported present_mode(%d)",
				swapchain->properties.present_mode);
		swapchain->result = TPL_ERROR_INVALID_PARAMETER;
		return TPL_ERROR_INVALID_PARAMETER;
	}

	/* Reuse path: a tbm_queue already exists because the swapchain is being
	 * re-created on the same surface. Untrack the previously exported
	 * buffers, resize the queue if needed, and take another swapchain
	 * reference instead of creating a new queue. */
	if (swapchain->tbm_queue) {
		int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
		int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);

		if (swapchain->swapchain_buffers) {
			int i;
			/* NOTE(review): the loop bound is the NEW properties.buffer_count,
			 * but swapchain_buffers was sized for the previous swapchain —
			 * confirm the two counts cannot differ at this point. */
			for (i = 0; i < swapchain->properties.buffer_count; i++) {
				if (swapchain->swapchain_buffers[i]) {
					TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
							 i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
							 _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
					tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
					swapchain->swapchain_buffers[i] = NULL;
				}
			}

			free(swapchain->swapchain_buffers);
			swapchain->swapchain_buffers = NULL;
		}

		if (old_width != swapchain->properties.width ||
			old_height != swapchain->properties.height) {
			tbm_surface_queue_reset(swapchain->tbm_queue,
									swapchain->properties.width,
									swapchain->properties.height,
									TBM_FORMAT_ARGB8888);
			TPL_INFO("[RESIZE]",
					 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
					 wl_vk_surface, swapchain, swapchain->tbm_queue,
					 old_width, old_height,
					 swapchain->properties.width,
					 swapchain->properties.height);
		}

		/* Report the queue's actual size back to the caller; the reused
		 * queue keeps its existing buffer count. */
		swapchain->properties.buffer_count =
			tbm_surface_queue_get_size(swapchain->tbm_queue);

		wl_vk_surface->reset = TPL_FALSE;

		__tpl_util_atomic_inc(&swapchain->ref_cnt);
		swapchain->create_done = TPL_TRUE;

		TPL_INFO("[SWAPCHAIN_REUSE]",
				 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
				 wl_vk_surface, swapchain, swapchain->tbm_queue,
				 swapchain->properties.buffer_count);

		return TPL_ERROR_NONE;
	}

	/* Query the tbm backend capability once to decide whether the queue
	 * should be created with tiled memory.
	 * NOTE(review): bufmgr is not NULL-checked before
	 * tbm_bufmgr_get_capability() — verify the call tolerates NULL. */
	bufmgr = tbm_bufmgr_init(-1);
	capability = tbm_bufmgr_get_capability(bufmgr);
	tbm_bufmgr_deinit(bufmgr);

	if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
		tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
								wl_vk_display->wl_tbm_client,
								wl_vk_surface->wl_surface,
								swapchain->properties.buffer_count,
								swapchain->properties.width,
								swapchain->properties.height,
								TBM_FORMAT_ARGB8888);
	} else {
		tbm_queue = wayland_tbm_client_create_surface_queue(
								wl_vk_display->wl_tbm_client,
								wl_vk_surface->wl_surface,
								swapchain->properties.buffer_count,
								swapchain->properties.width,
								swapchain->properties.height,
								TBM_FORMAT_ARGB8888);
	}

	if (!tbm_queue) {
		TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
				wl_vk_surface);
		swapchain->result = TPL_ERROR_OUT_OF_MEMORY;
		return TPL_ERROR_OUT_OF_MEMORY;
	}

	/* GUARANTEE_CYCLE mode is required here; presumably it makes the queue
	 * hand out buffers in a fixed cycle as the Vulkan swapchain expects —
	 * confirm against the tbm_surface_queue documentation. */
	if (tbm_surface_queue_set_modes(
			tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
			TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
				tbm_queue);
		tbm_surface_queue_destroy(tbm_queue);
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Reset callback: flags the surface for re-creation on queue reset. */
	if (tbm_surface_queue_add_reset_cb(
			tbm_queue,
			__cb_tbm_queue_reset_callback,
			(void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
				tbm_queue);
		tbm_surface_queue_destroy(tbm_queue);
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Acquirable callback: wakes the surface thread to commit buffers. */
	if (tbm_surface_queue_add_acquirable_cb(
			tbm_queue,
			__cb_tbm_queue_acquirable_callback,
			(void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
				tbm_queue);
		tbm_surface_queue_destroy(tbm_queue);
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	swapchain->tbm_queue = tbm_queue;
	swapchain->create_done = TPL_TRUE;

	TPL_INFO("[TBM_QUEUE_CREATED]",
			 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
			 wl_vk_surface, swapchain, tbm_queue);

	return TPL_ERROR_NONE;
}
1697
/* Backend entry point for swapchain creation.
 *
 * Records the requested swapchain properties, then sends a CREATE_QUEUE
 * message to the surface thread and blocks on surf_cond until the thread
 * reports completion (create_done) or an error (swapchain->result).
 *
 * NOTE(review): when the thread records a failure, tbm_queue stays NULL and
 * TPL_CHECK_ON_FALSE_ASSERT_FAIL below aborts instead of propagating
 * swapchain->result to the caller — confirm this is the intended policy.
 */
static tpl_result_t
__tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
		tbm_format format, int width,
		int height, int buffer_count, int present_mode)
{
	tpl_wl_vk_surface_t *wl_vk_surface              = NULL;
	tpl_wl_vk_display_t *wl_vk_display      = NULL;
	tpl_wl_vk_swapchain_t *swapchain  = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_display = (tpl_wl_vk_display_t *)
							 surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	swapchain = wl_vk_surface->swapchain;

	if (swapchain == NULL) {
		/* First swapchain for this surface: allocate a fresh, zeroed one. */
		swapchain =
			(tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
			TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
		swapchain->tbm_queue           = NULL;
	}

	/* Store the requested properties; the surface thread validates them in
	 * _thread_swapchain_create_tbm_queue(). */
	swapchain->properties.buffer_count = buffer_count;
	swapchain->properties.width        = width;
	swapchain->properties.height       = height;
	swapchain->properties.present_mode = present_mode;
	swapchain->wl_vk_surface           = wl_vk_surface;
	swapchain->properties.format       = format;

	swapchain->result                  = TPL_ERROR_NONE;
	swapchain->create_done             = TPL_FALSE;

	wl_vk_surface->swapchain           = swapchain;

	/* NOTE(review): ref_cnt is set to 1 even when an existing swapchain is
	 * being reused; the thread's reuse path increments it again — verify
	 * the intended reference-count semantics. */
	__tpl_util_atomic_set(&swapchain->ref_cnt, 1);

	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	/* send swapchain create tbm_queue message */
	wl_vk_surface->sent_message = CREATE_QUEUE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	/* Block until the thread finishes creating the queue or records an
	 * error in swapchain->result. */
	while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE)
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	TPL_CHECK_ON_FALSE_ASSERT_FAIL(
		swapchain->tbm_queue != NULL,
		"[CRITICAL FAIL] Failed to create tbm_surface_queue");

	wl_vk_surface->reset = TPL_FALSE;

	return TPL_ERROR_NONE;
}
1757
1758 static void
1759 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1760 {
1761         TPL_ASSERT(wl_vk_surface);
1762
1763         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1764
1765         TPL_CHECK_ON_NULL_RETURN(swapchain);
1766
1767         if (swapchain->tbm_queue) {
1768                 TPL_INFO("[TBM_QUEUE_DESTROY]",
1769                                  "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1770                                  wl_vk_surface, swapchain, swapchain->tbm_queue);
1771                 tbm_surface_queue_destroy(swapchain->tbm_queue);
1772                 swapchain->tbm_queue = NULL;
1773         }
1774 }
1775
/* Backend entry point for swapchain destruction.
 *
 * Decrements the swapchain refcount; only the last reference actually
 * destroys it. Untracks the buffers exported via get_swapchain_buffers(),
 * clears the surface's buffer tracking, then sends a DESTROY_QUEUE message
 * to the surface thread and waits on surf_cond until the thread has
 * destroyed the tbm_queue.
 */
static tpl_result_t
__tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
{
	tpl_wl_vk_swapchain_t *swapchain   = NULL;
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	swapchain = wl_vk_surface->swapchain;
	if (!swapchain) {
		TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
				wl_vk_surface);
		return TPL_ERROR_INVALID_OPERATION;
	}

	if (!swapchain->tbm_queue) {
		TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.",
				wl_vk_surface, wl_vk_surface->swapchain);
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Another holder still references this swapchain (e.g. after a reuse
	 * during re-creation); keep it alive. */
	if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
		TPL_INFO("[DESTROY_SWAPCHAIN]",
				 "wl_vk_surface(%p) swapchain(%p) still valid.",
				 wl_vk_surface, swapchain);
		return TPL_ERROR_NONE;
	}

	TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
			 "wl_vk_surface(%p) swapchain(%p)",
			 wl_vk_surface, wl_vk_surface->swapchain);

	/* Drop the tbm_surface references taken in get_swapchain_buffers(). */
	if (swapchain->swapchain_buffers) {
		for (int i = 0; i < swapchain->properties.buffer_count; i++) {
			if (swapchain->swapchain_buffers[i]) {
				TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
						 i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
						 _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
				tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
				swapchain->swapchain_buffers[i] = NULL;
			}
		}

		free(swapchain->swapchain_buffers);
		swapchain->swapchain_buffers = NULL;
	}

	_tpl_wl_vk_surface_buffer_clear(wl_vk_surface);

	/* Ask the surface thread to destroy the tbm_queue, then wait until the
	 * thread clears swapchain->tbm_queue (signalled via surf_cond). */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->sent_message = DESTROY_QUEUE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	while (swapchain->tbm_queue)
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	_print_buffer_lists(wl_vk_surface);

	free(swapchain);
	wl_vk_surface->swapchain = NULL;

	return TPL_ERROR_NONE;
}
1848
1849 static tpl_result_t
1850 __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
1851                                                                                           tbm_surface_h **buffers,
1852                                                                                           int *buffer_count)
1853 {
1854         TPL_ASSERT(surface);
1855         TPL_ASSERT(surface->backend.data);
1856         TPL_ASSERT(surface->display);
1857         TPL_ASSERT(surface->display->backend.data);
1858
1859         tpl_wl_vk_surface_t *wl_vk_surface =
1860                 (tpl_wl_vk_surface_t *)surface->backend.data;
1861         tpl_wl_vk_display_t *wl_vk_display =
1862                 (tpl_wl_vk_display_t *)surface->display->backend.data;
1863         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1864         tpl_result_t ret                   = TPL_ERROR_NONE;
1865         int i;
1866
1867         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1868         TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
1869
1870         tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1871
1872         if (!buffers) {
1873                 *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
1874                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1875                 return TPL_ERROR_NONE;
1876         }
1877
1878         swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
1879                                                                                 *buffer_count,
1880                                                                                 sizeof(tbm_surface_h));
1881         if (!swapchain->swapchain_buffers) {
1882                 TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
1883                                 *buffer_count);
1884                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1885                 return TPL_ERROR_OUT_OF_MEMORY;
1886         }
1887
1888         ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
1889                                                                                                 swapchain->tbm_queue,
1890                                                                                                 swapchain->swapchain_buffers,
1891                                                                                                 buffer_count);
1892         if (!ret) {
1893                 TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
1894                                 wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
1895                 free(swapchain->swapchain_buffers);
1896                 swapchain->swapchain_buffers = NULL;
1897                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1898                 return TPL_ERROR_INVALID_OPERATION;
1899         }
1900
1901         for (i = 0; i < *buffer_count; i++) {
1902                 if (swapchain->swapchain_buffers[i]) {
1903                         TPL_INFO("[TRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
1904                                           i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
1905                                           _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1906                         tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
1907                 }
1908         }
1909
1910         *buffers = swapchain->swapchain_buffers;
1911
1912         tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1913
1914         return TPL_ERROR_NONE;
1915 }
1916
/* tbm user-data free callback for a wl_vk_buffer.
 *
 * Invoked by tbm when the owning tbm_surface's user data is destroyed.
 * Detaches the buffer from the surface's tracking array and releases all
 * per-buffer resources (wl_buffer, explicit-sync release object, release
 * fence fd, damage rects) before freeing the struct itself.
 */
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
			 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);

	/* Clear this buffer's slot in the surface's buffers[] array. */
	tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
	if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
		wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
		wl_vk_surface->buffer_cnt--;

		wl_vk_buffer->idx = -1;
	}
	tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

	/* Flush pending protocol requests before destroying the wl_buffer. */
	wl_display_flush(wl_vk_display->wl_display);

	if (wl_vk_buffer->wl_buffer) {
		wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
										  wl_vk_buffer->wl_buffer);
		wl_vk_buffer->wl_buffer = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	/* Drop any outstanding explicit-sync buffer_release object. */
	if (wl_vk_buffer->buffer_release) {
		zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
		wl_vk_buffer->buffer_release = NULL;
	}
#endif

	if (wl_vk_buffer->release_fence_fd != -1) {
		close(wl_vk_buffer->release_fence_fd);
		wl_vk_buffer->release_fence_fd = -1;
	}

	if (wl_vk_buffer->rects) {
		free(wl_vk_buffer->rects);
		wl_vk_buffer->rects = NULL;
		wl_vk_buffer->num_rects = 0;
	}

	wl_vk_buffer->tbm_surface = NULL;
	wl_vk_buffer->bo_name = -1;

	free(wl_vk_buffer);
}
1966
1967 static tpl_wl_vk_buffer_t *
1968 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1969 {
1970         tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1971         tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1972                                                                            (void **)&wl_vk_buffer);
1973         return wl_vk_buffer;
1974 }
1975
/* Return the wl_vk_buffer tracking struct for a tbm_surface, creating and
 * registering one when the tbm_surface is seen for the first time.
 *
 * A newly created wl_vk_buffer is attached to the tbm_surface as user data
 * (freed through __cb_wl_vk_buffer_free when the tbm_surface goes away) and
 * inserted into the surface's buffers[] tracking array. Returns NULL only
 * when allocation fails.
 */
static tpl_wl_vk_buffer_t *
_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
					  tbm_surface_h tbm_surface)
{
	tpl_wl_vk_buffer_t  *wl_vk_buffer  = NULL;

	wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);

	if (!wl_vk_buffer) {
		wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
		TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);

		/* Tie the wl_vk_buffer's lifetime to the tbm_surface via user data. */
		tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   (tbm_data_free)__cb_wl_vk_buffer_free);
		tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   wl_vk_buffer);

		wl_vk_buffer->wl_buffer                = NULL;
		wl_vk_buffer->tbm_surface              = tbm_surface;
		wl_vk_buffer->bo_name                  = _get_tbm_surface_bo_name(tbm_surface);
		wl_vk_buffer->wl_vk_surface            = wl_vk_surface;

		wl_vk_buffer->status                   = RELEASED;

		/* Fence fds start invalid (-1); they are filled in when explicit
		 * sync is in use. */
		wl_vk_buffer->acquire_fence_fd         = -1;
		wl_vk_buffer->release_fence_fd         = -1;

		wl_vk_buffer->dx                       = 0;
		wl_vk_buffer->dy                       = 0;
		wl_vk_buffer->width                    = tbm_surface_get_width(tbm_surface);
		wl_vk_buffer->height                   = tbm_surface_get_height(tbm_surface);

		wl_vk_buffer->rects                    = NULL;
		wl_vk_buffer->num_rects                = 0;

		wl_vk_buffer->need_to_commit = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
		wl_vk_buffer->buffer_release = NULL;
#endif
		tpl_gmutex_init(&wl_vk_buffer->mutex);
		tpl_gcond_init(&wl_vk_buffer->cond);

		/* Register the new buffer in the first free slot of buffers[]. */
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		{
			int i;
			for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
				if (wl_vk_surface->buffers[i] == NULL) break;

			/* If this exception is reached,
			 * it may be a critical memory leak problem. */
			if (i == BUFFER_ARRAY_SIZE) {
				tpl_wl_vk_buffer_t *evicted_buffer = NULL;
				int evicted_idx = 0; /* evict the frontmost buffer */

				evicted_buffer = wl_vk_surface->buffers[evicted_idx];

				TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
						 wl_vk_surface);
				TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
						 evicted_buffer, evicted_buffer->tbm_surface,
						 status_to_string[evicted_buffer->status]);

				/* [TODO] need to think about whether there will be
				 * better modifications */
				/* NOTE(review): the evicted entry is only dropped from the
				 * array here; its wl_vk_buffer remains owned by its
				 * tbm_surface user data — confirm it is freed by that path. */
				wl_vk_surface->buffer_cnt--;
				wl_vk_surface->buffers[evicted_idx]      = NULL;

				i = evicted_idx;
			}

			wl_vk_surface->buffer_cnt++;
			wl_vk_surface->buffers[i]          = wl_vk_buffer;
			wl_vk_buffer->idx                  = i;
		}
		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		TPL_INFO("[WL_VK_BUFFER_CREATE]",
				 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				 wl_vk_surface, wl_vk_buffer, tbm_surface,
				 wl_vk_buffer->bo_name);
	}

	return wl_vk_buffer;
}
2060
2061 static tbm_surface_h
2062 __tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
2063                                                                    uint64_t timeout_ns,
2064                                                                    int32_t *release_fence)
2065 {
2066         TPL_ASSERT(surface);
2067         TPL_ASSERT(surface->backend.data);
2068         TPL_ASSERT(surface->display);
2069         TPL_ASSERT(surface->display->backend.data);
2070         TPL_OBJECT_CHECK_RETURN(surface, NULL);
2071
2072         tpl_wl_vk_surface_t *wl_vk_surface =
2073                 (tpl_wl_vk_surface_t *)surface->backend.data;
2074         tpl_wl_vk_display_t *wl_vk_display =
2075                 (tpl_wl_vk_display_t *)surface->display->backend.data;
2076         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
2077         tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;
2078
2079         tbm_surface_h tbm_surface          = NULL;
2080         tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_NONE;
2081
2082         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
2083         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);
2084
2085         TPL_OBJECT_UNLOCK(surface);
2086         TRACE_BEGIN("WAIT_DEQUEUEABLE");
2087         if (timeout_ns != UINT64_MAX) {
2088                 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
2089                                                 swapchain->tbm_queue, timeout_ns/1000);
2090         } else {
2091                 tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
2092         }
2093         TRACE_END();
2094         TPL_OBJECT_LOCK(surface);
2095
2096         if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
2097                 TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
2098                                 timeout_ns);
2099                 return NULL;
2100         } else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2101                 TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
2102                                 wl_vk_surface, swapchain->tbm_queue, tsq_err);
2103                 return NULL;
2104         }
2105
2106         tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
2107
2108         if (wl_vk_surface->reset) {
2109                 TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
2110                                   swapchain, swapchain->tbm_queue);
2111                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2112                 return NULL;
2113         }
2114
2115         tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
2116                                                                                 &tbm_surface);
2117         if (!tbm_surface) {
2118                 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
2119                                 swapchain->tbm_queue, wl_vk_surface, tsq_err);
2120                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2121                 return NULL;
2122         }
2123
2124         tbm_surface_internal_ref(tbm_surface);
2125
2126         wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
2127         TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");
2128
2129         tpl_gmutex_lock(&wl_vk_buffer->mutex);
2130         wl_vk_buffer->status = DEQUEUED;
2131
2132         if (release_fence) {
2133 #if TIZEN_FEATURE_ENABLE
2134                 if (wl_vk_surface->surface_sync) {
2135                         *release_fence = wl_vk_buffer->release_fence_fd;
2136                         TPL_LOG_D("[EXPLICIT_FENCE]", "wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
2137                                           wl_vk_surface, wl_vk_buffer, *release_fence);
2138                         wl_vk_buffer->release_fence_fd = -1;
2139                 } else
2140 #endif
2141                 {
2142                         *release_fence = -1;
2143                 }
2144         }
2145
2146         wl_vk_surface->reset = TPL_FALSE;
2147
2148         TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2149                           wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
2150                           release_fence ? *release_fence : -1);
2151
2152         tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2153         tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2154
2155         return tbm_surface;
2156 }
2157
2158 static tpl_result_t
2159 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
2160                                                                           tbm_surface_h tbm_surface)
2161 {
2162         TPL_ASSERT(surface);
2163         TPL_ASSERT(surface->backend.data);
2164
2165         tpl_wl_vk_surface_t *wl_vk_surface  =
2166                 (tpl_wl_vk_surface_t *)surface->backend.data;
2167         tpl_wl_vk_swapchain_t *swapchain    = NULL;
2168         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2169         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2170
2171         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2172                                                                   TPL_ERROR_INVALID_PARAMETER);
2173
2174         swapchain = wl_vk_surface->swapchain;
2175         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2176         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2177                                                                  TPL_ERROR_INVALID_PARAMETER);
2178
2179         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2180         if (wl_vk_buffer) {
2181                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2182                 wl_vk_buffer->status = RELEASED;
2183                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2184         }
2185
2186         tbm_surface_internal_unref(tbm_surface);
2187
2188         TPL_INFO("[CANCEL BUFFER]",
2189                          "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2190                           wl_vk_surface, swapchain, tbm_surface,
2191                           _get_tbm_surface_bo_name(tbm_surface));
2192
2193         tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2194                                                                                            tbm_surface);
2195         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2196                 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2197                 return TPL_ERROR_INVALID_OPERATION;
2198         }
2199
2200         return TPL_ERROR_NONE;
2201 }
2202
2203 static tpl_result_t
2204 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2205                                                                            tbm_surface_h tbm_surface,
2206                                                                            int num_rects, const int *rects,
2207                                                                            int32_t acquire_fence)
2208 {
2209         TPL_ASSERT(surface);
2210         TPL_ASSERT(surface->display);
2211         TPL_ASSERT(surface->backend.data);
2212         TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2213
2214         tpl_wl_vk_surface_t *wl_vk_surface  =
2215                 (tpl_wl_vk_surface_t *) surface->backend.data;
2216         tpl_wl_vk_swapchain_t *swapchain    = wl_vk_surface->swapchain;
2217         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2218         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2219         int bo_name                         = -1;
2220
2221         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2222         TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2223         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2224                                                                   TPL_ERROR_INVALID_PARAMETER);
2225
2226         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2227         if (!wl_vk_buffer) {
2228                 TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
2229                 return TPL_ERROR_INVALID_PARAMETER;
2230         }
2231
2232         bo_name = wl_vk_buffer->bo_name;
2233
2234         tpl_gmutex_lock(&wl_vk_buffer->mutex);
2235
2236         /* If there are received region information, save it to wl_vk_buffer */
2237         if (num_rects && rects) {
2238                 if (wl_vk_buffer->rects != NULL) {
2239                         free(wl_vk_buffer->rects);
2240                         wl_vk_buffer->rects = NULL;
2241                         wl_vk_buffer->num_rects = 0;
2242                 }
2243
2244                 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2245                 wl_vk_buffer->num_rects = num_rects;
2246
2247                 if (wl_vk_buffer->rects) {
2248                         memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2249                                    sizeof(int) * 4 * num_rects);
2250                 } else {
2251                         TPL_ERR("Failed to allocate memory for rects info.");
2252                 }
2253         }
2254
2255         if (wl_vk_buffer->acquire_fence_fd != -1)
2256                 close(wl_vk_buffer->acquire_fence_fd);
2257
2258         wl_vk_buffer->acquire_fence_fd = acquire_fence;
2259
2260         wl_vk_buffer->status = ENQUEUED;
2261         TPL_LOG_T("WL_VK",
2262                           "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2263                           wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2264
2265         tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2266
2267         tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2268                                                                                 tbm_surface);
2269         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2270                 tbm_surface_internal_unref(tbm_surface);
2271                 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2272                                 tbm_surface, wl_vk_surface, tsq_err);
2273                 return TPL_ERROR_INVALID_OPERATION;
2274         }
2275
2276         tbm_surface_internal_unref(tbm_surface);
2277
2278         return TPL_ERROR_NONE;
2279 }
2280
/* Listener for wl_buffer.release events.
 * The cast is needed because __cb_wl_buffer_release is declared with a
 * `struct wl_proxy *` second parameter instead of `struct wl_buffer *`. */
static const struct wl_buffer_listener wl_buffer_release_listener = {
	(void *)__cb_wl_buffer_release,
};
2284
/* Drains all acquirable buffers from the swapchain's tbm_surface_queue
 * (runs on the backend thread). For each acquired buffer it lazily creates
 * the wl_buffer, then either commits it immediately or, when vblank
 * throttling is active and a vblank is still pending, parks it on
 * vblank_waiting_buffers to be committed from the vblank callback. */
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_h tbm_surface            = NULL;
	tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display   = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain     = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer     = NULL;
	tpl_bool_t ready_to_commit           = TPL_TRUE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

	while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
		tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
											&tbm_surface);
		if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
			TPL_ERR("Failed to acquire from tbm_queue(%p)",
					swapchain->tbm_queue);
			return TPL_ERROR_INVALID_OPERATION;
		}

		/* Keep the surface alive until it is released back to the queue. */
		tbm_surface_internal_ref(tbm_surface);

		wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
									   "wl_vk_buffer sould be not NULL");

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		wl_vk_buffer->status = ACQUIRED;

		TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				  wl_vk_buffer, tbm_surface,
				  _get_tbm_surface_bo_name(tbm_surface));

		/* Create the wl_buffer on first acquire of this tbm_surface. */
		if (wl_vk_buffer->wl_buffer == NULL) {
			wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
						wl_vk_display->wl_tbm_client, tbm_surface);

			if (!wl_vk_buffer->wl_buffer) {
				TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
						 wl_vk_display->wl_tbm_client, tbm_surface);
			} else {
				/* Only listen for wl_buffer.release when explicit sync is not
				 * in use for this buffer; otherwise release is delivered via
				 * zwp_linux_buffer_release_v1 instead. */
				if (wl_vk_buffer->acquire_fence_fd == -1 ||
					wl_vk_display->use_explicit_sync == TPL_FALSE) {
					wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
										   &wl_buffer_release_listener, wl_vk_buffer);
				}

				TPL_LOG_T("WL_VK",
						  "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
						  wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
			}
		}

		/* Commit now unless we must wait for the next vblank. */
		if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done)
			ready_to_commit = TPL_TRUE;
		else {
			wl_vk_buffer->status = WAITING_VBLANK;
			__tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
			ready_to_commit = TPL_FALSE;
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		/* Commit outside the buffer lock; _thread_wl_surface_commit takes
		 * wl_vk_buffer->mutex itself. */
		if (ready_to_commit)
			_thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
	}

	return TPL_ERROR_NONE;
}
2356
2357 #if TIZEN_FEATURE_ENABLE
2358 static void
2359 __cb_buffer_fenced_release(void *data,
2360                                                    struct zwp_linux_buffer_release_v1 *release,
2361                                                    int32_t fence)
2362 {
2363         tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
2364         tbm_surface_h tbm_surface         = NULL;
2365
2366         TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2367
2368         tbm_surface = wl_vk_buffer->tbm_surface;
2369
2370         if (tbm_surface_internal_is_valid(tbm_surface)) {
2371                 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2372                 tpl_wl_vk_swapchain_t *swapchain   = NULL;
2373
2374                 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2375                         TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2376                         tbm_surface_internal_unref(tbm_surface);
2377                         return;
2378                 }
2379
2380                 swapchain = wl_vk_surface->swapchain;
2381
2382                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2383                 if (wl_vk_buffer->status == COMMITTED) {
2384                         tbm_surface_queue_error_e tsq_err;
2385
2386                         zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2387                         wl_vk_buffer->buffer_release = NULL;
2388
2389                         wl_vk_buffer->release_fence_fd = fence;
2390                         wl_vk_buffer->status = RELEASED;
2391
2392                         TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2393                                            wl_vk_buffer->bo_name,
2394                                            fence);
2395                         TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2396                                                         wl_vk_buffer->bo_name);
2397
2398                         TPL_LOG_T("WL_VK",
2399                                           "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2400                                           wl_vk_buffer, tbm_surface,
2401                                           wl_vk_buffer->bo_name,
2402                                           fence);
2403
2404                         tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2405                                                                                                 tbm_surface);
2406                         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2407                                 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2408
2409                         tbm_surface_internal_unref(tbm_surface);
2410                 }
2411
2412                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2413
2414         } else {
2415                 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2416         }
2417 }
2418
/* zwp_linux_buffer_release_v1.immediate_release handler.
 * Same as the fenced variant but without a fence fd: the buffer can be
 * reused right away, so release_fence_fd is set to -1 and the surface is
 * released back to the tbm_surface_queue. */
static void
__cb_buffer_immediate_release(void *data,
							  struct zwp_linux_buffer_release_v1 *release)
{
	tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface           = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain   = NULL;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);
			return;
		}

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);
		/* NOTE(review): when status != COMMITTED the buffer_release proxy is
		 * not destroyed here — confirm it is cleaned up elsewhere. */
		if (wl_vk_buffer->status == COMMITTED) {
			tbm_surface_queue_error_e tsq_err;

			zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
			wl_vk_buffer->buffer_release = NULL;

			wl_vk_buffer->release_fence_fd = -1;
			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
					   _get_tbm_surface_bo_name(tbm_surface));
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							_get_tbm_surface_bo_name(tbm_surface));

			TPL_LOG_T("WL_VK",
					  "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
					  wl_vk_buffer, tbm_surface,
					  _get_tbm_surface_bo_name(tbm_surface));

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
												tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			/* Drop the reference taken when this buffer was acquired. */
			tbm_surface_internal_unref(tbm_surface);
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	} else {
		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
2476
/* Listener for zwp_linux_buffer_release_v1 (explicit sync release events).
 * Note: the identifier keeps the historical "listner" spelling; renaming it
 * would require touching every reference. */
static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
	__cb_buffer_fenced_release,
	__cb_buffer_immediate_release,
};
2481 #endif
2482
/* wl_buffer.release handler (non-explicit-sync path).
 * Releases the tbm_surface back to the swapchain's queue and drops the
 * reference taken at acquire time. The parameter is declared as
 * `struct wl_proxy *`, hence the (void *) cast at the listener table. */
static void
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
{
	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain   = NULL;
		tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);
			return;
		}

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		/* Only a COMMITTED buffer can be released; other states mean the
		 * event is stale (e.g. buffer already canceled or released). */
		if (wl_vk_buffer->status == COMMITTED) {

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
												tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							wl_vk_buffer->bo_name);

			TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
					  wl_vk_buffer->wl_buffer, tbm_surface,
					  wl_vk_buffer->bo_name);

			/* Drop the reference taken when this buffer was acquired. */
			tbm_surface_internal_unref(tbm_surface);
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	} else {
		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
2533
2534 static void
2535 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2536                                            unsigned int sequence, unsigned int tv_sec,
2537                                            unsigned int tv_usec, void *user_data)
2538 {
2539         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
2540         tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;
2541
2542         TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2543         TPL_LOG_D("[VBLANK_DONE]", "wl_vk_surface(%p)", wl_vk_surface);
2544
2545         if (error == TDM_ERROR_TIMEOUT)
2546                 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
2547                                  wl_vk_surface);
2548
2549         wl_vk_surface->vblank_done = TPL_TRUE;
2550
2551         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
2552         wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
2553                                                 wl_vk_surface->vblank_waiting_buffers,
2554                                                 NULL);
2555         if (wl_vk_buffer)
2556                 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2557         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
2558 }
2559
2560 static tpl_result_t
2561 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2562 {
2563         tdm_error tdm_err                     = TDM_ERROR_NONE;
2564         tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
2565
2566         if (wl_vk_surface->vblank == NULL) {
2567                 wl_vk_surface->vblank =
2568                         _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client);
2569                 if (!wl_vk_surface->vblank) {
2570                         TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2571                                          wl_vk_surface);
2572                         return TPL_ERROR_OUT_OF_MEMORY;
2573                 }
2574         }
2575
2576         tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2577                         wl_vk_surface->post_interval,
2578                         __cb_tdm_client_vblank,
2579                         (void *)wl_vk_surface);
2580
2581         if (tdm_err == TDM_ERROR_NONE) {
2582                 wl_vk_surface->vblank_done = TPL_FALSE;
2583                 TRACE_ASYNC_BEGIN((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2584         } else {
2585                 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2586                 return TPL_ERROR_INVALID_OPERATION;
2587         }
2588
2589         return TPL_ERROR_NONE;
2590 }
2591
/* Commits one buffer to the wl_surface (runs on the backend thread):
 * attach -> damage -> (optional explicit-sync acquire fence / release
 * object) -> commit -> flush. Afterwards the buffer is marked COMMITTED
 * and waiters on its condvar are signaled. If vblank throttling is on,
 * the next vblank wait is armed. */
static void
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
						  tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
	struct wl_surface *wl_surface         = wl_vk_surface->wl_surface;
	uint32_t version;

	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
								   "wl_vk_buffer sould be not NULL");

	/* Fallback: create the wl_buffer here if the acquire path did not. */
	if (wl_vk_buffer->wl_buffer == NULL) {
		wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
											wl_vk_display->wl_tbm_client,
											wl_vk_buffer->tbm_surface);
		/* wl_buffer.release listener only when explicit sync is not used
		 * for this buffer (otherwise zwp_linux_buffer_release_v1 is used). */
		if (wl_vk_buffer->wl_buffer &&
			(wl_vk_buffer->acquire_fence_fd == -1 ||
			 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
				wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
									   &wl_buffer_release_listener, wl_vk_buffer);
		}
	}
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
								   "[FATAL] Failed to create wl_buffer");

	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);

	wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
					  wl_vk_buffer->dx, wl_vk_buffer->dy);

	if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
		/* No damage info: damage the whole buffer.
		 * wl_surface_damage_buffer requires wl_surface version >= 4. */
		if (version < 4) {
			wl_surface_damage(wl_surface,
							  wl_vk_buffer->dx, wl_vk_buffer->dy,
							  wl_vk_buffer->width, wl_vk_buffer->height);
		} else {
			wl_surface_damage_buffer(wl_surface,
									 0, 0,
									 wl_vk_buffer->width, wl_vk_buffer->height);
		}
	} else {
		int i;
		/* Rects are stored as [x, y, w, h] quadruples with a bottom-left
		 * origin (Vulkan); flip y to Wayland's top-left origin. */
		for (i = 0; i < wl_vk_buffer->num_rects; i++) {
			int inverted_y =
				wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
						wl_vk_buffer->rects[i * 4 + 3]);
			if (version < 4) {
				wl_surface_damage(wl_surface,
								  wl_vk_buffer->rects[i * 4 + 0],
								  inverted_y,
								  wl_vk_buffer->rects[i * 4 + 2],
								  wl_vk_buffer->rects[i * 4 + 3]);
			} else {
				wl_surface_damage_buffer(wl_surface,
										 wl_vk_buffer->rects[i * 4 + 0],
										 inverted_y,
										 wl_vk_buffer->rects[i * 4 + 2],
										 wl_vk_buffer->rects[i * 4 + 3]);
			}
		}
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->use_explicit_sync &&
		wl_vk_surface->surface_sync &&
		wl_vk_buffer->acquire_fence_fd != -1) {

		/* Hand the acquire fence to the compositor; the protocol duplicates
		 * the fd, so our copy is closed right after. */
		zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
															   wl_vk_buffer->acquire_fence_fd);
		TPL_LOG_D("[SET_ACQUIRE_FENCE][1/2]", "wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
				  wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
		close(wl_vk_buffer->acquire_fence_fd);
		wl_vk_buffer->acquire_fence_fd = -1;

		/* Request a per-commit release object; release will arrive via
		 * zwp_release_listner instead of wl_buffer.release. */
		wl_vk_buffer->buffer_release =
			zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
		if (!wl_vk_buffer->buffer_release) {
			TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
		} else {
			zwp_linux_buffer_release_v1_add_listener(
				wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
			TPL_LOG_D("[SET_ACQUIRE_FENCE][2/2]", "add explicit_sync_release_listener.");
		}
	}
#endif

	wl_surface_commit(wl_surface);

	wl_display_flush(wl_vk_display->wl_display);

	TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
					  wl_vk_buffer->bo_name);

	tpl_gmutex_lock(&wl_vk_buffer->mutex);

	wl_vk_buffer->need_to_commit   = TPL_FALSE;
	wl_vk_buffer->status           = COMMITTED;

	/* Wake any thread waiting for this buffer to be committed. */
	tpl_gcond_signal(&wl_vk_buffer->cond);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	TPL_LOG_T("WL_VK",
			  "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
			  wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
			  wl_vk_buffer->bo_name);

	if (wl_vk_display->use_wait_vblank &&
		_thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
		TPL_ERR("Failed to set wait vblank.");
}
2703
2704 tpl_bool_t
2705 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2706 {
2707         if (!native_dpy) return TPL_FALSE;
2708
2709         if (_check_native_handle_is_wl_display(native_dpy))
2710                 return TPL_TRUE;
2711
2712         return TPL_FALSE;
2713 }
2714
/* Fills in the display backend vtable for the threaded Wayland-Vulkan
 * backend. Only the entry points implemented by this file are set. */
void
__tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_vk_display_init;
	backend->fini = __tpl_wl_vk_display_fini;
	backend->query_config = __tpl_wl_vk_display_query_config;
	backend->filter_config = __tpl_wl_vk_display_filter_config;
	backend->query_window_supported_buffer_count =
		__tpl_wl_vk_display_query_window_supported_buffer_count;
	backend->query_window_supported_present_modes =
		__tpl_wl_vk_display_query_window_supported_present_modes;
}
2732
/* Fills in the surface backend vtable for the threaded Wayland-Vulkan
 * backend (swapchain management, dequeue/enqueue/cancel, post interval). */
void
__tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_vk_surface_init;
	backend->fini = __tpl_wl_vk_surface_fini;
	backend->validate = __tpl_wl_vk_surface_validate;
	backend->cancel_dequeued_buffer =
		__tpl_wl_vk_surface_cancel_buffer;
	backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
	backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
	backend->get_swapchain_buffers =
		__tpl_wl_vk_surface_get_swapchain_buffers;
	backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
	backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
	backend->set_post_interval =
		__tpl_wl_vk_surface_set_post_interval;
}
2755
2756 static int
2757 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2758 {
2759         return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
2760 }
2761
2762 static void
2763 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2764 {
2765         int idx = 0;
2766
2767         tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2768         TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2769                          wl_vk_surface, wl_vk_surface->buffer_cnt);
2770         for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2771                 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2772                 if (wl_vk_buffer) {
2773                         TPL_INFO("[INFO]",
2774                                          "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2775                                          idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2776                                          wl_vk_buffer->bo_name,
2777                                          status_to_string[wl_vk_buffer->status]);
2778                 }
2779         }
2780         tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2781 }