/* Move protected area by mutex
 * platform/core/uifw/libtpl-egl.git / src/tpl_wl_vk_thread.c */
1 #define inline __inline__
2 #undef inline
3
4 #include "tpl_internal.h"
5
6 #include <string.h>
7 #include <fcntl.h>
8 #include <unistd.h>
9 #include <sys/eventfd.h>
10
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
15
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
19
20 #include <tdm_client.h>
21
22 #ifndef TIZEN_FEATURE_ENABLE
23 #define TIZEN_FEATURE_ENABLE 1
24 #endif
25
26 #if TIZEN_FEATURE_ENABLE
27 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
28 #endif
29
30 #include "tpl_utils_gthread.h"
31
#define BUFFER_ARRAY_SIZE 10      /* capacity of the per-surface wl_vk_buffer tracking array */
#define VK_CLIENT_QUEUE_SIZE 3    /* default max buffer count exposed to the Vulkan client */

/* Address-as-unique-key idiom: the address of this variable is used as a
 * user-data key (presumably for tbm_surface user data — used later in file). */
static int wl_vk_buffer_key;
#define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)

/* Forward typedefs for the backend's core object types. */
typedef struct _tpl_wl_vk_display       tpl_wl_vk_display_t;
typedef struct _tpl_wl_vk_surface       tpl_wl_vk_surface_t;
typedef struct _tpl_wl_vk_swapchain     tpl_wl_vk_swapchain_t;
typedef struct _tpl_wl_vk_buffer        tpl_wl_vk_buffer_t;
42
/* Per-process display state for the Wayland-Vulkan backend.
 * Owns the dedicated event thread and the wayland/tdm client connections. */
struct _tpl_wl_vk_display {
	tpl_gsource                  *disp_source;    /* gsource watching the wl_display fd on the thread */
	tpl_gthread                  *thread;         /* dedicated worker thread ("wl_vk_thread") */
	tpl_gmutex                    wl_event_mutex; /* guards dispatching on ev_queue (see disp_dispatch) */

	struct wl_display            *wl_display;     /* client's native wl_display (not owned) */
	struct wl_event_queue        *ev_queue;       /* private event queue for backend-owned proxies */
	struct wayland_tbm_client    *wl_tbm_client;
	int                           last_error; /* errno of the last wl_display error*/

	tpl_bool_t                    wl_initialized;

	struct {
		tdm_client                   *tdm_client;
		tpl_gsource                  *tdm_source;
		int                           tdm_display_fd;
		tpl_bool_t                    tdm_initialized;
		/* To make sure that tpl_gsource has been successfully finalized. */
		tpl_bool_t                    gsource_finalized;
		tpl_gmutex                    tdm_mutex;
		tpl_gcond                     tdm_cond;
	} tdm;

	tpl_bool_t                    use_wait_vblank;   /* can be disabled via TPL_WAIT_VBLANK env */
	tpl_bool_t                    use_explicit_sync; /* can be disabled via TPL_EFS env */
	tpl_bool_t                    prepared;          /* TRUE between wl_display_prepare_read_queue
	                                                  * and the matching read/cancel */

	/* To make sure that tpl_gsource has been successfully finalized. */
	tpl_bool_t                    gsource_finalized;
	tpl_gmutex                    disp_mutex;
	tpl_gcond                     disp_cond;

	/* device surface capabilities */
	int                           min_buffer;
	int                           max_buffer;
	int                           present_modes;
#if TIZEN_FEATURE_ENABLE
	struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
#endif
};
83
/* Swapchain state: wraps a tbm_surface_queue plus the properties requested
 * by the Vulkan client. Reference counted via ref_cnt. */
struct _tpl_wl_vk_swapchain {
	tpl_wl_vk_surface_t          *wl_vk_surface;   /* owning surface (back pointer) */

	tbm_surface_queue_h           tbm_queue;       /* buffer queue backing this swapchain */
	tpl_result_t                  result;          /* result reported back from the worker-thread
	                                                * queue operation — TODO confirm exact usage */

	tpl_bool_t                    create_done;

	/* properties requested at swapchain creation */
	struct {
		int                       width;
		int                       height;
		tbm_format                format;
		int                       buffer_count;
		int                       present_mode;
	} properties;

	tbm_surface_h                *swapchain_buffers;

	/* [TEMP] To fix dEQP-VK.wsi.wayland.swapchain.modify.resize crash issue
	 * It will be fixed properly using old_swapchain handle */
	tbm_surface_h                *old_swapchain_buffers;

	tpl_util_atomic_uint          ref_cnt;         /* swapchain reference count */
};
108
/* Messages delivered to the surface's gsource on the worker thread.
 * Values are distinct powers of two, which suggests they may be combined
 * or tested as flags — confirm with the message sender code. */
typedef enum surf_message {
	NONE_MESSAGE = 0,
	INIT_SURFACE = 1,
	ACQUIRABLE = 2,
	CREATE_QUEUE = 4,
	DESTROY_QUEUE = 8,
} surf_message;
116
/* Per-window surface state. Touched from both the caller's thread and the
 * backend worker thread; surf_mutex/surf_cond coordinate the two. */
struct _tpl_wl_vk_surface {
	tpl_gsource                  *surf_source;    /* gsource processing sent_message on the thread */

	tpl_wl_vk_swapchain_t        *swapchain;      /* current swapchain (NULL when none) */

	struct wl_surface            *wl_surface;     /* native window surface (not owned) */
#if TIZEN_FEATURE_ENABLE
	struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
#endif
	tdm_client_vblank            *vblank;         /* per-surface vblank object for frame pacing */

	/* surface information */
	int                           render_done_cnt;

	tpl_wl_vk_display_t          *wl_vk_display;  /* owning display backend */
	tpl_surface_t                *tpl_surface;    /* front-end surface (back pointer) */

	/* wl_vk_buffer array for buffer tracing */
	tpl_wl_vk_buffer_t           *buffers[BUFFER_ARRAY_SIZE];
	int                           buffer_cnt; /* the number of using wl_vk_buffers */
	tpl_gmutex                    buffers_mutex; /* guards buffers[] and buffer_cnt */

	tpl_list_t                   *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */

	tpl_gmutex                    surf_mutex;
	tpl_gcond                     surf_cond;

	/* for waiting draw done */
	tpl_bool_t                    is_activated;
	tpl_bool_t                    reset; /* TRUE if queue reseted by external  */
	tpl_bool_t                    vblank_done;
	tpl_bool_t                    vblank_enable;
	tpl_bool_t                    initialized_in_thread;

	/* To make sure that tpl_gsource has been successfully finalized. */
	tpl_bool_t                    gsource_finalized;

	surf_message                  sent_message;   /* pending request for the worker thread */

	int                           post_interval;
};
158
/* Lifecycle states of a wl_vk_buffer, listed in their usual forward order.
 * Names follow the tbm_surface_queue dequeue/enqueue/acquire flow.
 * Keep in sync with status_to_string[] below. */
typedef enum buffer_status {
	RELEASED = 0,             // 0
	DEQUEUED,                 // 1
	ENQUEUED,                 // 2
	ACQUIRED,                 // 3
	WAITING_SIGNALED,         // 4
	WAITING_VBLANK,           // 5
	COMMITTED,                // 6
} buffer_status_t;
168
/* Human-readable names for buffer_status_t, for trace/debug logging.
 * Array index must match the enum value — keep in sync with buffer_status_t. */
static const char *status_to_string[7] = {
	"RELEASED",                 // 0
	"DEQUEUED",                 // 1
	"ENQUEUED",                 // 2
	"ACQUIRED",                 // 3
	"WAITING_SIGNALED",         // 4
	"WAITING_VBLANK",           // 5
	"COMMITTED",                // 6
};
178
/* Per-buffer tracking state, attached to a tbm_surface and traced through
 * the buffer_status_t lifecycle. */
struct _tpl_wl_vk_buffer {
	tbm_surface_h                 tbm_surface;
	int                           bo_name;       /* bo name, used as a stable log identifier */

	struct wl_buffer             *wl_buffer;
	int                           dx, dy; /* position to attach to wl_surface */
	int                           width, height; /* size to attach to wl_surface */

	buffer_status_t               status; /* for tracing buffer status */
	int                           idx; /* position index in buffers array of wl_vk_surface */

	/* for damage region */
	int                           num_rects;
	int                          *rects;

	/* for checking need_to_commit (frontbuffer mode) */
	tpl_bool_t                    need_to_commit;

#if TIZEN_FEATURE_ENABLE
	/* to get release event via zwp_linux_buffer_release_v1 */
	struct zwp_linux_buffer_release_v1 *buffer_release;
#endif

	/* each buffer owns its release_fence_fd, until it passes ownership
	 * of it to EGL */
	int32_t                       release_fence_fd;

	/* each buffer owns its acquire_fence_fd.
	 * If it uses zwp_linux_buffer_release_v1 the ownership of this fd
	 * will be passed to the display server.
	 * Otherwise it will be used as a fence waiting for render done
	 * on tpl thread */
	int32_t                       acquire_fence_fd;

	tpl_gmutex                    mutex;
	tpl_gcond                     cond;

	tpl_wl_vk_surface_t          *wl_vk_surface;  /* owning surface (back pointer) */
};
218
/* Forward declarations for helpers defined later in this file. */
static void
_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
static int
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
static void
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
static tpl_result_t
_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
static void
_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
static void
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
						  tpl_wl_vk_buffer_t *wl_vk_buffer);
236
237 static tpl_bool_t
238 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
239 {
240         struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
241
242         if (!wl_vk_native_dpy) {
243                 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
244                 return TPL_FALSE;
245         }
246
247         /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
248            is a memory address pointing the structure of wl_display_interface. */
249         if (wl_vk_native_dpy == &wl_display_interface)
250                 return TPL_TRUE;
251
252         if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
253                                 strlen(wl_display_interface.name)) == 0) {
254                 return TPL_TRUE;
255         }
256
257         return TPL_FALSE;
258 }
259
260 static tpl_bool_t
261 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
262 {
263         tpl_wl_vk_display_t        *wl_vk_display = NULL;
264         tdm_error                   tdm_err = TDM_ERROR_NONE;
265
266         TPL_IGNORE(message);
267
268         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
269         if (!wl_vk_display) {
270                 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
271                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
272                 return TPL_FALSE;
273         }
274
275         tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client);
276
277         /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
278          * When tdm_source is no longer available due to an unexpected situation,
279          * wl_vk_thread must remove it from the thread and destroy it.
280          * In that case, tdm_vblank can no longer be used for surfaces and displays
281          * that used this tdm_source. */
282         if (tdm_err != TDM_ERROR_NONE) {
283                 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
284                                 tdm_err);
285                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
286
287                 tpl_gsource_destroy(gsource, TPL_FALSE);
288
289                 wl_vk_display->tdm.tdm_source = NULL;
290
291                 return TPL_FALSE;
292         }
293
294         return TPL_TRUE;
295 }
296
/* gsource finalize callback for the tdm source (runs on the worker thread).
 * Destroys the tdm_client and signals tdm_cond so a waiter blocked on
 * tdm.gsource_finalized can proceed. The whole teardown is performed inside
 * tdm_mutex so the waiter observes the state change and the signal together.
 * NOTE(review): unlike __thread_func_tdm_dispatch, the gsource data is not
 * NULL-checked here — assumed always valid at finalize time; confirm. */
static void
__thread_func_tdm_finalize(tpl_gsource *gsource)
{
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);

	TPL_INFO("[TDM_CLIENT_FINI]",
			 "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
			 wl_vk_display, wl_vk_display->tdm.tdm_client,
			 wl_vk_display->tdm.tdm_display_fd);

	if (wl_vk_display->tdm.tdm_client) {
		tdm_client_destroy(wl_vk_display->tdm.tdm_client);
		wl_vk_display->tdm.tdm_client = NULL;
		wl_vk_display->tdm.tdm_display_fd = -1;
	}

	wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
	/* Mark finalized before signalling so the waiter's predicate holds. */
	wl_vk_display->tdm.gsource_finalized = TPL_TRUE;

	tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond);
	tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
}
323
/* gsource callbacks for the tdm client fd: plain fd readiness dispatch,
 * no custom prepare/check hooks needed. */
static tpl_gsource_functions tdm_funcs = {
	.prepare  = NULL,
	.check    = NULL,
	.dispatch = __thread_func_tdm_dispatch,
	.finalize = __thread_func_tdm_finalize,
};
330
331 static tpl_result_t
332 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
333 {
334         tdm_client       *tdm_client = NULL;
335         int               tdm_display_fd = -1;
336         tdm_error         tdm_err = TDM_ERROR_NONE;
337
338         tdm_client = tdm_client_create(&tdm_err);
339         if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
340                 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
341                 return TPL_ERROR_INVALID_OPERATION;
342         }
343
344         tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
345         if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
346                 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
347                 tdm_client_destroy(tdm_client);
348                 return TPL_ERROR_INVALID_OPERATION;
349         }
350
351         wl_vk_display->tdm.tdm_display_fd  = tdm_display_fd;
352         wl_vk_display->tdm.tdm_client      = tdm_client;
353         wl_vk_display->tdm.tdm_source      = NULL;
354         wl_vk_display->tdm.tdm_initialized = TPL_TRUE;
355
356         TPL_INFO("[TDM_CLIENT_INIT]",
357                          "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
358                          wl_vk_display, tdm_client, tdm_display_fd);
359
360         return TPL_ERROR_NONE;
361 }
362
363 static void
364 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
365                                                           uint32_t name, const char *interface,
366                                                           uint32_t version)
367 {
368 #if TIZEN_FEATURE_ENABLE
369         tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
370
371         if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
372                 char *env = tpl_getenv("TPL_EFS");
373                 if (env && !atoi(env)) {
374                         wl_vk_display->use_explicit_sync = TPL_FALSE;
375                 } else {
376                         wl_vk_display->explicit_sync =
377                                         wl_registry_bind(wl_registry, name,
378                                                                          &zwp_linux_explicit_synchronization_v1_interface, 1);
379                         wl_vk_display->use_explicit_sync = TPL_TRUE;
380                         TPL_LOG_D("[REGISTRY_BIND]",
381                                           "wl_vk_display(%p) bind zwp_linux_explicit_synchronization_v1_interface",
382                                           wl_vk_display);
383                 }
384         }
385 #endif
386 }
387
388 static void
389 __cb_wl_resistry_global_remove_callback(void *data,
390                                                                                 struct wl_registry *wl_registry,
391                                                                                 uint32_t name)
392 {
393 }
394
395 static const struct wl_registry_listener registry_listener = {
396         __cb_wl_resistry_global_callback,
397         __cb_wl_resistry_global_remove_callback
398 };
399
400 static void
401 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
402                                           const char *func_name)
403 {
404         int dpy_err;
405         char buf[1024];
406         strerror_r(errno, buf, sizeof(buf));
407
408         if (wl_vk_display->last_error == errno)
409                 return;
410
411         TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
412
413         dpy_err = wl_display_get_error(wl_vk_display->wl_display);
414         if (dpy_err == EPROTO) {
415                 const struct wl_interface *err_interface;
416                 uint32_t err_proxy_id, err_code;
417                 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
418                                                                                                  &err_interface,
419                                                                                                  &err_proxy_id);
420                 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
421                                 (err_interface ? err_interface->name : "UNKNOWN"),
422                                 err_code, err_proxy_id);
423         }
424
425         wl_vk_display->last_error = errno;
426 }
427
428 static tpl_result_t
429 _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
430 {
431         struct wl_registry *registry                = NULL;
432         struct wl_event_queue *queue                = NULL;
433         struct wl_display *display_wrapper          = NULL;
434         struct wl_proxy *wl_tbm                     = NULL;
435         struct wayland_tbm_client *wl_tbm_client    = NULL;
436         int ret;
437         tpl_result_t result = TPL_ERROR_NONE;
438
439         queue = wl_display_create_queue(wl_vk_display->wl_display);
440         if (!queue) {
441                 TPL_ERR("Failed to create wl_queue wl_display(%p)",
442                                 wl_vk_display->wl_display);
443                 result = TPL_ERROR_INVALID_OPERATION;
444                 goto fini;
445         }
446
447         wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
448         if (!wl_vk_display->ev_queue) {
449                 TPL_ERR("Failed to create wl_queue wl_display(%p)",
450                                 wl_vk_display->wl_display);
451                 result = TPL_ERROR_INVALID_OPERATION;
452                 goto fini;
453         }
454
455         display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
456         if (!display_wrapper) {
457                 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
458                                 wl_vk_display->wl_display);
459                 result = TPL_ERROR_INVALID_OPERATION;
460                 goto fini;
461         }
462
463         wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
464
465         registry = wl_display_get_registry(display_wrapper);
466         if (!registry) {
467                 TPL_ERR("Failed to create wl_registry");
468                 result = TPL_ERROR_INVALID_OPERATION;
469                 goto fini;
470         }
471
472         wl_proxy_wrapper_destroy(display_wrapper);
473         display_wrapper = NULL;
474
475         wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
476         if (!wl_tbm_client) {
477                 TPL_ERR("Failed to initialize wl_tbm_client.");
478                 result = TPL_ERROR_INVALID_CONNECTION;
479                 goto fini;
480         }
481
482         wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
483         if (!wl_tbm) {
484                 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
485                 result = TPL_ERROR_INVALID_CONNECTION;
486                 goto fini;
487         }
488
489         wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
490         wl_vk_display->wl_tbm_client = wl_tbm_client;
491
492         if (wl_registry_add_listener(registry, &registry_listener,
493                                                                  wl_vk_display)) {
494                 TPL_ERR("Failed to wl_registry_add_listener");
495                 result = TPL_ERROR_INVALID_OPERATION;
496                 goto fini;
497         }
498
499         ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
500         if (ret == -1) {
501                 _wl_display_print_err(wl_vk_display, "roundtrip_queue");
502                 result = TPL_ERROR_INVALID_OPERATION;
503                 goto fini;
504         }
505
506 #if TIZEN_FEATURE_ENABLE
507         if (wl_vk_display->explicit_sync) {
508                 wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
509                                                    wl_vk_display->ev_queue);
510                 TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
511                                   wl_vk_display->explicit_sync);
512         }
513 #endif
514
515         wl_vk_display->wl_initialized = TPL_TRUE;
516
517         TPL_INFO("[WAYLAND_INIT]",
518                          "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
519                          wl_vk_display, wl_vk_display->wl_display,
520                          wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
521 #if TIZEN_FEATURE_ENABLE
522         TPL_INFO("[WAYLAND_INIT]",
523                          "explicit_sync(%p)",
524                          wl_vk_display->explicit_sync);
525 #endif
526 fini:
527         if (display_wrapper)
528                 wl_proxy_wrapper_destroy(display_wrapper);
529         if (registry)
530                 wl_registry_destroy(registry);
531         if (queue)
532                 wl_event_queue_destroy(queue);
533
534         return result;
535 }
536
/* Tear down the wayland side of wl_vk_display on the worker thread, in the
 * reverse order of _thread_wl_display_init: cancel any pending read, drain
 * the backend queue, destroy the bound globals, deinit wayland-tbm, then
 * destroy the event queue. */
static void
_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
{
	/* If wl_vk_display is in prepared state, cancel it */
	if (wl_vk_display->prepared) {
		wl_display_cancel_read(wl_vk_display->wl_display);
		wl_vk_display->prepared = TPL_FALSE;
	}

	/* Drain events already queued locally before destroying proxies. */
	if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
										  wl_vk_display->ev_queue) == -1) {
		_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->explicit_sync) {
		TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
				 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
				 wl_vk_display, wl_vk_display->explicit_sync);
		zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
		wl_vk_display->explicit_sync = NULL;
	}
#endif

	if (wl_vk_display->wl_tbm_client) {
		struct wl_proxy *wl_tbm = NULL;

		/* Detach the wl_tbm proxy from ev_queue before the queue dies. */
		wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
										wl_vk_display->wl_tbm_client);
		if (wl_tbm) {
			wl_proxy_set_queue(wl_tbm, NULL);
		}

		TPL_INFO("[WL_TBM_DEINIT]",
				 "wl_vk_display(%p) wl_tbm_client(%p)",
				 wl_vk_display, wl_vk_display->wl_tbm_client);
		wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
		wl_vk_display->wl_tbm_client = NULL;
	}

	/* NOTE(review): ev_queue is not NULL-checked here — relies on fini only
	 * running when wl_initialized is TRUE (see __thread_func_disp_finalize). */
	wl_event_queue_destroy(wl_vk_display->ev_queue);

	wl_vk_display->wl_initialized = TPL_FALSE;

	TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
			 wl_vk_display, wl_vk_display->wl_display);
}
584
585 static void*
586 _thread_init(void *data)
587 {
588         tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
589
590         if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
591                 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
592                                 wl_vk_display, wl_vk_display->wl_display);
593         }
594
595         if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
596                 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
597         }
598
599         return wl_vk_display;
600 }
601
/* gsource prepare callback: enter the wayland read-intention state.
 * Dispatches locally-pending events until wl_display_prepare_read_queue
 * succeeds, then flushes outgoing requests before the poll.
 * Returns TPL_TRUE to skip polling and go straight to dispatch (only when
 * last_error is set), TPL_FALSE to proceed to poll/check. */
static tpl_bool_t
__thread_func_disp_prepare(tpl_gsource *gsource)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	/* If this wl_vk_display is already prepared,
	 * do nothing in this function. */
	if (wl_vk_display->prepared)
		return TPL_FALSE;

	/* If there is a last_error, there is no need to poll,
	 * so skip directly to dispatch.
	 * prepare -> dispatch */
	if (wl_vk_display->last_error)
		return TPL_TRUE;

	/* prepare_read_queue returns 0 only once the queue is empty, so keep
	 * dispatching pending events until then.
	 * NOTE(review): if dispatch_queue_pending keeps failing this loop could
	 * spin — confirm that is acceptable / cannot happen in practice. */
	while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
										 wl_vk_display->ev_queue) != 0) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
		}
	}

	wl_vk_display->prepared = TPL_TRUE;

	/* Flush outgoing requests before sleeping in poll(). */
	wl_display_flush(wl_vk_display->wl_display);

	return TPL_FALSE;
}
633
/* gsource check callback: complete (or cancel) the read started in prepare.
 * Returns TPL_TRUE when events were read and dispatch should run. */
static tpl_bool_t
__thread_func_disp_check(tpl_gsource *gsource)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
	tpl_bool_t ret = TPL_FALSE;

	/* Nothing to do unless prepare() entered the read state. */
	if (!wl_vk_display->prepared)
		return ret;

	/* If prepared, but last_error is set,
	 * cancel_read is executed and FALSE is returned.
	 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
	 * and skipping disp_check from prepare to disp_dispatch.
	 * check -> prepare -> dispatch -> G_SOURCE_REMOVE
	 * NOTE: prepared is deliberately left TRUE in this branch to drive
	 * that prepare->dispatch shortcut. */
	if (wl_vk_display->prepared && wl_vk_display->last_error) {
		wl_display_cancel_read(wl_vk_display->wl_display);
		return ret;
	}

	/* Read events if the fd is readable; otherwise the pending read
	 * intention must be cancelled (wayland API requirement). */
	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_read_events(wl_vk_display->wl_display) == -1)
			_wl_display_print_err(wl_vk_display, "read_event");
		ret = TPL_TRUE;
	} else {
		wl_display_cancel_read(wl_vk_display->wl_display);
		ret = TPL_FALSE;
	}

	wl_vk_display->prepared = TPL_FALSE;

	return ret;
}
667
/* gsource dispatch callback: dispatch events read in check().
 * The dispatch + flush sequence is the area protected by wl_event_mutex,
 * serializing it against other users of ev_queue.
 * Returns TPL_FALSE (source removal) when last_error is set. */
static tpl_bool_t
__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	TPL_IGNORE(message);

	/* If there is last_error, SOURCE_REMOVE should be returned
	 * to remove the gsource from the main loop.
	 * This is because wl_vk_display is not valid since last_error was set.*/
	if (wl_vk_display->last_error) {
		return TPL_FALSE;
	}

	/* Protected area: dispatch pending events on ev_queue and flush,
	 * under wl_event_mutex. */
	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
		}
	}

	wl_display_flush(wl_vk_display->wl_display);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return TPL_TRUE;
}
696
/* gsource finalize callback for the display source (runs on the worker
 * thread). Tears down the wayland state under disp_mutex and signals
 * disp_cond so a waiter blocked on gsource_finalized can proceed. */
static void
__thread_func_disp_finalize(tpl_gsource *gsource)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	tpl_gmutex_lock(&wl_vk_display->disp_mutex);
	TPL_LOG_D("[D_FINALIZE]", "wl_vk_display(%p) tpl_gsource(%p)",
			  wl_vk_display, gsource);

	/* Only tear down what init actually completed. */
	if (wl_vk_display->wl_initialized)
		_thread_wl_display_fini(wl_vk_display);

	/* Mark finalized before signalling so the waiter's predicate holds. */
	wl_vk_display->gsource_finalized = TPL_TRUE;

	tpl_gcond_signal(&wl_vk_display->disp_cond);
	tpl_gmutex_unlock(&wl_vk_display->disp_mutex);

	return;
}
717
718
/* gsource callbacks for the wl_display fd, implementing the full wayland
 * prepare/check (read-intention) protocol plus dispatch and finalize. */
static tpl_gsource_functions disp_funcs = {
	.prepare  = __thread_func_disp_prepare,
	.check    = __thread_func_disp_check,
	.dispatch = __thread_func_disp_dispatch,
	.finalize = __thread_func_disp_finalize,
};
725
726 static tpl_result_t
727 __tpl_wl_vk_display_init(tpl_display_t *display)
728 {
729         TPL_ASSERT(display);
730
731         tpl_wl_vk_display_t *wl_vk_display = NULL;
732
733         /* Do not allow default display in wayland */
734         if (!display->native_handle) {
735                 TPL_ERR("Invalid native handle for display.");
736                 return TPL_ERROR_INVALID_PARAMETER;
737         }
738
739         if (!_check_native_handle_is_wl_display(display->native_handle)) {
740                 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
741                 return TPL_ERROR_INVALID_PARAMETER;
742         }
743
744         wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
745                                                         sizeof(tpl_wl_vk_display_t));
746         if (!wl_vk_display) {
747                 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
748                 return TPL_ERROR_OUT_OF_MEMORY;
749         }
750
751         display->backend.data             = wl_vk_display;
752         display->bufmgr_fd                = -1;
753
754         wl_vk_display->tdm.tdm_initialized    = TPL_FALSE;
755         wl_vk_display->wl_initialized     = TPL_FALSE;
756
757         wl_vk_display->ev_queue           = NULL;
758         wl_vk_display->wl_display         = (struct wl_display *)display->native_handle;
759         wl_vk_display->last_error         = 0;
760         wl_vk_display->use_explicit_sync  = TPL_FALSE;   // default disabled
761         wl_vk_display->prepared           = TPL_FALSE;
762
763         /* Wayland Interfaces */
764 #if TIZEN_FEATURE_ENABLE
765         wl_vk_display->explicit_sync      = NULL;
766 #endif
767         wl_vk_display->wl_tbm_client      = NULL;
768
769         /* Vulkan specific surface capabilities */
770         wl_vk_display->min_buffer         = 2;
771         wl_vk_display->max_buffer         = VK_CLIENT_QUEUE_SIZE;
772         wl_vk_display->present_modes      = TPL_DISPLAY_PRESENT_MODE_FIFO;
773
774         wl_vk_display->use_wait_vblank    = TPL_TRUE;   // default enabled
775         {
776                 char *env = tpl_getenv("TPL_WAIT_VBLANK");
777                 if (env && !atoi(env)) {
778                         wl_vk_display->use_wait_vblank = TPL_FALSE;
779                 }
780         }
781
782         tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
783
784         tpl_gmutex_init(&wl_vk_display->disp_mutex);
785         tpl_gcond_init(&wl_vk_display->disp_cond);
786
787         /* Create gthread */
788         wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
789                                                                                            (tpl_gthread_func)_thread_init,
790                                                                                            (void *)wl_vk_display);
791         if (!wl_vk_display->thread) {
792                 TPL_ERR("Failed to create wl_vk_thread");
793                 goto free_display;
794         }
795
796         wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
797                                                                                                         (void *)wl_vk_display,
798                                                                                                         wl_display_get_fd(wl_vk_display->wl_display),
799                                                                                                         FD_TYPE_SOCKET,
800                                                                                                         &disp_funcs, SOURCE_TYPE_NORMAL);
801         if (!wl_vk_display->disp_source) {
802                 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
803                                 display->native_handle,
804                                 wl_vk_display->thread);
805                 goto free_display;
806         }
807
808         tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex);
809         tpl_gcond_init(&wl_vk_display->tdm.tdm_cond);
810
811         wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread,
812                                                                                                    (void *)wl_vk_display,
813                                                                                                    wl_vk_display->tdm.tdm_display_fd,
814                                                                                                    FD_TYPE_SOCKET,
815                                                                                                    &tdm_funcs, SOURCE_TYPE_NORMAL);
816         if (!wl_vk_display->tdm.tdm_source) {
817                 TPL_ERR("Failed to create tdm_gsource\n");
818                 goto free_display;
819         }
820
821         TPL_INFO("[DISPLAY_INIT]",
822                          "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
823                          wl_vk_display,
824                          wl_vk_display->thread,
825                          wl_vk_display->wl_display);
826
827         TPL_INFO("[DISPLAY_INIT]",
828                          "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
829                          wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
830                          wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
831
832         return TPL_ERROR_NONE;
833
834 free_display:
835         if (wl_vk_display->tdm.tdm_source) {
836                 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
837                 while (!wl_vk_display->tdm.gsource_finalized) {
838                         tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
839                         tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
840                 }
841                 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
842         }
843
844         if (wl_vk_display->disp_source) {
845                 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
846                 while (!wl_vk_display->gsource_finalized) {
847                         tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
848                         tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
849                 }
850                 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
851         }
852
853         if (wl_vk_display->thread) {
854                 tpl_gthread_destroy(wl_vk_display->thread);
855         }
856
857         tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
858         tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
859         tpl_gcond_clear(&wl_vk_display->disp_cond);
860         tpl_gmutex_clear(&wl_vk_display->disp_mutex);
861
862         wl_vk_display->thread = NULL;
863         free(wl_vk_display);
864
865         display->backend.data = NULL;
866         return TPL_ERROR_INVALID_OPERATION;
867 }
868
/* Finalizes the wl_vk display backend created by __tpl_wl_vk_display_init().
 *
 * Destroys the tdm and wayland-display gsources on the worker thread,
 * stops the thread, clears every mutex/cond, then frees the backend data.
 * Safe to call when display->backend.data is already NULL.
 */
static void
__tpl_wl_vk_display_fini(tpl_display_t *display)
{
	tpl_wl_vk_display_t *wl_vk_display;

	TPL_ASSERT(display);

	wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
	if (wl_vk_display) {
		TPL_INFO("[DISPLAY_FINI]",
				 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
				 wl_vk_display,
				 wl_vk_display->thread,
				 wl_vk_display->wl_display);

		if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) {
			/* This is a protection to prevent problems that arise in unexpected situations
			 * that g_cond_wait cannot work normally.
			 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
			 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
			 * */
			tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
			while (!wl_vk_display->tdm.gsource_finalized) {
				tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
				tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
			}
			wl_vk_display->tdm.tdm_source = NULL;
			tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
		}

		/* This is a protection to prevent problems that arise in unexpected situations
		 * that g_cond_wait cannot work normally.
		 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
		 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
		 * */
		tpl_gmutex_lock(&wl_vk_display->disp_mutex);
		while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) {
			tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
			tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
		}
		wl_vk_display->disp_source = NULL;
		tpl_gmutex_unlock(&wl_vk_display->disp_mutex);

		/* Both gsources have been finalized; the worker thread can now be
		 * joined safely. */
		if (wl_vk_display->thread) {
			tpl_gthread_destroy(wl_vk_display->thread);
			wl_vk_display->thread = NULL;
		}

		/* Conds/mutexes are only cleared after the thread is gone, so no
		 * thread can still be waiting on them. */
		tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
		tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
		tpl_gcond_clear(&wl_vk_display->disp_cond);
		tpl_gmutex_clear(&wl_vk_display->disp_mutex);

		tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);

		free(wl_vk_display);
	}

	display->backend.data = NULL;
}
929
930 static tpl_result_t
931 __tpl_wl_vk_display_query_config(tpl_display_t *display,
932                 tpl_surface_type_t surface_type,
933                 int red_size, int green_size,
934                 int blue_size, int alpha_size,
935                 int color_depth, int *native_visual_id,
936                 tpl_bool_t *is_slow)
937 {
938         TPL_ASSERT(display);
939
940         if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
941                         green_size == 8 && blue_size == 8 &&
942                         (color_depth == 32 || color_depth == 24)) {
943
944                 if (alpha_size == 8) {
945                         if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
946                         if (is_slow) *is_slow = TPL_FALSE;
947                         return TPL_ERROR_NONE;
948                 }
949                 if (alpha_size == 0) {
950                         if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
951                         if (is_slow) *is_slow = TPL_FALSE;
952                         return TPL_ERROR_NONE;
953                 }
954         }
955
956         return TPL_ERROR_INVALID_PARAMETER;
957 }
958
959 static tpl_result_t
960 __tpl_wl_vk_display_filter_config(tpl_display_t *display,
961                                                                           int *visual_id,
962                                                                           int alpha_size)
963 {
964         TPL_IGNORE(display);
965         TPL_IGNORE(visual_id);
966         TPL_IGNORE(alpha_size);
967         return TPL_ERROR_NONE;
968 }
969
970 static tpl_result_t
971 __tpl_wl_vk_display_query_window_supported_buffer_count(
972         tpl_display_t *display,
973         tpl_handle_t window, int *min, int *max)
974 {
975         tpl_wl_vk_display_t *wl_vk_display = NULL;
976
977         TPL_ASSERT(display);
978         TPL_ASSERT(window);
979
980         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
981         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
982
983         if (min) *min = wl_vk_display->min_buffer;
984         if (max) *max = wl_vk_display->max_buffer;
985
986         return TPL_ERROR_NONE;
987 }
988
989 static tpl_result_t
990 __tpl_wl_vk_display_query_window_supported_present_modes(
991         tpl_display_t *display,
992         tpl_handle_t window, int *present_modes)
993 {
994         tpl_wl_vk_display_t *wl_vk_display = NULL;
995
996         TPL_ASSERT(display);
997         TPL_ASSERT(window);
998
999         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
1000         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1001
1002         if (present_modes) {
1003                 *present_modes = wl_vk_display->present_modes;
1004         }
1005
1006         return TPL_ERROR_NONE;
1007 }
1008
/* Forces every buffer tracked by the surface back to RELEASED state.
 *
 * Walks the fixed-size buffers array and, per buffer:
 *   - detaches it from the tracking array under buffers_mutex,
 *   - optionally waits (bounded, 16ms) for an in-flight buffer to reach a
 *     quiescent state,
 *   - releases it to the tbm_queue if it was acquired but not yet released,
 *     or cancels the dequeue if it was dequeued but never enqueued.
 * Lock order is wl_event_mutex -> buffers_mutex -> per-buffer mutex.
 */
static void
_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display      = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain        = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer        = NULL;
	tpl_bool_t need_to_release              = TPL_FALSE;
	tpl_bool_t need_to_cancel               = TPL_FALSE;
	buffer_status_t status                  = RELEASED;
	int idx                                 = 0;

	while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
		/* wl_event_mutex keeps the worker thread's wayland event handling
		 * from touching this buffer while it is being cleared. */
		tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		wl_vk_buffer = wl_vk_surface->buffers[idx];

		if (wl_vk_buffer) {
			/* Detach the slot first so no other path can see this buffer
			 * while it is being torn down. */
			wl_vk_surface->buffers[idx] = NULL;
			wl_vk_surface->buffer_cnt--;
		} else {
			tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
			tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
			idx++;
			continue;
		}

		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		status = wl_vk_buffer->status;

		TPL_INFO("[BUFFER_CLEAR]",
				 "[%d] wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
				 idx, wl_vk_surface, wl_vk_buffer,
				 wl_vk_buffer->tbm_surface,
				 status_to_string[status]);

		if (status >= ENQUEUED) {
			tpl_bool_t need_to_wait  = TPL_FALSE;
			tpl_result_t wait_result = TPL_ERROR_NONE;

			/* Without explicit sync the buffer is settled once it reaches
			 * WAITING_VBLANK; with explicit sync only after COMMITTED. */
			if (!wl_vk_display->use_explicit_sync &&
				status < WAITING_VBLANK)
				need_to_wait = TPL_TRUE;

			if (wl_vk_display->use_explicit_sync &&
				status < COMMITTED)
				need_to_wait = TPL_TRUE;

			if (need_to_wait) {
				/* Drop wl_event_mutex so the worker thread can make
				 * progress and signal this buffer's cond. */
				tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
				wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond,
												  &wl_vk_buffer->mutex,
												  16); /* 16ms */
				tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

				status = wl_vk_buffer->status;

				if (wait_result == TPL_ERROR_TIME_OUT)
					TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
							 wl_vk_buffer);
			}
		}

		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
		/* It has been acquired but has not yet been released, so this
		 * buffer must be released. */
		need_to_release = (status >= ACQUIRED && status <= COMMITTED);

		/* After dequeue, it has not been enqueued yet
		 * so cancel_dequeue must be performed. */
		need_to_cancel = (status == DEQUEUED);

		if (swapchain && swapchain->tbm_queue) {
			if (need_to_release) {
				tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
													wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}

			if (need_to_cancel) {
				tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
														   wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}
		}

		wl_vk_buffer->status = RELEASED;

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		/* NOTE(review): presumably drops the ref taken when the buffer was
		 * first tracked on this surface — confirm against the dequeue path. */
		if (need_to_release || need_to_cancel)
			tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);

		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

		idx++;
	}
}
1114
1115 static tdm_client_vblank*
1116 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1117 {
1118         tdm_client_vblank *vblank = NULL;
1119         tdm_client_output *tdm_output = NULL;
1120         tdm_error tdm_err = TDM_ERROR_NONE;
1121
1122         if (!tdm_client) {
1123                 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1124                 return NULL;
1125         }
1126
1127         tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1128         if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1129                 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1130                 return NULL;
1131         }
1132
1133         vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1134         if (!vblank || tdm_err != TDM_ERROR_NONE) {
1135                 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1136                 return NULL;
1137         }
1138
1139         tdm_err = tdm_client_handle_pending_events(tdm_client);
1140         if (tdm_err != TDM_ERROR_NONE) {
1141                 TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err);
1142         }
1143
1144         tdm_client_vblank_set_enable_fake(vblank, 1);
1145         tdm_client_vblank_set_sync(vblank, 0);
1146
1147         return vblank;
1148 }
1149
/* Thread-side initialization of a wl_vk surface (runs on wl_vk_thread).
 *
 * Creates the per-surface tdm vblank object and its waiting-buffer list
 * when vblank waiting is enabled, and acquires a wayland explicit-sync
 * object when the compositor advertises the protocol. The
 * tbm_surface_queue is intentionally NOT created here; that happens at
 * swapchain creation. */
static void
_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	/* tbm_surface_queue will be created at swapchain_create */

	if (wl_vk_display->use_wait_vblank) {
		wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
								wl_vk_display->tdm.tdm_client);
		if (wl_vk_surface->vblank) {
			TPL_INFO("[VBLANK_INIT]",
					"wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
					wl_vk_surface, wl_vk_display->tdm.tdm_client,
					wl_vk_surface->vblank);

			wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
			/* Without the waiting list the vblank object is unusable,
			 * so drop it and fall back to non-vblank operation. */
			if (!wl_vk_surface->vblank_waiting_buffers) {
				tdm_client_vblank_destroy(wl_vk_surface->vblank);
				wl_vk_surface->vblank = NULL;
			}
		}
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
		wl_vk_surface->surface_sync =
			zwp_linux_explicit_synchronization_v1_get_synchronization(
					wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
		if (wl_vk_surface->surface_sync) {
			TPL_INFO("[EXPLICIT_SYNC_INIT]",
					 "wl_vk_surface(%p) surface_sync(%p)",
					 wl_vk_surface, wl_vk_surface->surface_sync);
		} else {
			TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
					 wl_vk_surface);
			/* Disable explicit sync for the whole display so later
			 * surfaces do not retry a path that just failed. */
			wl_vk_display->use_explicit_sync = TPL_FALSE;
		}
	}
#endif

	/* vblank waiting is only meaningful with a positive post interval. */
	wl_vk_surface->vblank_enable = (wl_vk_surface->vblank != NULL &&
		wl_vk_surface->post_interval > 0);
}
1194
/* Thread-side teardown of a wl_vk surface (runs on wl_vk_thread).
 *
 * Releases, in order: the vblank waiting-buffer list, the explicit-sync
 * object (when built with TIZEN_FEATURE_ENABLE), and the tdm vblank
 * object. Each pointer is NULLed after destruction so a repeated call is
 * harmless. */
static void
_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
{
	TPL_INFO("[SURFACE_FINI]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->vblank_waiting_buffers) {
		__tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
		wl_vk_surface->vblank_waiting_buffers = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_surface->surface_sync) {
		TPL_INFO("[SURFACE_SYNC_DESTROY]",
				 "wl_vk_surface(%p) surface_sync(%p)",
				  wl_vk_surface, wl_vk_surface->surface_sync);
		zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
		wl_vk_surface->surface_sync = NULL;
	}
#endif

	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_DESTROY]",
				 "wl_vk_surface(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_surface->vblank);
		tdm_client_vblank_destroy(wl_vk_surface->vblank);
		wl_vk_surface->vblank = NULL;
	}
}
1225
1226 static tpl_bool_t
1227 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1228 {
1229         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1230
1231         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1232
1233         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1234         if (message & INIT_SURFACE) { /* Initialize surface */
1235                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) initialize message received!",
1236                                   wl_vk_surface);
1237                 _thread_wl_vk_surface_init(wl_vk_surface);
1238                 wl_vk_surface->initialized_in_thread = TPL_TRUE;
1239                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1240         }
1241
1242         if (message & ACQUIRABLE) { /* Acquirable message */
1243                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) acquirable message received!",
1244                                   wl_vk_surface);
1245                 if (_thread_surface_queue_acquire(wl_vk_surface)
1246                         != TPL_ERROR_NONE) {
1247                         TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1248                                         wl_vk_surface);
1249                 }
1250         }
1251
1252         if (message & CREATE_QUEUE) { /* Create tbm_surface_queue */
1253                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) queue creation message received!",
1254                                   wl_vk_surface);
1255                 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1256                         != TPL_ERROR_NONE) {
1257                         TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1258                                         wl_vk_surface);
1259                 }
1260                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1261         }
1262
1263         if (message & DESTROY_QUEUE) { /* swapchain destroy */
1264                 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) swapchain destroy message received!",
1265                                   wl_vk_surface);
1266                 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1267                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1268         }
1269
1270         /* init to NONE_MESSAGE */
1271         wl_vk_surface->sent_message = NONE_MESSAGE;
1272
1273         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1274
1275         return TPL_TRUE;
1276 }
1277
/* gsource finalize handler for the per-surface event source; runs on the
 * wl_vk_thread. Tears down thread-side surface resources and wakes the
 * destroy loop waiting in __tpl_wl_vk_surface_fini(). */
static void
__thread_func_surf_finalize(tpl_gsource *gsource)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;

	wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	TPL_LOG_D("[S_FINALIZE]", "wl_vk_surface(%p) tpl_gsource(%p)",
			  wl_vk_surface, gsource);

	_thread_wl_vk_surface_fini(wl_vk_surface);

	/* Set under surf_mutex so the gcond_wait loop in the client thread
	 * observes the flag together with the signal below. */
	wl_vk_surface->gsource_finalized = TPL_TRUE;

	tpl_gcond_signal(&wl_vk_surface->surf_cond);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
1297
/* Callback table for the per-surface gsource. There is no fd to poll, so
 * only dispatch (message handling) and finalize (teardown) are set. */
static tpl_gsource_functions surf_funcs = {
	.prepare = NULL,
	.check = NULL,
	.dispatch = __thread_func_surf_dispatch,
	.finalize = __thread_func_surf_finalize,
};
1304
1305
/* Initializes the wl_vk backend data for a window surface.
 *
 * Allocates the backend struct, creates a message-driven gsource on the
 * display's worker thread, initializes all bookkeeping fields, then sends
 * INIT_SURFACE to the thread and blocks until the thread-side init
 * (_thread_wl_vk_surface_init) has completed.
 *
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_OUT_OF_MEMORY on
 * allocation failure, or TPL_ERROR_INVALID_OPERATION when the gsource
 * cannot be created. */
static tpl_result_t
__tpl_wl_vk_surface_init(tpl_surface_t *surface)
{
	tpl_wl_vk_surface_t *wl_vk_surface      = NULL;
	tpl_wl_vk_display_t *wl_vk_display      = NULL;
	tpl_gsource *surf_source                = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
	TPL_ASSERT(surface->native_handle);

	wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
							 sizeof(tpl_wl_vk_surface_t));
	if (!wl_vk_surface) {
		TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
		return TPL_ERROR_OUT_OF_MEMORY;
	}

	/* fd -1 / FD_TYPE_NONE: this source is driven purely by messages. */
	surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
									 -1, FD_TYPE_NONE, &surf_funcs, SOURCE_TYPE_NORMAL);
	if (!surf_source) {
		TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
				wl_vk_surface);
		free(wl_vk_surface);
		surface->backend.data = NULL;
		return TPL_ERROR_INVALID_OPERATION;
	}

	surface->backend.data                  = (void *)wl_vk_surface;
	/* Actual size is established later via the tbm_queue reset callback. */
	surface->width                                 = -1;
	surface->height                        = -1;

	wl_vk_surface->surf_source             = surf_source;
	wl_vk_surface->swapchain               = NULL;

	wl_vk_surface->wl_vk_display           = wl_vk_display;
	wl_vk_surface->wl_surface              = (struct wl_surface *)surface->native_handle;
	wl_vk_surface->tpl_surface             = surface;

	wl_vk_surface->reset                   = TPL_FALSE;
	wl_vk_surface->is_activated            = TPL_FALSE;
	wl_vk_surface->vblank_done             = TPL_TRUE;
	wl_vk_surface->initialized_in_thread   = TPL_FALSE;

	wl_vk_surface->render_done_cnt         = 0;

	wl_vk_surface->vblank                  = NULL;
	wl_vk_surface->vblank_enable           = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
	wl_vk_surface->surface_sync            = NULL;
#endif

	wl_vk_surface->sent_message            = NONE_MESSAGE;

	wl_vk_surface->post_interval           = surface->post_interval;

	{
		int i = 0;
		for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
			wl_vk_surface->buffers[i]     = NULL;
		wl_vk_surface->buffer_cnt         = 0;
	}

	tpl_gmutex_init(&wl_vk_surface->surf_mutex);
	tpl_gcond_init(&wl_vk_surface->surf_cond);

	tpl_gmutex_init(&wl_vk_surface->buffers_mutex);

	/* Initialize in thread */
	/* Hand off to the worker thread and block until it confirms
	 * initialization via initialized_in_thread + surf_cond. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->sent_message = INIT_SURFACE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	while (!wl_vk_surface->initialized_in_thread)
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	TPL_INFO("[SURFACE_INIT]",
			  "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
			  surface, wl_vk_surface, wl_vk_surface->surf_source);

	return TPL_ERROR_NONE;
}
1393
/* Finalizes the wl_vk backend data of a window surface.
 *
 * Destroys the per-surface gsource on the worker thread (waiting for its
 * finalize handler), then clears mutexes/conds and frees the backend
 * struct. Counterpart of __tpl_wl_vk_surface_init(). */
static void
__tpl_wl_vk_surface_fini(tpl_surface_t *surface)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	wl_vk_display = (tpl_wl_vk_display_t *)
							 surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

	TPL_INFO("[SURFACE_FINI][BEGIN]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
		/* finalize swapchain */

	}

	wl_vk_surface->swapchain        = NULL;

	/* This is a protection to prevent problems that arise in unexpected situations
	 * that g_cond_wait cannot work normally.
	 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
	 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
	 * */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) {
		tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	}
	wl_vk_surface->surf_source = NULL;
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	_print_buffer_lists(wl_vk_surface);

	wl_vk_surface->wl_surface       = NULL;
	wl_vk_surface->wl_vk_display    = NULL;
	wl_vk_surface->tpl_surface      = NULL;

	/* Take and drop surf_mutex once more so any thread still inside a
	 * critical section has left before the mutex is cleared. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
	tpl_gcond_clear(&wl_vk_surface->surf_cond);

	TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);

	free(wl_vk_surface);
	surface->backend.data = NULL;
}
1450
1451 static tpl_result_t
1452 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1453                                                                                   int post_interval)
1454 {
1455         TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1456
1457         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1458
1459         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1460
1461         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1462
1463         TPL_INFO("[SET_POST_INTERVAL]",
1464                          "wl_vk_surface(%p) post_interval(%d -> %d)",
1465                          wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1466
1467         wl_vk_surface->post_interval = post_interval;
1468         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1469
1470
1471
1472         return TPL_ERROR_NONE;
1473 }
1474
1475 static tpl_bool_t
1476 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1477 {
1478         TPL_ASSERT(surface);
1479         TPL_ASSERT(surface->backend.data);
1480
1481         tpl_wl_vk_surface_t *wl_vk_surface =
1482                 (tpl_wl_vk_surface_t *)surface->backend.data;
1483
1484         return !(wl_vk_surface->reset);
1485 }
1486
/* tbm_surface_queue reset callback.
 * Invoked by libtbm when the queue backing this surface is reset
 * (e.g. after a resize or an activation-state change driven by the
 * display server). Logs what changed, marks the surface as 'reset'
 * so the next frame acquires a buffer matching the new state, and
 * forwards the event to the user's reset callback if registered.
 *
 * NOTE(review): this callback runs without holding surf_mutex; the
 * 'reset' flag is read elsewhere under wl_event_mutex — confirm the
 * intended locking discipline for this flag. */
static void
__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
							  void *data)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;
	tpl_wl_vk_swapchain_t *swapchain   = NULL;
	tpl_surface_t *surface             = NULL;
	tpl_bool_t is_activated            = TPL_FALSE;
	int width, height;

	wl_vk_surface = (tpl_wl_vk_surface_t *)data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	wl_vk_display = wl_vk_surface->wl_vk_display;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

	surface = wl_vk_surface->tpl_surface;
	TPL_CHECK_ON_NULL_RETURN(surface);

	swapchain = wl_vk_surface->swapchain;
	TPL_CHECK_ON_NULL_RETURN(swapchain);

	/* When the queue is resized, change the reset flag to TPL_TRUE to reflect
	 * the changed window size at the next frame. */
	width = tbm_surface_queue_get_width(tbm_queue);
	height = tbm_surface_queue_get_height(tbm_queue);
	if (surface->width != width || surface->height != height) {
		TPL_INFO("[QUEUE_RESIZE]",
				 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
				 wl_vk_surface, tbm_queue,
				 surface->width, surface->height, width, height);
	}

	/* When queue_reset_callback is called, if is_activated is different from
	 * its previous state change the reset flag to TPL_TRUE to get a new buffer
	 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
	is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
														   swapchain->tbm_queue);
	if (wl_vk_surface->is_activated != is_activated) {
		if (is_activated) {
			TPL_INFO("[ACTIVATED]",
					  "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
					  wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
		} else {
			TPL_INFO("[DEACTIVATED]",
					 " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
					 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
		}
	}

	/* Set unconditionally on every queue reset, even when neither the size
	 * nor the activation state actually changed. */
	wl_vk_surface->reset = TPL_TRUE;

	if (surface->reset_cb)
		surface->reset_cb(surface->reset_data);
}
1543
1544 static void
1545 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1546                                                                    void *data)
1547 {
1548         TPL_IGNORE(tbm_queue);
1549
1550         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1551         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1552
1553         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1554         if (wl_vk_surface->sent_message == NONE_MESSAGE) {
1555                 wl_vk_surface->sent_message = ACQUIRABLE;
1556                 tpl_gsource_send_message(wl_vk_surface->surf_source,
1557                                                                  wl_vk_surface->sent_message);
1558         }
1559         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1560 }
1561
1562 static tpl_result_t
1563 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1564 {
1565         TPL_ASSERT (wl_vk_surface);
1566
1567         tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1568         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1569         tbm_surface_queue_h tbm_queue      = NULL;
1570         tbm_bufmgr bufmgr = NULL;
1571         unsigned int capability;
1572
1573         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1574         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1575
1576         if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
1577                 TPL_ERR("buffer count(%d) must be higher than (%d)",
1578                                 swapchain->properties.buffer_count,
1579                                 wl_vk_display->min_buffer);
1580                 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1581                 return TPL_ERROR_INVALID_PARAMETER;
1582         }
1583
1584         if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
1585                 TPL_ERR("buffer count(%d) must be lower than (%d)",
1586                                 swapchain->properties.buffer_count,
1587                                 wl_vk_display->max_buffer);
1588                 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1589                 return TPL_ERROR_INVALID_PARAMETER;
1590         }
1591
1592         if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
1593                 TPL_ERR("Unsupported present_mode(%d)",
1594                                 swapchain->properties.present_mode);
1595                 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1596                 return TPL_ERROR_INVALID_PARAMETER;
1597         }
1598
1599         if (swapchain->old_swapchain_buffers) {
1600                 TPL_ERR("Should be destroy old_swapchain before create");
1601                 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1602                 return TPL_ERROR_INVALID_OPERATION;
1603         }
1604
1605         if (swapchain->tbm_queue) {
1606                 int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
1607                 int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
1608
1609                 if (swapchain->swapchain_buffers) {
1610                         swapchain->old_swapchain_buffers = swapchain->swapchain_buffers;
1611                         swapchain->swapchain_buffers = NULL;
1612                 }
1613
1614                 if (old_width != swapchain->properties.width ||
1615                         old_height != swapchain->properties.height) {
1616                         tbm_surface_queue_reset(swapchain->tbm_queue,
1617                                                                         swapchain->properties.width,
1618                                                                         swapchain->properties.height,
1619                                                                         TBM_FORMAT_ARGB8888);
1620                         TPL_INFO("[RESIZE]",
1621                                          "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
1622                                          wl_vk_surface, swapchain, swapchain->tbm_queue,
1623                                          old_width, old_height,
1624                                          swapchain->properties.width,
1625                                          swapchain->properties.height);
1626                 }
1627
1628                 swapchain->properties.buffer_count =
1629                         tbm_surface_queue_get_size(swapchain->tbm_queue);
1630
1631                 wl_vk_surface->reset = TPL_FALSE;
1632
1633                 __tpl_util_atomic_inc(&swapchain->ref_cnt);
1634                 swapchain->create_done = TPL_TRUE;
1635
1636                 TPL_INFO("[SWAPCHAIN_REUSE]",
1637                                  "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
1638                                  wl_vk_surface, swapchain, swapchain->tbm_queue,
1639                                  swapchain->properties.buffer_count);
1640
1641                 return TPL_ERROR_NONE;
1642         }
1643
1644         bufmgr = tbm_bufmgr_init(-1);
1645         capability = tbm_bufmgr_get_capability(bufmgr);
1646         tbm_bufmgr_deinit(bufmgr);
1647
1648         if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1649                 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1650                                                                         wl_vk_display->wl_tbm_client,
1651                                                                         wl_vk_surface->wl_surface,
1652                                                                         swapchain->properties.buffer_count,
1653                                                                         swapchain->properties.width,
1654                                                                         swapchain->properties.height,
1655                                                                         TBM_FORMAT_ARGB8888);
1656         } else {
1657                 tbm_queue = wayland_tbm_client_create_surface_queue(
1658                                                                         wl_vk_display->wl_tbm_client,
1659                                                                         wl_vk_surface->wl_surface,
1660                                                                         swapchain->properties.buffer_count,
1661                                                                         swapchain->properties.width,
1662                                                                         swapchain->properties.height,
1663                                                                         TBM_FORMAT_ARGB8888);
1664         }
1665
1666         if (!tbm_queue) {
1667                 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1668                                 wl_vk_surface);
1669                 swapchain->result = TPL_ERROR_OUT_OF_MEMORY;
1670                 return TPL_ERROR_OUT_OF_MEMORY;
1671         }
1672
1673         if (tbm_surface_queue_set_modes(
1674                         tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1675                         TBM_SURFACE_QUEUE_ERROR_NONE) {
1676                 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1677                                 tbm_queue);
1678                 tbm_surface_queue_destroy(tbm_queue);
1679                 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1680                 return TPL_ERROR_INVALID_OPERATION;
1681         }
1682
1683         if (tbm_surface_queue_add_reset_cb(
1684                         tbm_queue,
1685                         __cb_tbm_queue_reset_callback,
1686                         (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1687                 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1688                                 tbm_queue);
1689                 tbm_surface_queue_destroy(tbm_queue);
1690                 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1691                 return TPL_ERROR_INVALID_OPERATION;
1692         }
1693
1694         if (tbm_surface_queue_add_acquirable_cb(
1695                         tbm_queue,
1696                         __cb_tbm_queue_acquirable_callback,
1697                         (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1698                 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1699                                 tbm_queue);
1700                 tbm_surface_queue_destroy(tbm_queue);
1701                 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1702                 return TPL_ERROR_INVALID_OPERATION;
1703         }
1704
1705         swapchain->tbm_queue = tbm_queue;
1706         swapchain->create_done = TPL_TRUE;
1707
1708         TPL_INFO("[TBM_QUEUE_CREATED]",
1709                          "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1710                          wl_vk_surface, swapchain, tbm_queue);
1711
1712         return TPL_ERROR_NONE;
1713 }
1714
1715 static tpl_result_t
1716 __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
1717                 tbm_format format, int width,
1718                 int height, int buffer_count, int present_mode)
1719 {
1720         tpl_wl_vk_surface_t *wl_vk_surface              = NULL;
1721         tpl_wl_vk_display_t *wl_vk_display      = NULL;
1722         tpl_wl_vk_swapchain_t *swapchain  = NULL;
1723
1724         TPL_ASSERT(surface);
1725         TPL_ASSERT(surface->display);
1726
1727         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1728         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1729
1730         wl_vk_display = (tpl_wl_vk_display_t *)
1731                                                          surface->display->backend.data;
1732         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1733
1734         swapchain = wl_vk_surface->swapchain;
1735
1736         if (swapchain == NULL) {
1737                 swapchain =
1738                         (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
1739                         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
1740                 swapchain->tbm_queue           = NULL;
1741         }
1742
1743         swapchain->properties.buffer_count = buffer_count;
1744         swapchain->properties.width        = width;
1745         swapchain->properties.height       = height;
1746         swapchain->properties.present_mode = present_mode;
1747         swapchain->wl_vk_surface           = wl_vk_surface;
1748         swapchain->properties.format       = format;
1749         swapchain->swapchain_buffers       = NULL;
1750         swapchain->old_swapchain_buffers   = NULL;
1751
1752         swapchain->result                  = TPL_ERROR_NONE;
1753         swapchain->create_done             = TPL_FALSE;
1754
1755         wl_vk_surface->swapchain           = swapchain;
1756
1757         __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
1758
1759         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1760         /* send swapchain create tbm_queue message */
1761         wl_vk_surface->sent_message = CREATE_QUEUE;
1762         tpl_gsource_send_message(wl_vk_surface->surf_source,
1763                                                          wl_vk_surface->sent_message);
1764         while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE)
1765                 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1766         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1767
1768         TPL_CHECK_ON_FALSE_ASSERT_FAIL(
1769                 swapchain->tbm_queue != NULL,
1770                 "[CRITICAL FAIL] Failed to create tbm_surface_queue");
1771
1772         wl_vk_surface->reset = TPL_FALSE;
1773
1774         return TPL_ERROR_NONE;
1775 }
1776
1777 static void
1778 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1779 {
1780         TPL_ASSERT(wl_vk_surface);
1781
1782         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1783
1784         TPL_CHECK_ON_NULL_RETURN(swapchain);
1785
1786         if (swapchain->tbm_queue) {
1787                 TPL_INFO("[TBM_QUEUE_DESTROY]",
1788                                  "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1789                                  wl_vk_surface, swapchain, swapchain->tbm_queue);
1790                 tbm_surface_queue_destroy(swapchain->tbm_queue);
1791                 swapchain->tbm_queue = NULL;
1792         }
1793 }
1794
1795 void __untrack_swapchain_buffers(tpl_wl_vk_surface_t *wl_vk_surface, tbm_surface_h *sc_buffers)
1796 {
1797         tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1798
1799         for (int i = 0; i < swapchain->properties.buffer_count; i++) {
1800                 if (sc_buffers[i]) {
1801                         TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
1802                                          i, wl_vk_surface, swapchain, sc_buffers[i],
1803                                          _get_tbm_surface_bo_name(sc_buffers[i]));
1804                         tbm_surface_internal_unref(sc_buffers[i]);
1805                         sc_buffers[i] = NULL;
1806                 }               
1807         }
1808 }
1809
/* Backend entry point for swapchain destruction.
 * Drops one reference on the swapchain; when the last reference is gone,
 * untracks the exported buffers, clears the surface's buffer list, asks
 * the worker thread to destroy the tbm_queue, waits for it to finish,
 * and frees the swapchain object. */
static tpl_result_t
__tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
{
	tpl_wl_vk_swapchain_t *swapchain   = NULL;
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	swapchain = wl_vk_surface->swapchain;
	if (!swapchain) {
		TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
				wl_vk_surface);
		return TPL_ERROR_INVALID_OPERATION;
	}

	if (!swapchain->tbm_queue) {
		TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.",
				wl_vk_surface, wl_vk_surface->swapchain);
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Still referenced elsewhere (e.g. swapchain reuse): only release the
	 * old buffer array that was preserved across a re-creation. */
	if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
		TPL_INFO("[DESTROY_SWAPCHAIN]",
				 "wl_vk_surface(%p) swapchain(%p) still valid.",
				 wl_vk_surface, swapchain);
		if (swapchain->old_swapchain_buffers) {
			__untrack_swapchain_buffers(wl_vk_surface, swapchain->old_swapchain_buffers);
			free(swapchain->old_swapchain_buffers);
			swapchain->old_swapchain_buffers = NULL;
		}
		return TPL_ERROR_NONE;
	}

	TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
			 "wl_vk_surface(%p) swapchain(%p)",
			 wl_vk_surface, wl_vk_surface->swapchain);

	/* Last reference: drop the tbm refs taken when the buffers were
	 * exported to the client. */
	if (swapchain->swapchain_buffers) {
		__untrack_swapchain_buffers(wl_vk_surface, swapchain->swapchain_buffers);
		free(swapchain->swapchain_buffers);
		swapchain->swapchain_buffers = NULL;
	}

	_tpl_wl_vk_surface_buffer_clear(wl_vk_surface);

	/* Ask the worker thread to destroy the tbm_queue; it signals surf_cond
	 * after clearing swapchain->tbm_queue.
	 * NOTE(review): this wait has no timeout — if the worker source is
	 * already gone this would block forever; confirm teardown ordering. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->sent_message = DESTROY_QUEUE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	while (swapchain->tbm_queue)
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	_print_buffer_lists(wl_vk_surface);

	free(swapchain);
	wl_vk_surface->swapchain = NULL;

	return TPL_ERROR_NONE;
}
1878
/* Exports the swapchain's buffers to the caller (vkGetSwapchainImagesKHR
 * path). With buffers == NULL only *buffer_count is filled in (query
 * mode). Otherwise allocates swapchain->swapchain_buffers, fills it from
 * wayland-tbm, takes a tbm_surface_internal_ref on each entry (released
 * later by __untrack_swapchain_buffers), and returns the array through
 * *buffers. The whole operation runs under wl_event_mutex. */
static tpl_result_t
__tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
										  tbm_surface_h **buffers,
										  int *buffer_count)
{
	TPL_ASSERT(surface);
	TPL_ASSERT(surface->backend.data);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->display->backend.data);

	tpl_wl_vk_surface_t *wl_vk_surface =
		(tpl_wl_vk_surface_t *)surface->backend.data;
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)surface->display->backend.data;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tpl_result_t ret                   = TPL_ERROR_NONE;
	int i;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
	TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);

	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

	/* Query mode: report the queue size only. */
	if (!buffers) {
		*buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return TPL_ERROR_NONE;
	}

	/* NOTE(review): an existing swapchain->swapchain_buffers would be
	 * overwritten (leaked) here — confirm callers never call this twice
	 * without destroying the swapchain in between. */
	swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
										*buffer_count,
										sizeof(tbm_surface_h));
	if (!swapchain->swapchain_buffers) {
		TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
				*buffer_count);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return TPL_ERROR_OUT_OF_MEMORY;
	}

	/* NOTE(review): the wayland-tbm return value is stored in a
	 * tpl_result_t and tested with !ret, i.e. zero means failure here —
	 * this relies on wayland_tbm_client_queue_get_surfaces() returning a
	 * non-zero value on success; verify against wayland-tbm. */
	ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
												swapchain->tbm_queue,
												swapchain->swapchain_buffers,
												buffer_count);
	if (!ret) {
		TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
				wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
		free(swapchain->swapchain_buffers);
		swapchain->swapchain_buffers = NULL;
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Keep each exported tbm_surface alive while the client holds it. */
	for (i = 0; i < *buffer_count; i++) {
		if (swapchain->swapchain_buffers[i]) {
			TPL_INFO("[TRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
					  i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
					  _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
			tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
		}
	}

	*buffers = swapchain->swapchain_buffers;

	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return TPL_ERROR_NONE;
}
1946
/* tbm user-data destroy callback attached in _wl_vk_buffer_create().
 * Runs when the tbm_surface's user data is deleted (last tbm unref):
 * detaches the buffer from the surface's tracking array, destroys its
 * wl_buffer and explicit-sync objects, closes its fence fd, frees its
 * damage rects, and finally frees the wl_vk_buffer itself. */
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
			 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);

	/* Remove this buffer from the surface's tracking array under
	 * buffers_mutex; idx == -1 means it was already detached. */
	tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
	if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
		wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
		wl_vk_surface->buffer_cnt--;

		wl_vk_buffer->idx = -1;
	}
	tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

	/* Flush any pending protocol requests before destroying the
	 * wl_buffer proxy. */
	wl_display_flush(wl_vk_display->wl_display);

	if (wl_vk_buffer->wl_buffer) {
		wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
										  wl_vk_buffer->wl_buffer);
		wl_vk_buffer->wl_buffer = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	/* Explicit-sync buffer_release proxy, if one was created. */
	if (wl_vk_buffer->buffer_release) {
		zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
		wl_vk_buffer->buffer_release = NULL;
	}
#endif

	if (wl_vk_buffer->release_fence_fd != -1) {
		close(wl_vk_buffer->release_fence_fd);
		wl_vk_buffer->release_fence_fd = -1;
	}

	if (wl_vk_buffer->rects) {
		free(wl_vk_buffer->rects);
		wl_vk_buffer->rects = NULL;
		wl_vk_buffer->num_rects = 0;
	}

	wl_vk_buffer->tbm_surface = NULL;
	wl_vk_buffer->bo_name = -1;

	free(wl_vk_buffer);
}
1996
1997 static tpl_wl_vk_buffer_t *
1998 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1999 {
2000         tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2001         tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
2002                                                                            (void **)&wl_vk_buffer);
2003         return wl_vk_buffer;
2004 }
2005
2006 static tpl_wl_vk_buffer_t *
2007 _wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
2008                                           tbm_surface_h tbm_surface)
2009 {
2010         tpl_wl_vk_buffer_t  *wl_vk_buffer  = NULL;
2011
2012         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2013
2014         if (!wl_vk_buffer) {
2015                 wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
2016                 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);
2017
2018                 tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
2019                                                                                    (tbm_data_free)__cb_wl_vk_buffer_free);
2020                 tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
2021                                                                                    wl_vk_buffer);
2022
2023                 wl_vk_buffer->wl_buffer                = NULL;
2024                 wl_vk_buffer->tbm_surface              = tbm_surface;
2025                 wl_vk_buffer->bo_name                  = _get_tbm_surface_bo_name(tbm_surface);
2026                 wl_vk_buffer->wl_vk_surface            = wl_vk_surface;
2027
2028                 wl_vk_buffer->status                   = RELEASED;
2029
2030                 wl_vk_buffer->acquire_fence_fd         = -1;
2031                 wl_vk_buffer->release_fence_fd         = -1;
2032
2033                 wl_vk_buffer->dx                       = 0;
2034                 wl_vk_buffer->dy                       = 0;
2035                 wl_vk_buffer->width                    = tbm_surface_get_width(tbm_surface);
2036                 wl_vk_buffer->height                   = tbm_surface_get_height(tbm_surface);
2037
2038                 wl_vk_buffer->rects                    = NULL;
2039                 wl_vk_buffer->num_rects                = 0;
2040
2041                 wl_vk_buffer->need_to_commit = TPL_FALSE;
2042 #if TIZEN_FEATURE_ENABLE
2043                 wl_vk_buffer->buffer_release = NULL;
2044 #endif
2045                 tpl_gmutex_init(&wl_vk_buffer->mutex);
2046                 tpl_gcond_init(&wl_vk_buffer->cond);
2047
2048                 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2049                 {
2050                         int i;
2051                         for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
2052                                 if (wl_vk_surface->buffers[i] == NULL) break;
2053
2054                         /* If this exception is reached,
2055                          * it may be a critical memory leak problem. */
2056                         if (i == BUFFER_ARRAY_SIZE) {
2057                                 tpl_wl_vk_buffer_t *evicted_buffer = NULL;
2058                                 int evicted_idx = 0; /* evict the frontmost buffer */
2059
2060                                 evicted_buffer = wl_vk_surface->buffers[evicted_idx];
2061
2062                                 TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
2063                                                  wl_vk_surface);
2064                                 TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
2065                                                  evicted_buffer, evicted_buffer->tbm_surface,
2066                                                  status_to_string[evicted_buffer->status]);
2067
2068                                 /* [TODO] need to think about whether there will be
2069                                  * better modifications */
2070                                 wl_vk_surface->buffer_cnt--;
2071                                 wl_vk_surface->buffers[evicted_idx]      = NULL;
2072
2073                                 i = evicted_idx;
2074                         }
2075
2076                         wl_vk_surface->buffer_cnt++;
2077                         wl_vk_surface->buffers[i]          = wl_vk_buffer;
2078                         wl_vk_buffer->idx                  = i;
2079                 }
2080                 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2081
2082                 TPL_INFO("[WL_VK_BUFFER_CREATE]",
2083                                  "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2084                                  wl_vk_surface, wl_vk_buffer, tbm_surface,
2085                                  wl_vk_buffer->bo_name);
2086         }
2087
2088         return wl_vk_buffer;
2089 }
2090
/* Dequeues a renderable buffer from the swapchain's tbm_queue.
 *
 * Blocks (with the surface object lock released) until the queue becomes
 * dequeueable or timeout_ns elapses, then dequeues one tbm_surface and
 * wraps/looks up its tpl_wl_vk_buffer_t via _wl_vk_buffer_create().
 *
 * @param surface        tpl_surface whose backend data is tpl_wl_vk_surface_t.
 * @param timeout_ns     wait budget in nanoseconds; UINT64_MAX = wait forever.
 * @param release_fence  out (may be NULL): fence fd the caller must wait on
 *                       before rendering, or -1 when none. Ownership of the
 *                       fd transfers to the caller.
 * @return dequeued tbm_surface (ref'd; released via enqueue/cancel),
 *         or NULL on timeout, queue error, or pending swapchain reset.
 */
static tbm_surface_h
__tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
								   uint64_t timeout_ns,
								   int32_t *release_fence)
{
	TPL_ASSERT(surface);
	TPL_ASSERT(surface->backend.data);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->display->backend.data);
	TPL_OBJECT_CHECK_RETURN(surface, NULL);

	tpl_wl_vk_surface_t *wl_vk_surface =
		(tpl_wl_vk_surface_t *)surface->backend.data;
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)surface->display->backend.data;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;

	tbm_surface_h tbm_surface          = NULL;
	tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_NONE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);

	/* Drop the object lock while blocking so other threads (commit,
	 * release callbacks) can make the queue dequeueable. */
	TPL_OBJECT_UNLOCK(surface);
	TRACE_BEGIN("WAIT_DEQUEUEABLE");
	if (timeout_ns != UINT64_MAX) {
		/* NOTE(review): passes timeout_ns/1000 — verify against the unit
		 * tbm_surface_queue_can_dequeue_wait_timeout() expects (ms?);
		 * ns/1000 yields microseconds. */
		tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
						swapchain->tbm_queue, timeout_ns/1000);
	} else {
		/* Infinite wait; the return value is deliberately ignored and
		 * tsq_err stays TBM_SURFACE_QUEUE_ERROR_NONE. */
		tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
	}
	TRACE_END();
	TPL_OBJECT_LOCK(surface);

	if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
		TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
				timeout_ns);
		return NULL;
	} else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
				wl_vk_surface, swapchain->tbm_queue, tsq_err);
		return NULL;
	}

	/* Serialize against the thread's wayland event dispatch. */
	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

	/* A queue reset (e.g. resize) invalidates this swapchain; the caller
	 * must re-create it. */
	if (wl_vk_surface->reset) {
		TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
				  swapchain, swapchain->tbm_queue);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
										&tbm_surface);
	if (!tbm_surface) {
		TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
				swapchain->tbm_queue, wl_vk_surface, tsq_err);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	/* Ref held until the matching enqueue/cancel unrefs it. */
	tbm_surface_internal_ref(tbm_surface);

	wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");

	tpl_gmutex_lock(&wl_vk_buffer->mutex);
	wl_vk_buffer->status = DEQUEUED;

	if (release_fence) {
#if TIZEN_FEATURE_ENABLE
		/* With explicit sync, hand out the fence received from the
		 * compositor's fenced_release; ownership moves to the caller. */
		if (wl_vk_surface->surface_sync) {
			*release_fence = wl_vk_buffer->release_fence_fd;
			TPL_LOG_D("[EXPLICIT_FENCE]", "wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
					  wl_vk_surface, wl_vk_buffer, *release_fence);
			wl_vk_buffer->release_fence_fd = -1;
		} else
#endif
		{
			*release_fence = -1;
		}
	}

	wl_vk_surface->reset = TPL_FALSE;

	TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
			  wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
			  release_fence ? *release_fence : -1);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return tbm_surface;
}
2187
/* Returns a dequeued-but-unsubmitted buffer to the swapchain's tbm_queue.
 *
 * Marks the buffer RELEASED, drops the reference taken at dequeue time,
 * and cancels the dequeue so the queue slot becomes available again.
 *
 * @param surface      tpl_surface whose backend data is tpl_wl_vk_surface_t.
 * @param tbm_surface  buffer previously returned by dequeue_buffer.
 * @return TPL_ERROR_NONE on success, TPL_ERROR_INVALID_PARAMETER for an
 *         invalid surface/swapchain, TPL_ERROR_INVALID_OPERATION when
 *         tbm_surface_queue_cancel_dequeue() fails.
 */
static tpl_result_t
__tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
								  tbm_surface_h tbm_surface)
{
	TPL_ASSERT(surface);
	TPL_ASSERT(surface->backend.data);

	tpl_wl_vk_surface_t *wl_vk_surface  =
		(tpl_wl_vk_surface_t *)surface->backend.data;
	tpl_wl_vk_swapchain_t *swapchain    = NULL;
	tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
	tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;

	TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
								  TPL_ERROR_INVALID_PARAMETER);

	swapchain = wl_vk_surface->swapchain;
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
								 TPL_ERROR_INVALID_PARAMETER);

	wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
	if (wl_vk_buffer) {
		/* Status update is guarded by the per-buffer mutex. */
		tpl_gmutex_lock(&wl_vk_buffer->mutex);
		wl_vk_buffer->status = RELEASED;
		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	}

	/* Drop the ref taken in dequeue_buffer. */
	tbm_surface_internal_unref(tbm_surface);

	TPL_INFO("[CANCEL BUFFER]",
			 "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
			  wl_vk_surface, swapchain, tbm_surface,
			  _get_tbm_surface_bo_name(tbm_surface));

	tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
											   tbm_surface);
	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
		return TPL_ERROR_INVALID_OPERATION;
	}

	return TPL_ERROR_NONE;
}
2232
2233 static tpl_result_t
2234 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2235                                                                            tbm_surface_h tbm_surface,
2236                                                                            int num_rects, const int *rects,
2237                                                                            int32_t acquire_fence)
2238 {
2239         TPL_ASSERT(surface);
2240         TPL_ASSERT(surface->display);
2241         TPL_ASSERT(surface->backend.data);
2242         TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2243
2244         tpl_wl_vk_surface_t *wl_vk_surface  =
2245                 (tpl_wl_vk_surface_t *) surface->backend.data;
2246         tpl_wl_vk_swapchain_t *swapchain    = wl_vk_surface->swapchain;
2247         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2248         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2249         int bo_name                         = -1;
2250
2251         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2252         TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2253         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2254                                                                   TPL_ERROR_INVALID_PARAMETER);
2255
2256         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2257         if (!wl_vk_buffer) {
2258                 TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
2259                 return TPL_ERROR_INVALID_PARAMETER;
2260         }
2261
2262         bo_name = wl_vk_buffer->bo_name;
2263
2264         tpl_gmutex_lock(&wl_vk_buffer->mutex);
2265
2266         /* If there are received region information, save it to wl_vk_buffer */
2267         if (num_rects && rects) {
2268                 if (wl_vk_buffer->rects != NULL) {
2269                         free(wl_vk_buffer->rects);
2270                         wl_vk_buffer->rects = NULL;
2271                         wl_vk_buffer->num_rects = 0;
2272                 }
2273
2274                 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2275                 wl_vk_buffer->num_rects = num_rects;
2276
2277                 if (wl_vk_buffer->rects) {
2278                         memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2279                                    sizeof(int) * 4 * num_rects);
2280                 } else {
2281                         TPL_ERR("Failed to allocate memory for rects info.");
2282                 }
2283         }
2284
2285         if (wl_vk_buffer->acquire_fence_fd != -1)
2286                 close(wl_vk_buffer->acquire_fence_fd);
2287
2288         wl_vk_buffer->acquire_fence_fd = acquire_fence;
2289
2290         wl_vk_buffer->status = ENQUEUED;
2291         TPL_LOG_T("WL_VK",
2292                           "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2293                           wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2294
2295         tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2296
2297         tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2298                                                                                 tbm_surface);
2299         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2300                 tbm_surface_internal_unref(tbm_surface);
2301                 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2302                                 tbm_surface, wl_vk_surface, tsq_err);
2303                 return TPL_ERROR_INVALID_OPERATION;
2304         }
2305
2306         tbm_surface_internal_unref(tbm_surface);
2307
2308         return TPL_ERROR_NONE;
2309 }
2310
/* Listener for wl_buffer.release events (non-explicit-sync path).
 * __cb_wl_buffer_release takes a struct wl_proxy * as its second
 * parameter rather than struct wl_buffer *, hence the (void *) cast. */
static const struct wl_buffer_listener wl_buffer_release_listener = {
	(void *)__cb_wl_buffer_release,
};
2314
/* Drains the swapchain's tbm_queue on the tpl thread: acquires every
 * acquirable buffer, lazily creates its wl_buffer, and either commits it
 * immediately or parks it in vblank_waiting_buffers when vblank pacing
 * is active.
 *
 * @param wl_vk_surface  surface whose swapchain queue is drained.
 * @return TPL_ERROR_NONE on success, TPL_ERROR_INVALID_PARAMETER when no
 *         swapchain exists, TPL_ERROR_INVALID_OPERATION on acquire failure.
 */
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_h tbm_surface            = NULL;
	tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display   = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain     = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer     = NULL;
	tpl_bool_t ready_to_commit           = TPL_TRUE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

	/* Acquire everything that is ready; one enqueue may have made
	 * several buffers acquirable. */
	while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
		tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
											&tbm_surface);
		if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
			TPL_ERR("Failed to acquire from tbm_queue(%p)",
					swapchain->tbm_queue);
			return TPL_ERROR_INVALID_OPERATION;
		}

		/* Ref held until the buffer is released back to the queue. */
		tbm_surface_internal_ref(tbm_surface);

		wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
									   "wl_vk_buffer sould be not NULL");

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		wl_vk_buffer->status = ACQUIRED;

		TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				  wl_vk_buffer, tbm_surface,
				  _get_tbm_surface_bo_name(tbm_surface));

		/* Lazily create the wl_buffer on first acquire. */
		if (wl_vk_buffer->wl_buffer == NULL) {
			wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
						wl_vk_display->wl_tbm_client, tbm_surface);

			if (!wl_vk_buffer->wl_buffer) {
				TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
						 wl_vk_display->wl_tbm_client, tbm_surface);
			} else {
				/* Only listen for wl_buffer.release when explicit sync
				 * will not deliver the release event instead. */
				if (wl_vk_buffer->acquire_fence_fd == -1 ||
					wl_vk_display->use_explicit_sync == TPL_FALSE) {
					wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
										   &wl_buffer_release_listener, wl_vk_buffer);
				}

				TPL_LOG_T("WL_VK",
						  "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
						  wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
			}
		}

		/* Commit now unless we are pacing commits to vblank and the
		 * current vblank has not fired yet. */
		if (!wl_vk_surface->vblank_enable || wl_vk_surface->vblank_done)
			ready_to_commit = TPL_TRUE;
		else {
			wl_vk_buffer->status = WAITING_VBLANK;
			__tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
			ready_to_commit = TPL_FALSE;
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		if (ready_to_commit)
			_thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
	}

	return TPL_ERROR_NONE;
}
2386
2387 #if TIZEN_FEATURE_ENABLE
/* zwp_linux_buffer_release_v1.fenced_release handler (explicit sync).
 *
 * The compositor is done with the buffer once @fence signals. Stores the
 * fence fd on the buffer (handed to the client later as the dequeue
 * release_fence), marks the buffer RELEASED, and returns it to the
 * tbm_queue.
 *
 * @param data     tpl_wl_vk_buffer_t the release object belongs to.
 * @param release  the per-commit zwp_linux_buffer_release_v1 proxy.
 * @param fence    sync fd to wait on before reusing the buffer.
 */
static void
__cb_buffer_fenced_release(void *data,
						   struct zwp_linux_buffer_release_v1 *release,
						   int32_t fence)
{
	tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface         = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain   = NULL;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);
			return;
		}

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);
		/* Only a COMMITTED buffer transitions to RELEASED here.
		 * NOTE(review): when status != COMMITTED, @fence is neither
		 * stored nor closed — confirm this path cannot leak the fd. */
		if (wl_vk_buffer->status == COMMITTED) {
			tbm_surface_queue_error_e tsq_err;

			/* The release object is per-commit; destroy it now. */
			zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
			wl_vk_buffer->buffer_release = NULL;

			/* Ownership of the fence fd moves to the buffer. */
			wl_vk_buffer->release_fence_fd = fence;
			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
					   wl_vk_buffer->bo_name,
					   fence);
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							wl_vk_buffer->bo_name);

			TPL_LOG_T("WL_VK",
					  "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
					  wl_vk_buffer, tbm_surface,
					  wl_vk_buffer->bo_name,
					  fence);

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
												tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			/* Drop the ref taken at acquire time. */
			tbm_surface_internal_unref(tbm_surface);
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	} else {
		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
2448
/* zwp_linux_buffer_release_v1.immediate_release handler (explicit sync).
 *
 * Identical to __cb_buffer_fenced_release except that no fence is
 * delivered: the buffer is immediately reusable, so release_fence_fd is
 * set to -1 before the buffer is returned to the tbm_queue.
 *
 * @param data     tpl_wl_vk_buffer_t the release object belongs to.
 * @param release  the per-commit zwp_linux_buffer_release_v1 proxy.
 */
static void
__cb_buffer_immediate_release(void *data,
							  struct zwp_linux_buffer_release_v1 *release)
{
	tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface           = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain   = NULL;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);
			return;
		}

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);
		/* Only a COMMITTED buffer transitions to RELEASED here. */
		if (wl_vk_buffer->status == COMMITTED) {
			tbm_surface_queue_error_e tsq_err;

			/* The release object is per-commit; destroy it now. */
			zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
			wl_vk_buffer->buffer_release = NULL;

			/* No fence: the buffer is reusable right away. */
			wl_vk_buffer->release_fence_fd = -1;
			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
					   _get_tbm_surface_bo_name(tbm_surface));
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							_get_tbm_surface_bo_name(tbm_surface));

			TPL_LOG_T("WL_VK",
					  "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
					  wl_vk_buffer, tbm_surface,
					  _get_tbm_surface_bo_name(tbm_surface));

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
												tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			/* Drop the ref taken at acquire time. */
			tbm_surface_internal_unref(tbm_surface);
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	} else {
		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
2506
/* Listener for explicit-sync buffer release events.
 * ("listner" spelling kept as-is; the symbol is referenced elsewhere
 * in this file.) */
static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
	__cb_buffer_fenced_release,
	__cb_buffer_immediate_release,
};
2511 #endif
2512
/* wl_buffer.release handler (non-explicit-sync path).
 *
 * The compositor no longer reads the buffer: returns it to the tbm_queue,
 * marks it RELEASED, and drops the ref taken at acquire time.
 *
 * @param data       tpl_wl_vk_buffer_t the wl_buffer belongs to.
 * @param wl_buffer  releasing proxy (typed wl_proxy *; registered via a
 *                   cast in wl_buffer_release_listener).
 */
static void
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
{
	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain   = NULL;
		tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);
			return;
		}

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		/* Only a COMMITTED buffer transitions to RELEASED here. */
		if (wl_vk_buffer->status == COMMITTED) {

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
												tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							wl_vk_buffer->bo_name);

			TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
					  wl_vk_buffer->wl_buffer, tbm_surface,
					  wl_vk_buffer->bo_name);

			/* Drop the ref taken at acquire time. */
			tbm_surface_internal_unref(tbm_surface);
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	} else {
		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
2563
/* tdm vblank callback: paces commits of buffers parked for vblank.
 *
 * Marks the surface's vblank as done, then commits buffers from
 * vblank_waiting_buffers. Normally one buffer is committed per vblank;
 * on a tdm error (e.g. TDM_ERROR_TIMEOUT) or when post_interval <= 0 the
 * whole list is flushed. Finally vblank pacing is kept enabled only
 * while post_interval > 0.
 *
 * @param vblank     tdm vblank object that fired.
 * @param error      tdm result of the wait (TDM_ERROR_TIMEOUT tolerated).
 * @param sequence   vblank sequence number (unused).
 * @param tv_sec     timestamp seconds (unused).
 * @param tv_usec    timestamp microseconds (unused).
 * @param user_data  tpl_wl_vk_surface_t that armed the wait.
 */
static void
__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
					   unsigned int sequence, unsigned int tv_sec,
					   unsigned int tv_usec, void *user_data)
{
	tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;

	TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK");
	TPL_LOG_D("[VBLANK_DONE]", "wl_vk_surface(%p)", wl_vk_surface);

	if (error == TDM_ERROR_TIMEOUT)
		TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
				 wl_vk_surface);

	/* List and flags are protected by the surface mutex. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->vblank_done = TPL_TRUE;

	if (wl_vk_surface->vblank && wl_vk_surface->vblank_waiting_buffers) {
		tpl_bool_t is_empty = TPL_TRUE;
		do {
			tpl_wl_vk_buffer_t* wl_vk_buffer =(tpl_wl_vk_buffer_t *)
				__tpl_list_pop_front(wl_vk_surface->vblank_waiting_buffers, NULL);
			is_empty = __tpl_list_is_empty(wl_vk_surface->vblank_waiting_buffers);

			if (!wl_vk_buffer) break;

			_thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);

			/* If tdm error such as TIMEOUT occured,
			 * flush all vblank waiting buffers of its wl_vk_surface.
			 * Otherwise, only one wl_vk_buffer will be commited per one vblank event.
			 */
			if (error == TDM_ERROR_NONE && wl_vk_surface->post_interval > 0)
				break;
		} while (!is_empty);

		wl_vk_surface->vblank_enable = (wl_vk_surface->post_interval > 0);
	}
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
2604
2605 static tpl_result_t
2606 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2607 {
2608         tdm_error tdm_err                     = TDM_ERROR_NONE;
2609         tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
2610
2611         if (wl_vk_surface->vblank == NULL) {
2612                 wl_vk_surface->vblank =
2613                         _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client);
2614                 if (!wl_vk_surface->vblank) {
2615                         TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2616                                          wl_vk_surface);
2617                         return TPL_ERROR_OUT_OF_MEMORY;
2618                 } else {
2619                         wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
2620                         if (!wl_vk_surface->vblank_waiting_buffers) {
2621                                 tdm_client_vblank_destroy(wl_vk_surface->vblank);
2622                                 wl_vk_surface->vblank = NULL;
2623                         }
2624                 }
2625         }
2626
2627         tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2628                         wl_vk_surface->post_interval,
2629                         __cb_tdm_client_vblank,
2630                         (void *)wl_vk_surface);
2631
2632         if (tdm_err == TDM_ERROR_NONE) {
2633                 wl_vk_surface->vblank_done = TPL_FALSE;
2634                 TRACE_ASYNC_BEGIN((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2635         } else {
2636                 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2637                 return TPL_ERROR_INVALID_OPERATION;
2638         }
2639
2640         return TPL_ERROR_NONE;
2641 }
2642
/* Attaches wl_vk_buffer to the wl_surface, posts damage, and commits
 * (runs on the tpl thread).
 *
 * After the commit the buffer status becomes COMMITTED and its cond is
 * signalled so threads waiting on the buffer can proceed. When
 * post_interval > 0 and a vblank object exists, the next vblank wait is
 * armed so subsequent commits are paced.
 */
static void
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
						  tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
	struct wl_surface *wl_surface         = wl_vk_surface->wl_surface;
	uint32_t version;

	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
								   "wl_vk_buffer sould be not NULL");

	/* Lazily create the wl_buffer if the acquire path did not. */
	if (wl_vk_buffer->wl_buffer == NULL) {
		wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
											wl_vk_display->wl_tbm_client,
											wl_vk_buffer->tbm_surface);
		/* Only listen for wl_buffer.release when explicit sync will not
		 * deliver the release event instead. */
		if (wl_vk_buffer->wl_buffer &&
			(wl_vk_buffer->acquire_fence_fd == -1 ||
			 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
				wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
									   &wl_buffer_release_listener, wl_vk_buffer);
		}
	}
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
								   "[FATAL] Failed to create wl_buffer");

	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);

	wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
					  wl_vk_buffer->dx, wl_vk_buffer->dy);

	if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
		/* No damage info: damage the whole buffer.
		 * wl_surface.damage_buffer requires wl_surface version >= 4. */
		if (version < 4) {
			wl_surface_damage(wl_surface,
							  wl_vk_buffer->dx, wl_vk_buffer->dy,
							  wl_vk_buffer->width, wl_vk_buffer->height);
		} else {
			wl_surface_damage_buffer(wl_surface,
									 0, 0,
									 wl_vk_buffer->width, wl_vk_buffer->height);
		}
	} else {
		int i;
		for (i = 0; i < wl_vk_buffer->num_rects; i++) {
			/* y is flipped as height - (y + h) — rects presumably use a
			 * bottom-left origin; confirm against the Vulkan WSI caller. */
			int inverted_y =
				wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
						wl_vk_buffer->rects[i * 4 + 3]);
			if (version < 4) {
				wl_surface_damage(wl_surface,
								  wl_vk_buffer->rects[i * 4 + 0],
								  inverted_y,
								  wl_vk_buffer->rects[i * 4 + 2],
								  wl_vk_buffer->rects[i * 4 + 3]);
			} else {
				wl_surface_damage_buffer(wl_surface,
										 wl_vk_buffer->rects[i * 4 + 0],
										 inverted_y,
										 wl_vk_buffer->rects[i * 4 + 2],
										 wl_vk_buffer->rects[i * 4 + 3]);
			}
		}
	}

#if TIZEN_FEATURE_ENABLE
	/* Explicit sync: hand the acquire fence to the compositor and set up
	 * a per-commit release object for the fenced/immediate release. */
	if (wl_vk_display->use_explicit_sync &&
		wl_vk_surface->surface_sync &&
		wl_vk_buffer->acquire_fence_fd != -1) {

		zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
															   wl_vk_buffer->acquire_fence_fd);
		TPL_LOG_D("[SET_ACQUIRE_FENCE][1/2]", "wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
				  wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
		/* The protocol dup'd the fd; our copy can be closed now. */
		close(wl_vk_buffer->acquire_fence_fd);
		wl_vk_buffer->acquire_fence_fd = -1;

		wl_vk_buffer->buffer_release =
			zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
		if (!wl_vk_buffer->buffer_release) {
			TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
		} else {
			zwp_linux_buffer_release_v1_add_listener(
				wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
			TPL_LOG_D("[SET_ACQUIRE_FENCE][2/2]", "add explicit_sync_release_listener.");
		}
	}
#endif

	wl_surface_commit(wl_surface);

	wl_display_flush(wl_vk_display->wl_display);

	TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
					  wl_vk_buffer->bo_name);

	tpl_gmutex_lock(&wl_vk_buffer->mutex);

	wl_vk_buffer->need_to_commit   = TPL_FALSE;
	wl_vk_buffer->status           = COMMITTED;

	/* Wake any thread waiting for this buffer to reach COMMITTED. */
	tpl_gcond_signal(&wl_vk_buffer->cond);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	TPL_LOG_T("WL_VK",
			  "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
			  wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
			  wl_vk_buffer->bo_name);

	/* Pace the next commit to vblank when an interval is requested. */
	if (wl_vk_surface->post_interval > 0 && wl_vk_surface->vblank != NULL) {
		wl_vk_surface->vblank_enable = TPL_TRUE;
		if (_thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
			TPL_ERR("Failed to set wait vblank.");
	}
}
2756
2757 tpl_bool_t
2758 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2759 {
2760         if (!native_dpy) return TPL_FALSE;
2761
2762         if (_check_native_handle_is_wl_display(native_dpy))
2763                 return TPL_TRUE;
2764
2765         return TPL_FALSE;
2766 }
2767
2768 void
2769 __tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
2770 {
2771         TPL_ASSERT(backend);
2772
2773         backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2774         backend->data = NULL;
2775
2776         backend->init = __tpl_wl_vk_display_init;
2777         backend->fini = __tpl_wl_vk_display_fini;
2778         backend->query_config = __tpl_wl_vk_display_query_config;
2779         backend->filter_config = __tpl_wl_vk_display_filter_config;
2780         backend->query_window_supported_buffer_count =
2781                 __tpl_wl_vk_display_query_window_supported_buffer_count;
2782         backend->query_window_supported_present_modes =
2783                 __tpl_wl_vk_display_query_window_supported_present_modes;
2784 }
2785
2786 void
2787 __tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
2788 {
2789         TPL_ASSERT(backend);
2790
2791         backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2792         backend->data = NULL;
2793
2794         backend->init = __tpl_wl_vk_surface_init;
2795         backend->fini = __tpl_wl_vk_surface_fini;
2796         backend->validate = __tpl_wl_vk_surface_validate;
2797         backend->cancel_dequeued_buffer =
2798                 __tpl_wl_vk_surface_cancel_buffer;
2799         backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
2800         backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
2801         backend->get_swapchain_buffers =
2802                 __tpl_wl_vk_surface_get_swapchain_buffers;
2803         backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
2804         backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
2805         backend->set_post_interval =
2806                 __tpl_wl_vk_surface_set_post_interval;
2807 }
2808
2809 static int
2810 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2811 {
2812         return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
2813 }
2814
2815 static void
2816 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2817 {
2818         int idx = 0;
2819
2820         tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2821         TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2822                          wl_vk_surface, wl_vk_surface->buffer_cnt);
2823         for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2824                 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2825                 if (wl_vk_buffer) {
2826                         TPL_INFO("[INFO]",
2827                                          "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2828                                          idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2829                                          wl_vk_buffer->bo_name,
2830                                          status_to_string[wl_vk_buffer->status]);
2831                 }
2832         }
2833         tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2834 }