/*
 * src/tpl_wl_vk_thread.c
 * From platform/core/uifw/libtpl-egl.git (commit c1e20177db23f89bb5f83fab68b83e85d1f9f6a5)
 */
/* NOTE(review): defines 'inline' as '__inline__' and then immediately
 * undefines it — presumably to cancel an 'inline' macro injected by the
 * build system or an earlier header before the includes below; confirm
 * against the build configuration. */
#define inline __inline__
#undef inline
3
4 #include "tpl_internal.h"
5
6 #include <string.h>
7 #include <fcntl.h>
8 #include <unistd.h>
9 #include <sys/eventfd.h>
10
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
15
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
19
20 #include <tdm_client.h>
21
22 #ifndef TIZEN_FEATURE_ENABLE
23 #define TIZEN_FEATURE_ENABLE 1
24 #endif
25
26 #if TIZEN_FEATURE_ENABLE
27 #include <tizen-surface-client-protocol.h>
28 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
29 #endif
30
31 #include "tpl_utils_gthread.h"
32
/* Capacity of the per-surface wl_vk_buffer tracking array (see
 * _tpl_wl_vk_surface.buffers). */
#define BUFFER_ARRAY_SIZE 10
/* Default buffer count advertised as max_buffer for Vulkan clients. */
#define VK_CLIENT_QUEUE_SIZE 3

/* The address of this variable serves as a process-unique key for
 * attaching wl_vk_buffer data to a tbm_surface. */
static int wl_vk_buffer_key;
#define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)

/* Forward typedefs for this backend's private object types. */
typedef struct _tpl_wl_vk_display       tpl_wl_vk_display_t;
typedef struct _tpl_wl_vk_surface       tpl_wl_vk_surface_t;
typedef struct _tpl_wl_vk_swapchain     tpl_wl_vk_swapchain_t;
typedef struct _tpl_wl_vk_buffer        tpl_wl_vk_buffer_t;
43
/* Backend-private state for one tpl_display: owns the dedicated event
 * thread, the wayland event queue, the wayland-tbm client and the
 * tdm client used for vblank waiting. */
struct _tpl_wl_vk_display {
	tpl_gsource                  *disp_source;  /* gsource polling the wl_display fd */
	tpl_gthread                  *thread;       /* dedicated "wl_vk_thread" */
	tpl_gmutex                    wl_event_mutex; /* guards ev_queue dispatch */

	struct wl_display            *wl_display;
	struct wl_event_queue        *ev_queue;     /* queue for backend-owned proxies */
	struct wayland_tbm_client    *wl_tbm_client;
	int                           last_error; /* errno of the last wl_display error*/

	tpl_bool_t                    wl_initialized;
	tpl_bool_t                    tdm_initialized;

	tdm_client                   *tdm_client;
	tpl_gsource                  *tdm_source;   /* gsource polling tdm_display_fd */
	int                           tdm_display_fd;

	tpl_bool_t                    use_wait_vblank;   /* TPL_WAIT_VBLANK env toggle */
	tpl_bool_t                    use_explicit_sync; /* TPL_EFS env + registry bind */
	tpl_bool_t                    prepared;          /* wl_display read prepared */

	/* device surface capabilities */
	int                           min_buffer;
	int                           max_buffer;
	int                           present_modes;
#if TIZEN_FEATURE_ENABLE
	struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
#endif
};
73
/* Vulkan swapchain representation: a tbm_surface_queue plus the
 * properties it was created with. Reference-counted via ref_cnt. */
struct _tpl_wl_vk_swapchain {
	tpl_wl_vk_surface_t          *wl_vk_surface; /* owning surface */

	tbm_surface_queue_h           tbm_queue;

	/* creation-time swapchain parameters */
	struct {
		int                       width;
		int                       height;
		tbm_format                format;
		int                       buffer_count;
		int                       present_mode;
	} properties;

	/* buffers exported to the client; count follows properties.buffer_count */
	tbm_surface_h                *swapchain_buffers;

	tpl_util_atomic_uint          ref_cnt;
};
91
/* Message codes sent to the surface gsource to request work on the
 * event thread (see _tpl_wl_vk_surface.sent_message). */
typedef enum surf_message {
	NONE_MESSAGE = 0,
	INIT_SURFACE,   /* perform thread-side surface initialization */
	CREATE_QUEUE,   /* create the swapchain tbm_surface_queue */
	DESTROY_QUEUE,  /* destroy the swapchain tbm_surface_queue */
	ACQUIRABLE,     /* a buffer became acquirable from the queue */
} surf_message;
99
/* Backend-private state for one tpl_surface: tracks the swapchain,
 * in-flight buffers, vblank state, and the messaging used to run
 * operations on the display's event thread. */
struct _tpl_wl_vk_surface {
	tpl_gsource                  *surf_source; /* gsource for surf_message dispatch */

	tpl_wl_vk_swapchain_t        *swapchain;

	struct wl_surface            *wl_surface;
#if TIZEN_FEATURE_ENABLE
	struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
#endif
	tdm_client_vblank            *vblank;

	/* surface information */
	int                           render_done_cnt;

	tpl_wl_vk_display_t          *wl_vk_display;
	tpl_surface_t                *tpl_surface;

	/* wl_vk_buffer array for buffer tracing */
	tpl_wl_vk_buffer_t           *buffers[BUFFER_ARRAY_SIZE];
	int                           buffer_cnt; /* the number of using wl_vk_buffers */
	tpl_gmutex                    buffers_mutex; /* guards buffers[] / buffer_cnt */

	tpl_list_t                   *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */

	/* protects sent_message handshakes between client and event thread */
	tpl_gmutex                    surf_mutex;
	tpl_gcond                     surf_cond;

	/* for waiting draw done */
	tpl_bool_t                    is_activated;
	tpl_bool_t                    reset; /* TRUE if queue reseted by external  */
	tpl_bool_t                    vblank_done;

	surf_message                  sent_message; /* pending request for the thread */

	int                           post_interval;
};
136
/* Lifecycle states of a wl_vk_buffer as it moves from the tbm queue to
 * the compositor and back. Order matters: status comparisons and the
 * status_to_string[] table both rely on these exact values. */
typedef enum buffer_status {
	RELEASED = 0,             // 0
	DEQUEUED,                 // 1
	ENQUEUED,                 // 2
	ACQUIRED,                 // 3
	WAITING_SIGNALED,         // 4
	WAITING_VBLANK,           // 5
	COMMITTED,                // 6
} buffer_status_t;
146
/* Human-readable names for buffer_status_t, used by tracing/logging.
 * Must stay index-aligned with the buffer_status_t enum above. */
static const char *status_to_string[7] = {
	"RELEASED",                 // 0
	"DEQUEUED",                 // 1
	"ENQUEUED",                 // 2
	"ACQUIRED",                 // 3
	"WAITING_SIGNALED",         // 4
	"WAITING_VBLANK",           // 5
	"COMMITTED",                // 6
};
156
/* Per-buffer tracking record attached to a tbm_surface (keyed by
 * KEY_WL_VK_BUFFER). Carries the wl_buffer proxy, commit geometry,
 * fence fds and the status used for buffer tracing. */
struct _tpl_wl_vk_buffer {
	tbm_surface_h                 tbm_surface;
	int                           bo_name; /* bo name, used as trace identifier */

	struct wl_buffer             *wl_buffer;
	int                           dx, dy; /* position to attach to wl_surface */
	int                           width, height; /* size to attach to wl_surface */

	buffer_status_t               status; /* for tracing buffer status */
	int                           idx; /* position index in buffers array of wl_vk_surface */

	/* for damage region */
	int                           num_rects;
	int                          *rects;

	/* for checking need_to_commit (frontbuffer mode) */
	tpl_bool_t                    need_to_commit;

#if TIZEN_FEATURE_ENABLE
	/* to get release event via zwp_linux_buffer_release_v1 */
	struct zwp_linux_buffer_release_v1 *buffer_release;
#endif

	/* each buffers own its release_fence_fd, until it passes ownership
	 * to it to EGL */
	int32_t                       release_fence_fd;

	/* each buffers own its acquire_fence_fd.
	 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
	 * will be passed to display server
	 * Otherwise it will be used as a fence waiting for render done
	 * on tpl thread */
	int32_t                       acquire_fence_fd;

	/* guards status transitions and the release/commit handshake */
	tpl_gmutex                    mutex;
	tpl_gcond                     cond;

	tpl_wl_vk_surface_t          *wl_vk_surface; /* back-pointer to owner */
};
196
/* Forward declarations for helpers defined later in this file. */
static void
_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
static int
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
static void
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
static tpl_result_t
_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
static void
_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
static void
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
						  tpl_wl_vk_buffer_t *wl_vk_buffer);
214
215 static tpl_bool_t
216 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
217 {
218         struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
219
220         if (!wl_vk_native_dpy) {
221                 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
222                 return TPL_FALSE;
223         }
224
225         /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
226            is a memory address pointing the structure of wl_display_interface. */
227         if (wl_vk_native_dpy == &wl_display_interface)
228                 return TPL_TRUE;
229
230         if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
231                                 strlen(wl_display_interface.name)) == 0) {
232                 return TPL_TRUE;
233         }
234
235         return TPL_FALSE;
236 }
237
238 static tpl_bool_t
239 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
240 {
241         tpl_wl_vk_display_t        *wl_vk_display = NULL;
242         tdm_error                   tdm_err = TDM_ERROR_NONE;
243
244         TPL_IGNORE(message);
245
246         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
247         if (!wl_vk_display) {
248                 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
249                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
250                 return TPL_FALSE;
251         }
252
253         tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client);
254
255         /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
256          * When tdm_source is no longer available due to an unexpected situation,
257          * wl_vk_thread must remove it from the thread and destroy it.
258          * In that case, tdm_vblank can no longer be used for surfaces and displays
259          * that used this tdm_source. */
260         if (tdm_err != TDM_ERROR_NONE) {
261                 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
262                                 tdm_err);
263                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
264
265                 tpl_gsource_destroy(gsource, TPL_FALSE);
266
267                 wl_vk_display->tdm_source = NULL;
268
269                 return TPL_FALSE;
270         }
271
272         return TPL_TRUE;
273 }
274
275 static void
276 __thread_func_tdm_finalize(tpl_gsource *gsource)
277 {
278         tpl_wl_vk_display_t *wl_vk_display = NULL;
279
280         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
281
282         TPL_LOG_T("WL_VK",
283                           "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)",
284                           wl_vk_display, wl_vk_display->tdm_client, gsource);
285
286         if (wl_vk_display->tdm_client) {
287                 tdm_client_destroy(wl_vk_display->tdm_client);
288                 wl_vk_display->tdm_client = NULL;
289                 wl_vk_display->tdm_display_fd = -1;
290         }
291
292         wl_vk_display->tdm_initialized = TPL_FALSE;
293 }
294
/* gsource vtable for the tdm client fd: no prepare/check phases, only
 * dispatch on fd readiness and finalize on removal. */
static tpl_gsource_functions tdm_funcs = {
	.prepare  = NULL,
	.check    = NULL,
	.dispatch = __thread_func_tdm_dispatch,
	.finalize = __thread_func_tdm_finalize,
};
301
302 static tpl_result_t
303 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
304 {
305         tdm_client       *tdm_client = NULL;
306         int               tdm_display_fd = -1;
307         tdm_error         tdm_err = TDM_ERROR_NONE;
308
309         tdm_client = tdm_client_create(&tdm_err);
310         if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
311                 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
312                 return TPL_ERROR_INVALID_OPERATION;
313         }
314
315         tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
316         if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
317                 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
318                 tdm_client_destroy(tdm_client);
319                 return TPL_ERROR_INVALID_OPERATION;
320         }
321
322         wl_vk_display->tdm_display_fd  = tdm_display_fd;
323         wl_vk_display->tdm_client      = tdm_client;
324         wl_vk_display->tdm_source      = NULL;
325         wl_vk_display->tdm_initialized = TPL_TRUE;
326
327         TPL_INFO("[TDM_CLIENT_INIT]",
328                          "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
329                          wl_vk_display, tdm_client, tdm_display_fd);
330
331         return TPL_ERROR_NONE;
332 }
333
/* wl_registry global listener: binds zwp_linux_explicit_synchronization_v1
 * when advertised, unless disabled via the TPL_EFS environment variable.
 * use_explicit_sync is only set TRUE when the bind actually succeeded. */
static void
__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
								 uint32_t name, const char *interface,
								 uint32_t version)
{
#if TIZEN_FEATURE_ENABLE
	tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;

	if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
		char *env = tpl_getenv("TPL_EFS");
		if (env && !atoi(env)) {
			wl_vk_display->use_explicit_sync = TPL_FALSE;
		} else {
			wl_vk_display->explicit_sync =
					wl_registry_bind(wl_registry, name,
									 &zwp_linux_explicit_synchronization_v1_interface, 1);
			/* wl_registry_bind can fail (proxy allocation); do not claim
			 * explicit sync support without a bound proxy. */
			if (wl_vk_display->explicit_sync) {
				wl_vk_display->use_explicit_sync = TPL_TRUE;
				TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
			} else {
				TPL_WARN("Failed to bind zwp_linux_explicit_synchronization_v1");
				wl_vk_display->use_explicit_sync = TPL_FALSE;
			}
		}
	}
#endif
}
356
/* wl_registry global_remove listener: nothing to clean up — the only
 * bound global (explicit_sync) is torn down with the display. */
static void
__cb_wl_resistry_global_remove_callback(void *data,
										struct wl_registry *wl_registry,
										uint32_t name)
{
}
363
/* Listener vtable passed to wl_registry_add_listener during display init. */
static const struct wl_registry_listener registry_listener = {
	__cb_wl_resistry_global_callback,
	__cb_wl_resistry_global_remove_callback
};
368
369 static void
370 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
371                                           const char *func_name)
372 {
373         int dpy_err;
374         char buf[1024];
375         strerror_r(errno, buf, sizeof(buf));
376
377         if (wl_vk_display->last_error == errno)
378                 return;
379
380         TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
381
382         dpy_err = wl_display_get_error(wl_vk_display->wl_display);
383         if (dpy_err == EPROTO) {
384                 const struct wl_interface *err_interface;
385                 uint32_t err_proxy_id, err_code;
386                 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
387                                                                                                  &err_interface,
388                                                                                                  &err_proxy_id);
389                 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
390                                 err_interface->name, err_code, err_proxy_id);
391         }
392
393         wl_vk_display->last_error = errno;
394 }
395
/* Thread-side wayland initialization: creates the backend event queue,
 * discovers globals via a temporary registry roundtrip, and initializes
 * the wayland-tbm client. Proxies owned by the backend (wl_tbm,
 * explicit_sync) are moved onto ev_queue so they are dispatched only by
 * this thread. Returns TPL_ERROR_NONE on success.
 * NOTE(review): on the error paths ev_queue is not destroyed here; it is
 * only released by _thread_wl_display_fini — confirm callers always run
 * fini, otherwise this leaks on partial init. */
static tpl_result_t
_thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
{
	struct wl_registry *registry                = NULL;
	struct wl_event_queue *queue                = NULL;
	struct wl_display *display_wrapper          = NULL;
	struct wl_proxy *wl_tbm                     = NULL;
	struct wayland_tbm_client *wl_tbm_client    = NULL;
	int ret;
	tpl_result_t result = TPL_ERROR_NONE;

	/* Temporary queue used only for the registry roundtrip below;
	 * destroyed at 'fini'. */
	queue = wl_display_create_queue(wl_vk_display->wl_display);
	if (!queue) {
		TPL_ERR("Failed to create wl_queue wl_display(%p)",
				wl_vk_display->wl_display);
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	/* Long-lived queue for all backend-owned proxies. */
	wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
	if (!wl_vk_display->ev_queue) {
		TPL_ERR("Failed to create wl_queue wl_display(%p)",
				wl_vk_display->wl_display);
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	/* Wrapper lets us retarget the registry onto 'queue' without
	 * affecting the client's own wl_display default queue. */
	display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
	if (!display_wrapper) {
		TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
				wl_vk_display->wl_display);
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);

	registry = wl_display_get_registry(display_wrapper);
	if (!registry) {
		TPL_ERR("Failed to create wl_registry");
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	/* Wrapper is only needed to create the registry. */
	wl_proxy_wrapper_destroy(display_wrapper);
	display_wrapper = NULL;

	wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
	if (!wl_tbm_client) {
		TPL_ERR("Failed to initialize wl_tbm_client.");
		result = TPL_ERROR_INVALID_CONNECTION;
		goto fini;
	}

	wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
	if (!wl_tbm) {
		TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
		result = TPL_ERROR_INVALID_CONNECTION;
		goto fini;
	}

	/* wl_tbm events must be handled on this thread's queue. */
	wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
	wl_vk_display->wl_tbm_client = wl_tbm_client;

	if (wl_registry_add_listener(registry, &registry_listener,
								 wl_vk_display)) {
		TPL_ERR("Failed to wl_registry_add_listener");
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	/* Blocks until the globals advertised so far have been delivered;
	 * __cb_wl_resistry_global_callback may bind explicit_sync here. */
	ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
	if (ret == -1) {
		_wl_display_print_err(wl_vk_display, "roundtrip_queue");
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

#if TIZEN_FEATURE_ENABLE
	/* explicit_sync was bound on 'queue'; move it to the long-lived
	 * ev_queue before 'queue' is destroyed. */
	if (wl_vk_display->explicit_sync) {
		wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
						   wl_vk_display->ev_queue);
		TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
				  wl_vk_display->explicit_sync);
	}
#endif

	wl_vk_display->wl_initialized = TPL_TRUE;

	TPL_INFO("[WAYLAND_INIT]",
			 "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
			 wl_vk_display, wl_vk_display->wl_display,
			 wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
#if TIZEN_FEATURE_ENABLE
	TPL_INFO("[WAYLAND_INIT]",
			 "explicit_sync(%p)",
			 wl_vk_display->explicit_sync);
#endif
fini:
	/* Shared cleanup for both success and failure paths. */
	if (display_wrapper)
		wl_proxy_wrapper_destroy(display_wrapper);
	if (registry)
		wl_registry_destroy(registry);
	if (queue)
		wl_event_queue_destroy(queue);

	return result;
}
504
/* Thread-side wayland teardown, mirroring _thread_wl_display_init:
 * cancels a pending read, drains remaining events, then destroys
 * explicit_sync, the wayland-tbm client and the event queue — in that
 * order, since proxies must be detached before their queue is freed. */
static void
_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
{
	/* If wl_vk_display is in prepared state, cancel it */
	if (wl_vk_display->prepared) {
		wl_display_cancel_read(wl_vk_display->wl_display);
		wl_vk_display->prepared = TPL_FALSE;
	}

	/* Drain any events already queued for this backend. */
	if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
										  wl_vk_display->ev_queue) == -1) {
		_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->explicit_sync) {
		TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
				 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
				 wl_vk_display, wl_vk_display->explicit_sync);
		zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
		wl_vk_display->explicit_sync = NULL;
	}
#endif

	if (wl_vk_display->wl_tbm_client) {
		struct wl_proxy *wl_tbm = NULL;

		/* Detach wl_tbm from ev_queue (back to the default queue)
		 * before the queue is destroyed below. */
		wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
										wl_vk_display->wl_tbm_client);
		if (wl_tbm) {
			wl_proxy_set_queue(wl_tbm, NULL);
		}

		TPL_INFO("[WL_TBM_DEINIT]",
				 "wl_vk_display(%p) wl_tbm_client(%p)",
				 wl_vk_display, wl_vk_display->wl_tbm_client);
		wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
		wl_vk_display->wl_tbm_client = NULL;
	}

	wl_event_queue_destroy(wl_vk_display->ev_queue);

	wl_vk_display->wl_initialized = TPL_FALSE;

	TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
			 wl_vk_display, wl_vk_display->wl_display);
}
552
553 static void*
554 _thread_init(void *data)
555 {
556         tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
557
558         if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
559                 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
560                                 wl_vk_display, wl_vk_display->wl_display);
561         }
562
563         if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
564                 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
565         }
566
567         return wl_vk_display;
568 }
569
/* gsource prepare callback for the wl_display fd: implements the
 * wl_display_prepare_read_queue() half of libwayland's thread-safe
 * read protocol. Returns TPL_TRUE to skip polling and go straight to
 * dispatch, TPL_FALSE to poll. */
static tpl_bool_t
__thread_func_disp_prepare(tpl_gsource *gsource)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	/* If this wl_vk_display is already prepared,
	 * do nothing in this function. */
	if (wl_vk_display->prepared)
		return TPL_FALSE;

	/* If there is a last_error, there is no need to poll,
	 * so skip directly to dispatch.
	 * prepare -> dispatch */
	if (wl_vk_display->last_error)
		return TPL_TRUE;

	/* prepare_read_queue fails while events are still pending; keep
	 * dispatching until the queue is empty, as libwayland requires. */
	while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
										 wl_vk_display->ev_queue) != 0) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
		}
	}

	/* A successful prepare must be paired with read_events or
	 * cancel_read in the check callback. */
	wl_vk_display->prepared = TPL_TRUE;

	/* Flush requests before sleeping in poll. */
	wl_display_flush(wl_vk_display->wl_display);

	return TPL_FALSE;
}
601
/* gsource check callback: completes the prepared read with
 * wl_display_read_events() when the fd is readable, or cancels it
 * otherwise. Returns TPL_TRUE when dispatch should run. */
static tpl_bool_t
__thread_func_disp_check(tpl_gsource *gsource)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
	tpl_bool_t ret = TPL_FALSE;

	/* Nothing to do if prepare did not arm a read. */
	if (!wl_vk_display->prepared)
		return ret;

	/* If prepared, but last_error is set,
	 * cancel_read is executed and FALSE is returned.
	 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
	 * and skipping disp_check from prepare to disp_dispatch.
	 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
	if (wl_vk_display->prepared && wl_vk_display->last_error) {
		wl_display_cancel_read(wl_vk_display->wl_display);
		return ret;
	}

	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_read_events(wl_vk_display->wl_display) == -1)
			_wl_display_print_err(wl_vk_display, "read_event");
		ret = TPL_TRUE;
	} else {
		/* fd not readable: the prepared read must be cancelled. */
		wl_display_cancel_read(wl_vk_display->wl_display);
		ret = TPL_FALSE;
	}

	wl_vk_display->prepared = TPL_FALSE;

	return ret;
}
635
/* gsource dispatch callback: dispatches pending events on the backend
 * event queue under wl_event_mutex, then flushes outgoing requests.
 * Returns TPL_FALSE (source removal) once last_error is set. */
static tpl_bool_t
__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	TPL_IGNORE(message);

	/* If there is last_error, SOURCE_REMOVE should be returned
	 * to remove the gsource from the main loop.
	 * This is because wl_vk_display is not valid since last_error was set.*/
	if (wl_vk_display->last_error) {
		return TPL_FALSE;
	}

	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
		}
	}

	/* Flush requests produced by the dispatched handlers. */
	wl_display_flush(wl_vk_display->wl_display);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return TPL_TRUE;
}
664
665 static void
666 __thread_func_disp_finalize(tpl_gsource *gsource)
667 {
668         tpl_wl_vk_display_t *wl_vk_display =
669                 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
670
671         if (wl_vk_display->wl_initialized)
672                 _thread_wl_display_fini(wl_vk_display);
673
674         TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)",
675                           wl_vk_display, gsource);
676
677         return;
678 }
679
680
/* gsource vtable for the wl_display fd: full prepare/check/dispatch
 * cycle implementing libwayland's multi-thread read protocol. */
static tpl_gsource_functions disp_funcs = {
	.prepare  = __thread_func_disp_prepare,
	.check    = __thread_func_disp_check,
	.dispatch = __thread_func_disp_dispatch,
	.finalize = __thread_func_disp_finalize,
};
687
688 static tpl_result_t
689 __tpl_wl_vk_display_init(tpl_display_t *display)
690 {
691         TPL_ASSERT(display);
692
693         tpl_wl_vk_display_t *wl_vk_display = NULL;
694
695         /* Do not allow default display in wayland */
696         if (!display->native_handle) {
697                 TPL_ERR("Invalid native handle for display.");
698                 return TPL_ERROR_INVALID_PARAMETER;
699         }
700
701         if (!_check_native_handle_is_wl_display(display->native_handle)) {
702                 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
703                 return TPL_ERROR_INVALID_PARAMETER;
704         }
705
706         wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
707                                                         sizeof(tpl_wl_vk_display_t));
708         if (!wl_vk_display) {
709                 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
710                 return TPL_ERROR_OUT_OF_MEMORY;
711         }
712
713         display->backend.data             = wl_vk_display;
714         display->bufmgr_fd                = -1;
715
716         wl_vk_display->tdm_initialized    = TPL_FALSE;
717         wl_vk_display->wl_initialized     = TPL_FALSE;
718
719         wl_vk_display->ev_queue           = NULL;
720         wl_vk_display->wl_display         = (struct wl_display *)display->native_handle;
721         wl_vk_display->last_error         = 0;
722         wl_vk_display->use_explicit_sync  = TPL_FALSE;   // default disabled
723         wl_vk_display->prepared           = TPL_FALSE;
724
725         /* Wayland Interfaces */
726 #if TIZEN_FEATURE_ENABLE
727         wl_vk_display->explicit_sync      = NULL;
728 #endif
729         wl_vk_display->wl_tbm_client      = NULL;
730
731         /* Vulkan specific surface capabilities */
732         wl_vk_display->min_buffer         = 2;
733         wl_vk_display->max_buffer         = VK_CLIENT_QUEUE_SIZE;
734         wl_vk_display->present_modes      = TPL_DISPLAY_PRESENT_MODE_FIFO;
735
736         wl_vk_display->use_wait_vblank    = TPL_TRUE;   // default enabled
737         {
738                 char *env = tpl_getenv("TPL_WAIT_VBLANK");
739                 if (env && !atoi(env)) {
740                         wl_vk_display->use_wait_vblank = TPL_FALSE;
741                 }
742         }
743
744         tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
745
746         /* Create gthread */
747         wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
748                                                                                            (tpl_gthread_func)_thread_init,
749                                                                                            (void *)wl_vk_display);
750         if (!wl_vk_display->thread) {
751                 TPL_ERR("Failed to create wl_vk_thread");
752                 goto free_display;
753         }
754
755         wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
756                                                                                                         (void *)wl_vk_display,
757                                                                                                         wl_display_get_fd(wl_vk_display->wl_display),
758                                                                                                         &disp_funcs, SOURCE_TYPE_NORMAL);
759         if (!wl_vk_display->disp_source) {
760                 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
761                                 display->native_handle,
762                                 wl_vk_display->thread);
763                 goto free_display;
764         }
765
766         wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread,
767                                                                                                    (void *)wl_vk_display,
768                                                                                                    wl_vk_display->tdm_display_fd,
769                                                                                                    &tdm_funcs, SOURCE_TYPE_NORMAL);
770         if (!wl_vk_display->tdm_source) {
771                 TPL_ERR("Failed to create tdm_gsource\n");
772                 goto free_display;
773         }
774
775         TPL_INFO("[DISPLAY_INIT]",
776                          "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
777                          wl_vk_display,
778                          wl_vk_display->thread,
779                          wl_vk_display->wl_display);
780
781         TPL_INFO("[DISPLAY_INIT]",
782                          "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
783                          wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
784                          wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
785
786         return TPL_ERROR_NONE;
787
788 free_display:
789         if (wl_vk_display->thread) {
790                 if (wl_vk_display->tdm_source)
791                         tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
792                 if (wl_vk_display->disp_source)
793                         tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
794
795                 tpl_gthread_destroy(wl_vk_display->thread);
796         }
797
798         wl_vk_display->thread = NULL;
799         free(wl_vk_display);
800
801         display->backend.data = NULL;
802         return TPL_ERROR_INVALID_OPERATION;
803 }
804
805 static void
806 __tpl_wl_vk_display_fini(tpl_display_t *display)
807 {
808         tpl_wl_vk_display_t *wl_vk_display;
809
810         TPL_ASSERT(display);
811
812         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
813         if (wl_vk_display) {
814                 TPL_INFO("[DISPLAY_FINI]",
815                                  "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
816                                  wl_vk_display,
817                                  wl_vk_display->thread,
818                                  wl_vk_display->wl_display);
819
820                 if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) {
821                         tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
822                         wl_vk_display->tdm_source = NULL;
823                 }
824
825                 if (wl_vk_display->disp_source) {
826                         tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
827                         wl_vk_display->disp_source = NULL;
828                 }
829
830                 if (wl_vk_display->thread) {
831                         tpl_gthread_destroy(wl_vk_display->thread);
832                         wl_vk_display->thread = NULL;
833                 }
834
835                 tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);
836
837                 free(wl_vk_display);
838         }
839
840         display->backend.data = NULL;
841 }
842
843 static tpl_result_t
844 __tpl_wl_vk_display_query_config(tpl_display_t *display,
845                 tpl_surface_type_t surface_type,
846                 int red_size, int green_size,
847                 int blue_size, int alpha_size,
848                 int color_depth, int *native_visual_id,
849                 tpl_bool_t *is_slow)
850 {
851         TPL_ASSERT(display);
852
853         if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
854                         green_size == 8 && blue_size == 8 &&
855                         (color_depth == 32 || color_depth == 24)) {
856
857                 if (alpha_size == 8) {
858                         if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
859                         if (is_slow) *is_slow = TPL_FALSE;
860                         return TPL_ERROR_NONE;
861                 }
862                 if (alpha_size == 0) {
863                         if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
864                         if (is_slow) *is_slow = TPL_FALSE;
865                         return TPL_ERROR_NONE;
866                 }
867         }
868
869         return TPL_ERROR_INVALID_PARAMETER;
870 }
871
/* No-op config filter: this backend does not reject or rewrite any
 * visual id based on alpha size, so every config passes through
 * unchanged and the call always succeeds. */
static tpl_result_t
__tpl_wl_vk_display_filter_config(tpl_display_t *display,
								  int *visual_id,
								  int alpha_size)
{
	TPL_IGNORE(display);
	TPL_IGNORE(visual_id);
	TPL_IGNORE(alpha_size);
	return TPL_ERROR_NONE;
}
882
883 static tpl_result_t
884 __tpl_wl_vk_display_query_window_supported_buffer_count(
885         tpl_display_t *display,
886         tpl_handle_t window, int *min, int *max)
887 {
888         tpl_wl_vk_display_t *wl_vk_display = NULL;
889
890         TPL_ASSERT(display);
891         TPL_ASSERT(window);
892
893         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
894         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
895
896         if (min) *min = wl_vk_display->min_buffer;
897         if (max) *max = wl_vk_display->max_buffer;
898
899         return TPL_ERROR_NONE;
900 }
901
902 static tpl_result_t
903 __tpl_wl_vk_display_query_window_supported_present_modes(
904         tpl_display_t *display,
905         tpl_handle_t window, int *present_modes)
906 {
907         tpl_wl_vk_display_t *wl_vk_display = NULL;
908
909         TPL_ASSERT(display);
910         TPL_ASSERT(window);
911
912         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
913         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
914
915         if (present_modes) {
916                 *present_modes = wl_vk_display->present_modes;
917         }
918
919         return TPL_ERROR_NONE;
920 }
921
/* Force-clear every buffer still tracked in wl_vk_surface->buffers.
 *
 * For each occupied slot the buffer is detached from the tracking array and
 * then, depending on how far it got through the present pipeline, either
 * released back to the tbm_queue (ACQUIRED..COMMITTED) or cancel-dequeued
 * (DEQUEUED). Buffers still in flight are given a short 16ms grace period
 * to be signaled before being force-released.
 *
 * Lock order here is wl_event_mutex -> buffers_mutex, then buffers_mutex is
 * dropped before taking the per-buffer wl_vk_buffer->mutex. */
static void
_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display      = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain        = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer        = NULL;
	tpl_bool_t need_to_release              = TPL_FALSE;
	tpl_bool_t need_to_cancel               = TPL_FALSE;
	buffer_status_t status                  = RELEASED;
	int idx                                 = 0;

	/* Single pass over the fixed-size slot array; exits early once
	 * buffer_cnt drops to zero. */
	while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
		tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		wl_vk_buffer = wl_vk_surface->buffers[idx];

		/* Detach the buffer from the tracking array under buffers_mutex. */
		if (wl_vk_buffer) {
			wl_vk_surface->buffers[idx] = NULL;
			wl_vk_surface->buffer_cnt--;
		} else {
			tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
			tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
			idx++;
			continue;
		}

		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		status = wl_vk_buffer->status;

		TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
				  idx, wl_vk_buffer,
				  wl_vk_buffer->tbm_surface,
				  status_to_string[status]);

		/* An enqueued buffer may still be in flight. Which pipeline stage
		 * counts as "done" depends on whether explicit sync is in use:
		 * without it we wait until WAITING_VBLANK, with it until COMMITTED. */
		if (status >= ENQUEUED) {
			tpl_bool_t need_to_wait  = TPL_FALSE;
			tpl_result_t wait_result = TPL_ERROR_NONE;

			if (!wl_vk_display->use_explicit_sync &&
				status < WAITING_VBLANK)
				need_to_wait = TPL_TRUE;

			if (wl_vk_display->use_explicit_sync &&
				status < COMMITTED)
				need_to_wait = TPL_TRUE;

			if (need_to_wait) {
				/* Drop wl_event_mutex while sleeping so the worker thread
				 * can make progress and signal wl_vk_buffer->cond. */
				tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
				wait_result = tpl_cond_timed_wait(&wl_vk_buffer->cond,
												  &wl_vk_buffer->mutex,
												  16); /* 16ms */
				tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

				status = wl_vk_buffer->status;

				if (wait_result == TPL_ERROR_TIME_OUT)
					TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
							 wl_vk_buffer);
			}
		}

		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
		/* It has been acquired but has not yet been released, so this
		 * buffer must be released. */
		need_to_release = (status >= ACQUIRED && status <= COMMITTED);

		/* After dequeue, it has not been enqueued yet
		 * so cancel_dequeue must be performed. */
		need_to_cancel = (status == DEQUEUED);

		if (swapchain && swapchain->tbm_queue) {
			if (need_to_release) {
				tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
													wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}

			if (need_to_cancel) {
				tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
														   wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}
		}

		wl_vk_buffer->status = RELEASED;

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		/* Presumably balances the internal ref taken when the buffer was
		 * dequeued/acquired — TODO confirm against the acquire path. */
		if (need_to_release || need_to_cancel)
			tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);

		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

		idx++;
	}
}
1026
1027 static tdm_client_vblank*
1028 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1029 {
1030         tdm_client_vblank *vblank = NULL;
1031         tdm_client_output *tdm_output = NULL;
1032         tdm_error tdm_err = TDM_ERROR_NONE;
1033
1034         if (!tdm_client) {
1035                 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1036                 return NULL;
1037         }
1038
1039         tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1040         if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1041                 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1042                 return NULL;
1043         }
1044
1045         vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1046         if (!vblank || tdm_err != TDM_ERROR_NONE) {
1047                 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1048                 return NULL;
1049         }
1050
1051         tdm_client_vblank_set_enable_fake(vblank, 1);
1052         tdm_client_vblank_set_sync(vblank, 0);
1053
1054         return vblank;
1055 }
1056
/* Per-surface initialization that must run on the wl_vk worker thread
 * (dispatched via the INIT_SURFACE message).
 * Sets up vblank-based frame pacing and, when available, the explicit
 * sync object for this wl_surface. */
static void
_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	/* tbm_surface_queue will be created at swapchain_create */

	/* vblank creation failure is non-fatal: only success is logged and
	 * wl_vk_surface->vblank simply stays NULL. */
	wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
								wl_vk_display->tdm_client);
	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_INIT]",
				 "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_display->tdm_client,
				 wl_vk_surface->vblank);
	}

#if TIZEN_FEATURE_ENABLE
	/* Bind a zwp_linux_surface_synchronization_v1 object to this
	 * wl_surface. On failure, explicit sync is disabled for the whole
	 * display (use_explicit_sync cleared), not just this surface. */
	if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
		wl_vk_surface->surface_sync =
			zwp_linux_explicit_synchronization_v1_get_synchronization(
					wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
		if (wl_vk_surface->surface_sync) {
			TPL_INFO("[EXPLICIT_SYNC_INIT]",
					 "wl_vk_surface(%p) surface_sync(%p)",
					 wl_vk_surface, wl_vk_surface->surface_sync);
		} else {
			TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
					 wl_vk_surface);
			wl_vk_display->use_explicit_sync = TPL_FALSE;
		}
	}
#endif
	/* NOTE(review): __tpl_list_alloc() result is not checked here; a NULL
	 * list would only surface later when buffers queue up for vblank. */
	wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
}
1091
/* Worker-thread counterpart of _thread_wl_vk_surface_init(); called from
 * the surface gsource finalizer. Releases the vblank-wait list, the
 * explicit-sync object and the tdm vblank, all under surf_mutex. */
static void
_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);

	TPL_INFO("[SURFACE_FINI]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	/* List nodes are freed but entries are not (free_func == NULL);
	 * presumably the queued buffers are owned by wl_vk_surface->buffers —
	 * TODO confirm against the vblank wait path. */
	if (wl_vk_surface->vblank_waiting_buffers) {
		__tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
		wl_vk_surface->vblank_waiting_buffers = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_surface->surface_sync) {
		TPL_INFO("[SURFACE_SYNC_DESTROY]",
				 "wl_vk_surface(%p) surface_sync(%p)",
				  wl_vk_surface, wl_vk_surface->surface_sync);
		zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
		wl_vk_surface->surface_sync = NULL;
	}
#endif

	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_DESTROY]",
				 "wl_vk_surface(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_surface->vblank);
		tdm_client_vblank_destroy(wl_vk_surface->vblank);
		wl_vk_surface->vblank = NULL;
	}

	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
1126
1127 static tpl_bool_t
1128 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1129 {
1130         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1131
1132         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1133
1134         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1135         if (message == INIT_SURFACE) { /* Initialize surface */
1136                 TPL_DEBUG("wl_vk_surface(%p) initialize message received!",
1137                                   wl_vk_surface);
1138                 _thread_wl_vk_surface_init(wl_vk_surface);
1139                 tpl_gcond_signal(&wl_vk_surface->surf_cond);    
1140         } else if (message == CREATE_QUEUE) { /* Create tbm_surface_queue */
1141                 TPL_DEBUG("wl_vk_surface(%p) queue creation message received!",
1142                                   wl_vk_surface);
1143                 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1144                         != TPL_ERROR_NONE) {
1145                         TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1146                                         wl_vk_surface);
1147                 }
1148                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1149         } else if (message == DESTROY_QUEUE) { /* swapchain destroy */
1150                 TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!",
1151                                   wl_vk_surface);
1152                 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1153                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1154         } else if (message == ACQUIRABLE) { /* Acquirable message */
1155                 TPL_DEBUG("wl_vk_surface(%p) acquirable message received!",
1156                                   wl_vk_surface);
1157                 if (_thread_surface_queue_acquire(wl_vk_surface)
1158                         != TPL_ERROR_NONE) {
1159                         TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1160                                         wl_vk_surface);
1161                 }
1162         }
1163
1164         /* init to NONE_MESSAGE */
1165         wl_vk_surface->sent_message = NONE_MESSAGE;
1166
1167         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1168
1169         return TPL_TRUE;
1170 }
1171
1172 static void
1173 __thread_func_surf_finalize(tpl_gsource *gsource)
1174 {
1175         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1176
1177         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1178         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1179
1180         _thread_wl_vk_surface_fini(wl_vk_surface);
1181
1182         TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)",
1183                           wl_vk_surface, gsource);
1184 }
1185
/* gsource callback table for the per-surface event source: only dispatch
 * (message handling) and finalize (teardown on source destroy) are used. */
static tpl_gsource_functions surf_funcs = {
	.prepare = NULL,
	.check = NULL,
	.dispatch = __thread_func_surf_dispatch,
	.finalize = __thread_func_surf_finalize,
};
1192
1193
1194 static tpl_result_t
1195 __tpl_wl_vk_surface_init(tpl_surface_t *surface)
1196 {
1197         tpl_wl_vk_surface_t *wl_vk_surface      = NULL;
1198         tpl_wl_vk_display_t *wl_vk_display      = NULL;
1199         tpl_gsource *surf_source                = NULL;
1200
1201         TPL_ASSERT(surface);
1202         TPL_ASSERT(surface->display);
1203         TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1204         TPL_ASSERT(surface->native_handle);
1205
1206         wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
1207         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1208
1209         wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
1210                                                          sizeof(tpl_wl_vk_surface_t));
1211         if (!wl_vk_surface) {
1212                 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
1213                 return TPL_ERROR_OUT_OF_MEMORY;
1214         }
1215
1216         surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
1217                                                                          -1, &surf_funcs, SOURCE_TYPE_NORMAL);
1218         if (!surf_source) {
1219                 TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
1220                                 wl_vk_surface);
1221                 free(wl_vk_surface);
1222                 surface->backend.data = NULL;
1223                 return TPL_ERROR_INVALID_OPERATION;
1224         }
1225
1226         surface->backend.data                  = (void *)wl_vk_surface;
1227         surface->width                                 = -1;
1228         surface->height                        = -1;
1229
1230         wl_vk_surface->surf_source             = surf_source;
1231         wl_vk_surface->swapchain               = NULL;
1232
1233         wl_vk_surface->wl_vk_display           = wl_vk_display;
1234         wl_vk_surface->wl_surface              = (struct wl_surface *)surface->native_handle;
1235
1236         wl_vk_surface->reset                   = TPL_FALSE;
1237         wl_vk_surface->is_activated            = TPL_FALSE;
1238         wl_vk_surface->vblank_done             = TPL_TRUE;
1239
1240         wl_vk_surface->render_done_cnt         = 0;
1241
1242         wl_vk_surface->vblank                  = NULL;
1243 #if TIZEN_FEATURE_ENABLE
1244         wl_vk_surface->surface_sync            = NULL;
1245 #endif
1246
1247         wl_vk_surface->sent_message            = NONE_MESSAGE;
1248
1249         wl_vk_surface->post_interval           = surface->post_interval;
1250
1251         {
1252                 int i = 0;
1253                 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1254                         wl_vk_surface->buffers[i]     = NULL;
1255                 wl_vk_surface->buffer_cnt         = 0;
1256         }
1257
1258         tpl_gmutex_init(&wl_vk_surface->surf_mutex);
1259         tpl_gcond_init(&wl_vk_surface->surf_cond);
1260
1261         tpl_gmutex_init(&wl_vk_surface->buffers_mutex);
1262
1263         /* Initialize in thread */
1264         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1265         wl_vk_surface->sent_message = INIT_SURFACE;
1266         tpl_gsource_send_message(wl_vk_surface->surf_source,
1267                                                          wl_vk_surface->sent_message);
1268         tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1269         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1270
1271         TPL_INFO("[SURFACE_INIT]",
1272                           "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
1273                           surface, wl_vk_surface, wl_vk_surface->surf_source);
1274
1275         return TPL_ERROR_NONE;
1276 }
1277
/* Backend destructor for a Vulkan window surface.
 *
 * Worker-thread-side resources (vblank, surface_sync, waiting list) are
 * released by _thread_wl_vk_surface_fini() via the gsource finalizer that
 * tpl_gsource_destroy() triggers below. */
static void
__tpl_wl_vk_surface_fini(tpl_surface_t *surface)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	wl_vk_display = (tpl_wl_vk_display_t *)
							 surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

	TPL_INFO("[SURFACE_FINI][BEGIN]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
		/* finalize swapchain */
		/* NOTE(review): this branch is empty — a swapchain still alive
		 * here is dropped without destroying its tbm_queue. Presumably the
		 * swapchain is expected to be destroyed before surface fini;
		 * confirm, and either send DESTROY_QUEUE here or remove the dead
		 * branch. */
	}

	wl_vk_surface->swapchain        = NULL;

	/* TPL_TRUE presumably makes tpl_gsource_destroy wait until the
	 * finalizer (_thread_wl_vk_surface_fini) has run on the worker
	 * thread — confirm in tpl_utils_gthread. */
	if (wl_vk_surface->surf_source)
		tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
	wl_vk_surface->surf_source      = NULL;

	_print_buffer_lists(wl_vk_surface);

	wl_vk_surface->wl_surface       = NULL;
	wl_vk_surface->wl_vk_display    = NULL;
	wl_vk_surface->tpl_surface      = NULL;

	/* Empty lock/unlock pair acts as a barrier so no other thread still
	 * holds surf_mutex when it is cleared.
	 * NOTE(review): buffers_mutex (initialized in surface_init) is never
	 * cleared here — verify whether that is intentional. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
	tpl_gcond_clear(&wl_vk_surface->surf_cond);

	TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);

	free(wl_vk_surface);
	surface->backend.data = NULL;
}
1325
1326 static tpl_result_t
1327 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1328                                                                                   int post_interval)
1329 {
1330         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1331
1332         TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1333
1334         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1335
1336         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1337
1338         TPL_INFO("[SET_POST_INTERVAL]",
1339                          "wl_vk_surface(%p) post_interval(%d -> %d)",
1340                          wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1341
1342         wl_vk_surface->post_interval = post_interval;
1343
1344         return TPL_ERROR_NONE;
1345 }
1346
1347 static tpl_bool_t
1348 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1349 {
1350         TPL_ASSERT(surface);
1351         TPL_ASSERT(surface->backend.data);
1352
1353         tpl_wl_vk_surface_t *wl_vk_surface =
1354                 (tpl_wl_vk_surface_t *)surface->backend.data;
1355
1356         return !(wl_vk_surface->reset);
1357 }
1358
1359 static void
1360 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1361                                                           void *data)
1362 {
1363         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1364         tpl_wl_vk_display_t *wl_vk_display = NULL;
1365         tpl_wl_vk_swapchain_t *swapchain   = NULL;
1366         tpl_surface_t *surface             = NULL;
1367         tpl_bool_t is_activated            = TPL_FALSE;
1368         int width, height;
1369
1370         wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1371         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1372
1373         wl_vk_display = wl_vk_surface->wl_vk_display;
1374         TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1375
1376         surface = wl_vk_surface->tpl_surface;
1377         TPL_CHECK_ON_NULL_RETURN(surface);
1378
1379         swapchain = wl_vk_surface->swapchain;
1380         TPL_CHECK_ON_NULL_RETURN(swapchain);
1381
1382         /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1383          * the changed window size at the next frame. */
1384         width = tbm_surface_queue_get_width(tbm_queue);
1385         height = tbm_surface_queue_get_height(tbm_queue);
1386         if (surface->width != width || surface->height != height) {
1387                 TPL_INFO("[QUEUE_RESIZE]",
1388                                  "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1389                                  wl_vk_surface, tbm_queue,
1390                                  surface->width, surface->height, width, height);
1391         }
1392
1393         /* When queue_reset_callback is called, if is_activated is different from
1394          * its previous state change the reset flag to TPL_TRUE to get a new buffer
1395          * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1396         is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
1397                                                                                                                    swapchain->tbm_queue);
1398         if (wl_vk_surface->is_activated != is_activated) {
1399                 if (is_activated) {
1400                         TPL_INFO("[ACTIVATED]",
1401                                           "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1402                                           wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1403                 } else {
1404                         TPL_LOG_T("[DEACTIVATED]",
1405                                           " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1406                                           wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1407                 }
1408         }
1409
1410         wl_vk_surface->reset = TPL_TRUE;
1411
1412         if (surface->reset_cb)
1413                 surface->reset_cb(surface->reset_data);
1414 }
1415
1416 static void
1417 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1418                                                                    void *data)
1419 {
1420         TPL_IGNORE(tbm_queue);
1421
1422         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1423         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1424
1425         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1426         if (wl_vk_surface->sent_message == NONE_MESSAGE) {
1427                 wl_vk_surface->sent_message = ACQUIRABLE;
1428                 tpl_gsource_send_message(wl_vk_surface->surf_source,
1429                                                                  wl_vk_surface->sent_message);
1430         }
1431         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1432 }
1433
1434 static tpl_result_t
1435 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1436 {
1437         TPL_ASSERT (wl_vk_surface);
1438
1439         tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1440         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1441         tbm_surface_queue_h tbm_queue      = NULL;
1442         tbm_bufmgr bufmgr = NULL;
1443         unsigned int capability;
1444
1445         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1446         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1447
1448         if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
1449                 TPL_ERR("buffer count(%d) must be higher than (%d)",
1450                                 swapchain->properties.buffer_count,
1451                                 wl_vk_display->min_buffer);
1452                 return TPL_ERROR_INVALID_PARAMETER;
1453         }
1454
1455         if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
1456                 TPL_ERR("buffer count(%d) must be lower than (%d)",
1457                                 swapchain->properties.buffer_count,
1458                                 wl_vk_display->max_buffer);
1459                 return TPL_ERROR_INVALID_PARAMETER;
1460         }
1461
1462         if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
1463                 TPL_ERR("Unsupported present_mode(%d)",
1464                                 swapchain->properties.present_mode);
1465                 return TPL_ERROR_INVALID_PARAMETER;
1466         }
1467
1468         if (swapchain->tbm_queue) {
1469                 int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
1470                 int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
1471
1472                 if (swapchain->swapchain_buffers) {
1473                         int i;
1474                         for (i = 0; i < swapchain->properties.buffer_count; i++) {
1475                                 if (swapchain->swapchain_buffers[i]) {
1476                                         TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]);
1477                                         tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1478                                         swapchain->swapchain_buffers[i] = NULL;
1479                                 }
1480                         }
1481
1482                         free(swapchain->swapchain_buffers);
1483                         swapchain->swapchain_buffers = NULL;
1484                 }
1485
1486                 if (old_width != swapchain->properties.width ||
1487                         old_height != swapchain->properties.height) {
1488                         tbm_surface_queue_reset(swapchain->tbm_queue,
1489                                                                         swapchain->properties.width,
1490                                                                         swapchain->properties.height,
1491                                                                         swapchain->properties.format);
1492                         TPL_INFO("[RESIZE]",
1493                                          "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
1494                                          wl_vk_surface, swapchain, swapchain->tbm_queue,
1495                                          old_width, old_height,
1496                                          swapchain->properties.width,
1497                                          swapchain->properties.height);
1498                 }
1499
1500                 swapchain->properties.buffer_count =
1501                         tbm_surface_queue_get_size(swapchain->tbm_queue);
1502
1503                 wl_vk_surface->reset = TPL_FALSE;
1504
1505                 __tpl_util_atomic_inc(&swapchain->ref_cnt);
1506
1507                 TPL_INFO("[SWAPCHAIN_REUSE]",
1508                                  "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
1509                                  wl_vk_surface, swapchain, swapchain->tbm_queue,
1510                                  swapchain->properties.buffer_count);
1511
1512                 return TPL_ERROR_NONE;
1513         }
1514
1515         bufmgr = tbm_bufmgr_init(-1);
1516         capability = tbm_bufmgr_get_capability(bufmgr);
1517         tbm_bufmgr_deinit(bufmgr);
1518
1519         if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1520                 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1521                                                                         wl_vk_display->wl_tbm_client,
1522                                                                         wl_vk_surface->wl_surface,
1523                                                                         swapchain->properties.buffer_count,
1524                                                                         swapchain->properties.width,
1525                                                                         swapchain->properties.height,
1526                                                                         TBM_FORMAT_ARGB8888);
1527         } else {
1528                 tbm_queue = wayland_tbm_client_create_surface_queue(
1529                                                                         wl_vk_display->wl_tbm_client,
1530                                                                         wl_vk_surface->wl_surface,
1531                                                                         swapchain->properties.buffer_count,
1532                                                                         swapchain->properties.width,
1533                                                                         swapchain->properties.height,
1534                                                                         TBM_FORMAT_ARGB8888);
1535         }
1536
1537         if (!tbm_queue) {
1538                 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1539                                 wl_vk_surface);
1540                 return TPL_ERROR_OUT_OF_MEMORY;
1541         }
1542
1543         if (tbm_surface_queue_set_modes(
1544                         tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1545                         TBM_SURFACE_QUEUE_ERROR_NONE) {
1546                 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1547                                 tbm_queue);
1548                 tbm_surface_queue_destroy(tbm_queue);
1549                 return TPL_ERROR_INVALID_OPERATION;
1550         }
1551
1552         if (tbm_surface_queue_add_reset_cb(
1553                         tbm_queue,
1554                         __cb_tbm_queue_reset_callback,
1555                         (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1556                 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1557                                 tbm_queue);
1558                 tbm_surface_queue_destroy(tbm_queue);
1559                 return TPL_ERROR_INVALID_OPERATION;
1560         }
1561
1562         if (tbm_surface_queue_add_acquirable_cb(
1563                         tbm_queue,
1564                         __cb_tbm_queue_acquirable_callback,
1565                         (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1566                 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1567                                 tbm_queue);
1568                 tbm_surface_queue_destroy(tbm_queue);
1569                 return TPL_ERROR_INVALID_OPERATION;
1570         }
1571
1572         swapchain->tbm_queue = tbm_queue;
1573
1574         TPL_INFO("[TBM_QUEUE_CREATED]",
1575                          "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)",
1576                          wl_vk_surface, swapchain, tbm_queue);
1577
1578         return TPL_ERROR_NONE;
1579 }
1580
/* Create (or re-use) the swapchain object for @surface and ask the worker
 * thread to create its backing tbm_surface_queue.
 *
 * The actual tbm_queue creation happens on the backend thread: this function
 * sends a CREATE_QUEUE message and blocks on surf_cond until the thread
 * signals completion.
 *
 * @param surface       tpl_surface whose backend data holds the wl_vk state.
 * @param format        requested tbm_format.
 *                      NOTE(review): 'format' is not stored into
 *                      swapchain->properties here — confirm whether it is
 *                      intentionally ignored (the queue is created with
 *                      TBM_FORMAT_ARGB8888 on the thread side).
 * @param width/height  requested swapchain extent in pixels.
 * @param buffer_count  requested number of swapchain images.
 * @param present_mode  requested VK present mode bits (validated on thread).
 * @return TPL_ERROR_NONE on success, error code otherwise.
 */
static tpl_result_t
__tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
		tbm_format format, int width,
		int height, int buffer_count, int present_mode)
{
	tpl_wl_vk_surface_t *wl_vk_surface              = NULL;
	tpl_wl_vk_display_t *wl_vk_display      = NULL;
	tpl_wl_vk_swapchain_t *swapchain  = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_display = (tpl_wl_vk_display_t *)
							 surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	/* A non-NULL swapchain means the previous one was not destroyed yet;
	 * it is re-used and only its properties are refreshed below. */
	swapchain = wl_vk_surface->swapchain;

	if (swapchain == NULL) {
		swapchain =
			(tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
			TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
		swapchain->tbm_queue           = NULL;
	}

	/* Record the requested properties; the worker thread validates them
	 * against the display capabilities when handling CREATE_QUEUE. */
	swapchain->properties.buffer_count = buffer_count;
	swapchain->properties.width        = width;
	swapchain->properties.height       = height;
	swapchain->properties.present_mode = present_mode;
	swapchain->wl_vk_surface           = wl_vk_surface;

	wl_vk_surface->swapchain           = swapchain;

	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	/* send swapchain create tbm_queue message */
	wl_vk_surface->sent_message = CREATE_QUEUE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	/* NOTE(review): single gcond_wait without a predicate loop — a spurious
	 * wakeup would return before the thread finished; confirm the gthread
	 * wrapper guarantees signalled-only wakeups. */
	tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	TPL_CHECK_ON_FALSE_ASSERT_FAIL(
		swapchain->tbm_queue != NULL,
		"[CRITICAL FAIL] Failed to create tbm_surface_queue");

	wl_vk_surface->reset = TPL_FALSE;

	/* NOTE(review): on the re-use path the worker thread already incremented
	 * swapchain->ref_cnt; setting it to 1 here discards that increment —
	 * confirm this is the intended refcount semantic. */
	__tpl_util_atomic_set(&swapchain->ref_cnt, 1);

	return TPL_ERROR_NONE;
}
1635
1636 static void
1637 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1638 {
1639         TPL_ASSERT(wl_vk_surface);
1640
1641         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1642
1643         TPL_CHECK_ON_NULL_RETURN(swapchain);
1644
1645         if (swapchain->tbm_queue) {
1646                 TPL_INFO("[TBM_QUEUE_DESTROY]",
1647                                  "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1648                                  wl_vk_surface, swapchain, swapchain->tbm_queue);
1649                 tbm_surface_queue_destroy(swapchain->tbm_queue);
1650                 swapchain->tbm_queue = NULL;
1651         }
1652 }
1653
/* Drop one reference on the surface's swapchain and, when the count reaches
 * zero, fully tear it down: release exported buffers, clear tracked
 * wl_vk_buffers, and ask the worker thread to destroy the tbm_queue.
 *
 * @param surface tpl_surface whose backend data holds the wl_vk state.
 * @return TPL_ERROR_NONE on success (including the still-referenced case),
 *         TPL_ERROR_INVALID_OPERATION if no swapchain exists.
 */
static tpl_result_t
__tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
{
	tpl_wl_vk_swapchain_t *swapchain   = NULL;
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	swapchain = wl_vk_surface->swapchain;
	if (!swapchain) {
		TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
				wl_vk_surface);
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Other holders still reference this swapchain: just decrement. */
	if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
		TPL_INFO("[DESTROY_SWAPCHAIN]",
				 "wl_vk_surface(%p) swapchain(%p) still valid.",
				 wl_vk_surface, swapchain);
		return TPL_ERROR_NONE;
	}

	TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
			 "wl_vk_surface(%p) swapchain(%p)",
			 wl_vk_surface, wl_vk_surface->swapchain);

	/* Release the references taken in get_swapchain_buffers() and free
	 * the exported array. */
	if (swapchain->swapchain_buffers) {
		for (int i = 0; i < swapchain->properties.buffer_count; i++) {
			if (swapchain->swapchain_buffers[i]) {
				TPL_DEBUG("Stop tracking tbm_surface(%p)",
						  swapchain->swapchain_buffers[i]);
				tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
				swapchain->swapchain_buffers[i] = NULL;
			}
		}

		free(swapchain->swapchain_buffers);
		swapchain->swapchain_buffers = NULL;
	}

	/* Drop any per-buffer tracking state still held by the surface. */
	_tpl_wl_vk_surface_buffer_clear(wl_vk_surface);

	/* The tbm_queue must be destroyed on the backend thread; send the
	 * message and wait for completion. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->sent_message = DESTROY_QUEUE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	_print_buffer_lists(wl_vk_surface);

	free(swapchain);
	wl_vk_surface->swapchain = NULL;

	return TPL_ERROR_NONE;
}
1718
1719 static tpl_result_t
1720 __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
1721                                                                                           tbm_surface_h **buffers,
1722                                                                                           int *buffer_count)
1723 {
1724         TPL_ASSERT(surface);
1725         TPL_ASSERT(surface->backend.data);
1726         TPL_ASSERT(surface->display);
1727         TPL_ASSERT(surface->display->backend.data);
1728
1729         tpl_wl_vk_surface_t *wl_vk_surface =
1730                 (tpl_wl_vk_surface_t *)surface->backend.data;
1731         tpl_wl_vk_display_t *wl_vk_display =
1732                 (tpl_wl_vk_display_t *)surface->display->backend.data;
1733         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1734         tpl_result_t ret                   = TPL_ERROR_NONE;
1735         int i;
1736
1737         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1738         TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
1739
1740         tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1741
1742         if (!buffers) {
1743                 *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
1744                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1745                 return TPL_ERROR_NONE;
1746         }
1747
1748         swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
1749                                                                                 *buffer_count,
1750                                                                                 sizeof(tbm_surface_h));
1751         if (!swapchain->swapchain_buffers) {
1752                 TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
1753                                 *buffer_count);
1754                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1755                 return TPL_ERROR_OUT_OF_MEMORY;
1756         }
1757
1758         ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
1759                                                                                                 swapchain->tbm_queue,
1760                                                                                                 swapchain->swapchain_buffers,
1761                                                                                                 buffer_count);
1762         if (!ret) {
1763                 TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
1764                                 wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
1765                 free(swapchain->swapchain_buffers);
1766                 swapchain->swapchain_buffers = NULL;
1767                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1768                 return TPL_ERROR_INVALID_OPERATION;
1769         }
1770
1771         for (i = 0; i < *buffer_count; i++) {
1772                 if (swapchain->swapchain_buffers[i]) {
1773                         TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
1774                                           i, swapchain->swapchain_buffers[i],
1775                                           _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1776                         tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
1777                 }
1778         }
1779
1780         *buffers = swapchain->swapchain_buffers;
1781
1782         tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1783
1784         return TPL_ERROR_NONE;
1785 }
1786
/* tbm user-data destroy callback: final teardown of a wl_vk_buffer.
 *
 * Invoked by libtbm when the tracked tbm_surface's user data is deleted
 * (i.e. when the tbm_surface itself is being destroyed). Unregisters the
 * buffer from the surface's tracking array, destroys the wl_buffer proxy,
 * and releases every resource the wl_vk_buffer owns before freeing it.
 */
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
			 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);

	/* Remove this buffer from the surface's tracking slots. idx is -1 when
	 * the buffer was already evicted from the array. */
	tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
	if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
		wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
		wl_vk_surface->buffer_cnt--;

		wl_vk_buffer->idx = -1;
	}
	tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

	/* Flush pending protocol requests before tearing down the wl_buffer. */
	wl_display_flush(wl_vk_display->wl_display);

	if (wl_vk_buffer->wl_buffer) {
		wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
										  wl_vk_buffer->wl_buffer);
		wl_vk_buffer->wl_buffer = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	/* Explicit-sync release object from the compositor, if any. */
	if (wl_vk_buffer->buffer_release) {
		zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
		wl_vk_buffer->buffer_release = NULL;
	}
#endif

	/* Close an unconsumed release fence so the fd is not leaked. */
	if (wl_vk_buffer->release_fence_fd != -1) {
		close(wl_vk_buffer->release_fence_fd);
		wl_vk_buffer->release_fence_fd = -1;
	}

	/* Damage-region storage saved at enqueue time. */
	if (wl_vk_buffer->rects) {
		free(wl_vk_buffer->rects);
		wl_vk_buffer->rects = NULL;
		wl_vk_buffer->num_rects = 0;
	}

	wl_vk_buffer->tbm_surface = NULL;
	wl_vk_buffer->bo_name = -1;

	free(wl_vk_buffer);
}
1836
1837 static tpl_wl_vk_buffer_t *
1838 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1839 {
1840         tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1841         tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1842                                                                            (void **)&wl_vk_buffer);
1843         return wl_vk_buffer;
1844 }
1845
/* Return the wl_vk_buffer tracking @tbm_surface, creating and registering
 * one if the surface has not been seen before.
 *
 * A newly created wl_vk_buffer is attached to the tbm_surface as user data
 * (freed via __cb_wl_vk_buffer_free) and stored in the surface's fixed-size
 * buffers[] array. When the array is full, slot 0 is evicted to make room.
 *
 * @return the (new or existing) wl_vk_buffer, or NULL on allocation failure.
 */
static tpl_wl_vk_buffer_t *
_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
					  tbm_surface_h tbm_surface)
{
	tpl_wl_vk_buffer_t  *wl_vk_buffer  = NULL;

	wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);

	if (!wl_vk_buffer) {
		wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
		TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);

		/* Register the free callback first, then attach the pointer, so the
		 * key is fully set up before anyone can look it up. */
		tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   (tbm_data_free)__cb_wl_vk_buffer_free);
		tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   wl_vk_buffer);

		wl_vk_buffer->wl_buffer                = NULL;
		wl_vk_buffer->tbm_surface              = tbm_surface;
		wl_vk_buffer->bo_name                  = _get_tbm_surface_bo_name(tbm_surface);
		wl_vk_buffer->wl_vk_surface            = wl_vk_surface;

		wl_vk_buffer->status                   = RELEASED;

		/* Fence fds are owned by this struct; -1 means "none". */
		wl_vk_buffer->acquire_fence_fd         = -1;
		wl_vk_buffer->release_fence_fd         = -1;

		wl_vk_buffer->dx                       = 0;
		wl_vk_buffer->dy                       = 0;
		wl_vk_buffer->width                    = tbm_surface_get_width(tbm_surface);
		wl_vk_buffer->height                   = tbm_surface_get_height(tbm_surface);

		wl_vk_buffer->rects                    = NULL;
		wl_vk_buffer->num_rects                = 0;

		wl_vk_buffer->need_to_commit = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
		wl_vk_buffer->buffer_release = NULL;
#endif
		tpl_gmutex_init(&wl_vk_buffer->mutex);
		tpl_gcond_init(&wl_vk_buffer->cond);

		/* Find a free slot in the tracking array under buffers_mutex. */
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		{
			int i;
			for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
				if (wl_vk_surface->buffers[i] == NULL) break;

			/* If this exception is reached,
			 * it may be a critical memory leak problem. */
			if (i == BUFFER_ARRAY_SIZE) {
				tpl_wl_vk_buffer_t *evicted_buffer = NULL;
				int evicted_idx = 0; /* evict the frontmost buffer */

				evicted_buffer = wl_vk_surface->buffers[evicted_idx];

				TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
						 wl_vk_surface);
				TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
						 evicted_buffer, evicted_buffer->tbm_surface,
						 status_to_string[evicted_buffer->status]);

				/* [TODO] need to think about whether there will be
				 * better modifications */
				/* NOTE(review): the evicted wl_vk_buffer is only detached
				 * here, not freed — it is reclaimed later via its tbm
				 * user-data free callback; confirm no leak when the evicted
				 * tbm_surface never gets destroyed. */
				wl_vk_surface->buffer_cnt--;
				wl_vk_surface->buffers[evicted_idx]      = NULL;

				i = evicted_idx;
			}

			wl_vk_surface->buffer_cnt++;
			wl_vk_surface->buffers[i]          = wl_vk_buffer;
			wl_vk_buffer->idx                  = i;
		}
		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		TPL_INFO("[WL_VK_BUFFER_CREATE]",
				 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				 wl_vk_surface, wl_vk_buffer, tbm_surface,
				 wl_vk_buffer->bo_name);
	}

	return wl_vk_buffer;
}
1930
/* Dequeue the next renderable buffer from the swapchain's tbm_queue.
 *
 * Blocks (with the surface object lock dropped) until a buffer becomes
 * dequeueable or @timeout_ns expires. On success the returned tbm_surface
 * carries one extra internal ref (dropped at enqueue/cancel time) and its
 * wl_vk_buffer status becomes DEQUEUED.
 *
 * @param timeout_ns    wait budget in nanoseconds; UINT64_MAX waits forever.
 * @param release_fence out (may be NULL): explicit-sync release fence fd the
 *                      caller takes ownership of, or -1 when unavailable.
 * @return dequeued tbm_surface, or NULL on timeout/error/queue reset.
 */
static tbm_surface_h
__tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
								   uint64_t timeout_ns,
								   int32_t *release_fence)
{
	TPL_ASSERT(surface);
	TPL_ASSERT(surface->backend.data);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->display->backend.data);
	TPL_OBJECT_CHECK_RETURN(surface, NULL);

	tpl_wl_vk_surface_t *wl_vk_surface =
		(tpl_wl_vk_surface_t *)surface->backend.data;
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)surface->display->backend.data;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;

	tbm_surface_h tbm_surface          = NULL;
	tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_NONE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);

	/* Drop the surface object lock while waiting so other threads (e.g. the
	 * backend thread releasing buffers) can make progress. */
	TPL_OBJECT_UNLOCK(surface);
	TRACE_BEGIN("WAIT_DEQUEUEABLE");
	if (timeout_ns != UINT64_MAX) {
		/* NOTE(review): timeout_ns/1000 yields microseconds — confirm the
		 * unit tbm_surface_queue_can_dequeue_wait_timeout() expects
		 * (milliseconds would require /1000000). */
		tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
						swapchain->tbm_queue, timeout_ns/1000);
	} else {
		/* Infinite wait; NOTE(review): the return value is ignored here, so
		 * tsq_err stays NONE and a failed wait is not detected on this
		 * branch — confirm this is intentional. */
		tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
	}
	TRACE_END();
	TPL_OBJECT_LOCK(surface);

	if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
		TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
				timeout_ns);
		return NULL;
	} else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
				wl_vk_surface, swapchain->tbm_queue, tsq_err);
		return NULL;
	}

	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

	/* A reset (e.g. resize) invalidates the swapchain; the caller must
	 * recreate it before dequeuing again. */
	if (wl_vk_surface->reset) {
		TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
				  swapchain, swapchain->tbm_queue);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
										&tbm_surface);
	if (!tbm_surface) {
		TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
				swapchain->tbm_queue, wl_vk_surface, tsq_err);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	/* Extra ref keeps the surface alive while the app renders into it;
	 * dropped again on enqueue/cancel. */
	tbm_surface_internal_ref(tbm_surface);

	wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");

	tpl_gmutex_lock(&wl_vk_buffer->mutex);
	wl_vk_buffer->status = DEQUEUED;

	if (release_fence) {
#if TIZEN_FEATURE_ENABLE
		if (wl_vk_surface->surface_sync) {
			/* Hand the explicit-sync release fence fd to the caller and
			 * give up ownership of it. */
			*release_fence = wl_vk_buffer->release_fence_fd;
			TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
					  wl_vk_surface, wl_vk_buffer, *release_fence);
			wl_vk_buffer->release_fence_fd = -1;
		} else
#endif
		{
			*release_fence = -1;
		}
	}

	wl_vk_surface->reset = TPL_FALSE;

	TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
			  wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
			  release_fence ? *release_fence : -1);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return tbm_surface;
}
2027
2028 static tpl_result_t
2029 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
2030                                                                           tbm_surface_h tbm_surface)
2031 {
2032         TPL_ASSERT(surface);
2033         TPL_ASSERT(surface->backend.data);
2034
2035         tpl_wl_vk_surface_t *wl_vk_surface  =
2036                 (tpl_wl_vk_surface_t *)surface->backend.data;
2037         tpl_wl_vk_swapchain_t *swapchain    = NULL;
2038         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2039         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2040
2041         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2042                                                                   TPL_ERROR_INVALID_PARAMETER);
2043
2044         swapchain = wl_vk_surface->swapchain;
2045         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2046         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2047                                                                  TPL_ERROR_INVALID_PARAMETER);
2048
2049         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2050         if (wl_vk_buffer) {
2051                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2052                 wl_vk_buffer->status = RELEASED;
2053                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2054         }
2055
2056         tbm_surface_internal_unref(tbm_surface);
2057
2058         TPL_INFO("[CANCEL BUFFER]",
2059                          "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2060                           wl_vk_surface, swapchain, tbm_surface,
2061                           _get_tbm_surface_bo_name(tbm_surface));
2062
2063         tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2064                                                                                            tbm_surface);
2065         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2066                 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2067                 return TPL_ERROR_INVALID_OPERATION;
2068         }
2069
2070         return TPL_ERROR_NONE;
2071 }
2072
2073 static tpl_result_t
2074 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2075                                                                            tbm_surface_h tbm_surface,
2076                                                                            int num_rects, const int *rects,
2077                                                                            int32_t acquire_fence)
2078 {
2079         TPL_ASSERT(surface);
2080         TPL_ASSERT(surface->display);
2081         TPL_ASSERT(surface->backend.data);
2082         TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2083
2084         tpl_wl_vk_surface_t *wl_vk_surface  =
2085                 (tpl_wl_vk_surface_t *) surface->backend.data;
2086         tpl_wl_vk_swapchain_t *swapchain    = wl_vk_surface->swapchain;
2087         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2088         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2089         int bo_name                         = -1;
2090
2091         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2092         TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2093         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2094                                                                   TPL_ERROR_INVALID_PARAMETER);
2095
2096         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2097         if (!wl_vk_buffer) {
2098                 TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
2099                 return TPL_ERROR_INVALID_PARAMETER;
2100         }
2101
2102         bo_name = wl_vk_buffer->bo_name;
2103
2104         tpl_gmutex_lock(&wl_vk_buffer->mutex);
2105
2106         /* If there are received region information, save it to wl_vk_buffer */
2107         if (num_rects && rects) {
2108                 if (wl_vk_buffer->rects != NULL) {
2109                         free(wl_vk_buffer->rects);
2110                         wl_vk_buffer->rects = NULL;
2111                         wl_vk_buffer->num_rects = 0;
2112                 }
2113
2114                 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2115                 wl_vk_buffer->num_rects = num_rects;
2116
2117                 if (wl_vk_buffer->rects) {
2118                         memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2119                                    sizeof(int) * 4 * num_rects);
2120                 } else {
2121                         TPL_ERR("Failed to allocate memory for rects info.");
2122                 }
2123         }
2124
2125         if (wl_vk_buffer->acquire_fence_fd != -1)
2126                 close(wl_vk_buffer->acquire_fence_fd);
2127
2128         wl_vk_buffer->acquire_fence_fd = acquire_fence;
2129
2130         wl_vk_buffer->status = ENQUEUED;
2131         TPL_LOG_T("WL_VK",
2132                           "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2133                           wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2134
2135         tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2136
2137         tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2138                                                                                 tbm_surface);
2139         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2140                 tbm_surface_internal_unref(tbm_surface);
2141                 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2142                                 tbm_surface, wl_vk_surface, tsq_err);
2143                 return TPL_ERROR_INVALID_OPERATION;
2144         }
2145
2146         tbm_surface_internal_unref(tbm_surface);
2147
2148         return TPL_ERROR_NONE;
2149 }
2150
/* wl_buffer listener: the compositor emits "release" when it has finished
 * reading the attached buffer. The cast is needed because
 * __cb_wl_buffer_release is declared with struct wl_proxy * instead of the
 * listener's struct wl_buffer * parameter type. */
static const struct wl_buffer_listener wl_buffer_release_listener = {
	(void *)__cb_wl_buffer_release,
};
2154
2155 static tpl_result_t
2156 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
2157 {
2158         tbm_surface_h tbm_surface            = NULL;
2159         tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
2160         tpl_wl_vk_display_t *wl_vk_display   = wl_vk_surface->wl_vk_display;
2161         tpl_wl_vk_swapchain_t *swapchain     = wl_vk_surface->swapchain;
2162         tpl_wl_vk_buffer_t *wl_vk_buffer     = NULL;
2163         tpl_bool_t ready_to_commit           = TPL_TRUE;
2164
2165         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2166
2167         while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
2168                 tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
2169                                                                                         &tbm_surface);
2170                 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2171                         TPL_ERR("Failed to acquire from tbm_queue(%p)",
2172                                         swapchain->tbm_queue);
2173                         return TPL_ERROR_INVALID_OPERATION;
2174                 }
2175
2176                 tbm_surface_internal_ref(tbm_surface);
2177
2178                 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2179                 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2180                                                                            "wl_vk_buffer sould be not NULL");
2181
2182                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2183
2184                 wl_vk_buffer->status = ACQUIRED;
2185
2186                 TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2187                                   wl_vk_buffer, tbm_surface,
2188                                   _get_tbm_surface_bo_name(tbm_surface));
2189
2190                 if (wl_vk_buffer->wl_buffer == NULL) {
2191                         wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2192                                                 wl_vk_display->wl_tbm_client, tbm_surface);
2193
2194                         if (!wl_vk_buffer->wl_buffer) {
2195                                 TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
2196                                                  wl_vk_display->wl_tbm_client, tbm_surface);
2197                         } else {
2198                                 if (wl_vk_buffer->acquire_fence_fd == -1 ||
2199                                         wl_vk_display->use_explicit_sync == TPL_FALSE) {
2200                                         wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2201                                                                                    &wl_buffer_release_listener, wl_vk_buffer);
2202                                 }
2203
2204                                 TPL_LOG_T("WL_VK",
2205                                                   "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
2206                                                   wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
2207                         }
2208                 }
2209
2210                 if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done)
2211                         ready_to_commit = TPL_TRUE;
2212                 else {
2213                         wl_vk_buffer->status = WAITING_VBLANK;
2214                         __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
2215                         ready_to_commit = TPL_FALSE;
2216                 }
2217
2218                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2219
2220                 if (ready_to_commit)
2221                         _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2222         }
2223
2224         return TPL_ERROR_NONE;
2225 }
2226
2227 #if TIZEN_FEATURE_ENABLE
2228 static void
2229 __cb_buffer_fenced_release(void *data,
2230                                                    struct zwp_linux_buffer_release_v1 *release,
2231                                                    int32_t fence)
2232 {
2233         tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
2234         tbm_surface_h tbm_surface         = NULL;
2235
2236         TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2237
2238         tbm_surface = wl_vk_buffer->tbm_surface;
2239
2240         if (tbm_surface_internal_is_valid(tbm_surface)) {
2241                 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2242                 tpl_wl_vk_swapchain_t *swapchain   = NULL;
2243
2244                 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2245                         TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2246                         tbm_surface_internal_unref(tbm_surface);
2247                         return;
2248                 }
2249
2250                 swapchain = wl_vk_surface->swapchain;
2251
2252                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2253                 if (wl_vk_buffer->status == COMMITTED) {
2254                         tbm_surface_queue_error_e tsq_err;
2255
2256                         zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2257                         wl_vk_buffer->buffer_release = NULL;
2258
2259                         wl_vk_buffer->release_fence_fd = fence;
2260                         wl_vk_buffer->status = RELEASED;
2261
2262                         TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2263                                            wl_vk_buffer->bo_name,
2264                                            fence);
2265                         TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2266                                                         wl_vk_buffer->bo_name);
2267
2268                         TPL_LOG_T("WL_VK",
2269                                           "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2270                                           wl_vk_buffer, tbm_surface,
2271                                           wl_vk_buffer->bo_name,
2272                                           fence);
2273
2274                         tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2275                                                                                                 tbm_surface);
2276                         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2277                                 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2278
2279                         tbm_surface_internal_unref(tbm_surface);
2280                 }
2281
2282                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2283
2284         } else {
2285                 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2286         }
2287 }
2288
2289 static void
2290 __cb_buffer_immediate_release(void *data,
2291                                                           struct zwp_linux_buffer_release_v1 *release)
2292 {
2293         tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
2294         tbm_surface_h tbm_surface           = NULL;
2295
2296         TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2297
2298         tbm_surface = wl_vk_buffer->tbm_surface;
2299
2300         if (tbm_surface_internal_is_valid(tbm_surface)) {
2301                 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2302                 tpl_wl_vk_swapchain_t *swapchain   = NULL;
2303
2304                 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2305                         TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2306                         tbm_surface_internal_unref(tbm_surface);
2307                         return;
2308                 }
2309
2310                 swapchain = wl_vk_surface->swapchain;
2311
2312                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2313                 if (wl_vk_buffer->status == COMMITTED) {
2314                         tbm_surface_queue_error_e tsq_err;
2315
2316                         zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2317                         wl_vk_buffer->buffer_release = NULL;
2318
2319                         wl_vk_buffer->release_fence_fd = -1;
2320                         wl_vk_buffer->status = RELEASED;
2321
2322                         TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
2323                                            _get_tbm_surface_bo_name(tbm_surface));
2324                         TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2325                                                         _get_tbm_surface_bo_name(tbm_surface));
2326
2327                         TPL_LOG_T("WL_VK",
2328                                           "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2329                                           wl_vk_buffer, tbm_surface,
2330                                           _get_tbm_surface_bo_name(tbm_surface));
2331
2332                         tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2333                                                                                                 tbm_surface);
2334                         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2335                                 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2336
2337                         tbm_surface_internal_unref(tbm_surface);
2338                 }
2339
2340                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2341
2342         } else {
2343                 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2344         }
2345 }
2346
/* Listener vtable for zwp_linux_buffer_release_v1; entry order must match
 * the protocol: fenced_release first, then immediate_release.
 * (Name keeps the historical "listner" spelling; it is referenced elsewhere
 * in this file.) */
static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
	__cb_buffer_fenced_release,
	__cb_buffer_immediate_release,
};
2351 #endif
2352
/* wl_buffer.release callback (non-explicit-sync path): the compositor is
 * done reading this buffer, so release it back to the tbm_queue and drop
 * the ref taken at acquire time. Only a buffer in COMMITTED state is
 * released; buffers in any other state are left untouched. */
static void
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
{
	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain   = NULL;
		tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;

		/* The event may arrive after the swapchain is gone; bail out but
		 * still drop the acquire-time ref. */
		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);
			return;
		}

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		if (wl_vk_buffer->status == COMMITTED) {

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
												tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							wl_vk_buffer->bo_name);

			TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
					  wl_vk_buffer->wl_buffer, tbm_surface,
					  wl_vk_buffer->bo_name);

			/* Drop the ref added in _thread_surface_queue_acquire. */
			tbm_surface_internal_unref(tbm_surface);
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	} else {
		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
2403
/* tdm vblank callback (backend thread): marks the surface's vblank as done
 * and commits at most one buffer that was parked waiting for it. A TDM
 * timeout is only logged and otherwise treated like a normal vblank so
 * rendering keeps going. */
static void
__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
					   unsigned int sequence, unsigned int tv_sec,
					   unsigned int tv_usec, void *user_data)
{
	tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
	tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;

	/* NOTE(review): pointer truncated to int for the trace id — this
	 * intentionally matches the TRACE_ASYNC_BEGIN cast in
	 * _thread_surface_vblank_wait, but loses bits on 64-bit builds. */
	TRACE_ASYNC_END((int)wl_vk_surface, "WAIT_VBLANK");
	TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface);

	if (error == TDM_ERROR_TIMEOUT)
		TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
				 wl_vk_surface);

	wl_vk_surface->vblank_done = TPL_TRUE;

	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	/* Commit only the oldest parked buffer; any remaining buffers wait for
	 * the next vblank (commit re-arms the wait). */
	wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
						wl_vk_surface->vblank_waiting_buffers,
						NULL);
	if (wl_vk_buffer)
		_thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
2429
2430 static tpl_result_t
2431 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2432 {
2433         tdm_error tdm_err                     = TDM_ERROR_NONE;
2434         tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
2435
2436         if (wl_vk_surface->vblank == NULL) {
2437                 wl_vk_surface->vblank =
2438                         _thread_create_tdm_client_vblank(wl_vk_display->tdm_client);
2439                 if (!wl_vk_surface->vblank) {
2440                         TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2441                                          wl_vk_surface);
2442                         return TPL_ERROR_OUT_OF_MEMORY;
2443                 }
2444         }
2445
2446         tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2447                         wl_vk_surface->post_interval,
2448                         __cb_tdm_client_vblank,
2449                         (void *)wl_vk_surface);
2450
2451         if (tdm_err == TDM_ERROR_NONE) {
2452                 wl_vk_surface->vblank_done = TPL_FALSE;
2453                 TRACE_ASYNC_BEGIN((int)wl_vk_surface, "WAIT_VBLANK");
2454         } else {
2455                 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2456                 return TPL_ERROR_INVALID_OPERATION;
2457         }
2458
2459         return TPL_ERROR_NONE;
2460 }
2461
2462 static void
2463 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
2464                                                   tpl_wl_vk_buffer_t *wl_vk_buffer)
2465 {
2466         tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
2467         struct wl_surface *wl_surface         = wl_vk_surface->wl_surface;
2468         uint32_t version;
2469
2470         TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2471                                                                    "wl_vk_buffer sould be not NULL");
2472
2473         if (wl_vk_buffer->wl_buffer == NULL) {
2474                 wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2475                                                                                         wl_vk_display->wl_tbm_client,
2476                                                                                         wl_vk_buffer->tbm_surface);
2477                 if (wl_vk_buffer->wl_buffer &&
2478                         (wl_vk_buffer->acquire_fence_fd == -1 ||
2479                          wl_vk_display->use_explicit_sync == TPL_FALSE)) {
2480                                 wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2481                                                                            &wl_buffer_release_listener, wl_vk_buffer);
2482                 }
2483         }
2484         TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
2485                                                                    "[FATAL] Failed to create wl_buffer");
2486
2487         version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
2488
2489         wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
2490                                           wl_vk_buffer->dx, wl_vk_buffer->dy);
2491
2492         if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
2493                 if (version < 4) {
2494                         wl_surface_damage(wl_surface,
2495                                                           wl_vk_buffer->dx, wl_vk_buffer->dy,
2496                                                           wl_vk_buffer->width, wl_vk_buffer->height);
2497                 } else {
2498                         wl_surface_damage_buffer(wl_surface,
2499                                                                          0, 0,
2500                                                                          wl_vk_buffer->width, wl_vk_buffer->height);
2501                 }
2502         } else {
2503                 int i;
2504                 for (i = 0; i < wl_vk_buffer->num_rects; i++) {
2505                         int inverted_y =
2506                                 wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
2507                                                 wl_vk_buffer->rects[i * 4 + 3]);
2508                         if (version < 4) {
2509                                 wl_surface_damage(wl_surface,
2510                                                                   wl_vk_buffer->rects[i * 4 + 0],
2511                                                                   inverted_y,
2512                                                                   wl_vk_buffer->rects[i * 4 + 2],
2513                                                                   wl_vk_buffer->rects[i * 4 + 3]);
2514                         } else {
2515                                 wl_surface_damage_buffer(wl_surface,
2516                                                                                  wl_vk_buffer->rects[i * 4 + 0],
2517                                                                                  inverted_y,
2518                                                                                  wl_vk_buffer->rects[i * 4 + 2],
2519                                                                                  wl_vk_buffer->rects[i * 4 + 3]);
2520                         }
2521                 }
2522         }
2523
2524 #if TIZEN_FEATURE_ENABLE
2525         if (wl_vk_display->use_explicit_sync &&
2526                 wl_vk_surface->surface_sync &&
2527                 wl_vk_buffer->acquire_fence_fd != -1) {
2528
2529                 zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
2530                                                                                                                            wl_vk_buffer->acquire_fence_fd);
2531                 TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
2532                                   wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
2533                 close(wl_vk_buffer->acquire_fence_fd);
2534                 wl_vk_buffer->acquire_fence_fd = -1;
2535
2536                 wl_vk_buffer->buffer_release =
2537                         zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
2538                 if (!wl_vk_buffer->buffer_release) {
2539                         TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
2540                 } else {
2541                         zwp_linux_buffer_release_v1_add_listener(
2542                                 wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
2543                         TPL_DEBUG("add explicit_sync_release_listener.");
2544                 }
2545         }
2546 #endif
2547
2548         wl_surface_commit(wl_surface);
2549
2550         wl_display_flush(wl_vk_display->wl_display);
2551
2552         TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2553                                           wl_vk_buffer->bo_name);
2554
2555         tpl_gmutex_lock(&wl_vk_buffer->mutex);
2556
2557         wl_vk_buffer->need_to_commit   = TPL_FALSE;
2558         wl_vk_buffer->status           = COMMITTED;
2559
2560         tpl_gcond_signal(&wl_vk_buffer->cond);
2561
2562         tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2563
2564         TPL_LOG_T("WL_VK",
2565                           "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
2566                           wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
2567                           wl_vk_buffer->bo_name);
2568
2569         if (wl_vk_display->use_wait_vblank &&
2570                 _thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
2571                 TPL_ERR("Failed to set wait vblank.");
2572 }
2573
2574 tpl_bool_t
2575 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2576 {
2577         if (!native_dpy) return TPL_FALSE;
2578
2579         if (_check_native_handle_is_wl_display(native_dpy))
2580                 return TPL_TRUE;
2581
2582         return TPL_FALSE;
2583 }
2584
/* Populate the display backend vtable with the threaded wayland-vulkan
 * entry points. Called by the tpl frontend during display creation. */
void
__tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_vk_display_init;
	backend->fini = __tpl_wl_vk_display_fini;
	backend->query_config = __tpl_wl_vk_display_query_config;
	backend->filter_config = __tpl_wl_vk_display_filter_config;
	backend->query_window_supported_buffer_count =
		__tpl_wl_vk_display_query_window_supported_buffer_count;
	backend->query_window_supported_present_modes =
		__tpl_wl_vk_display_query_window_supported_present_modes;
}
2602
/* Populate the surface backend vtable with the threaded wayland-vulkan
 * entry points (swapchain management, buffer dequeue/enqueue, etc.).
 * Called by the tpl frontend during surface creation. */
void
__tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_vk_surface_init;
	backend->fini = __tpl_wl_vk_surface_fini;
	backend->validate = __tpl_wl_vk_surface_validate;
	backend->cancel_dequeued_buffer =
		__tpl_wl_vk_surface_cancel_buffer;
	backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
	backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
	backend->get_swapchain_buffers =
		__tpl_wl_vk_surface_get_swapchain_buffers;
	backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
	backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
	backend->set_post_interval =
		__tpl_wl_vk_surface_set_post_interval;
}
2625
2626 static int
2627 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2628 {
2629         return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
2630 }
2631
2632 static void
2633 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2634 {
2635         int idx = 0;
2636
2637         tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2638         TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2639                          wl_vk_surface, wl_vk_surface->buffer_cnt);
2640         for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2641                 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2642                 if (wl_vk_buffer) {
2643                         TPL_INFO("[INFO]",
2644                                          "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2645                                          idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2646                                          wl_vk_buffer->bo_name,
2647                                          status_to_string[wl_vk_buffer->status]);
2648                 }
2649         }
2650         tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2651 }