/*
 * src/tpl_wl_vk_thread.c — platform/core/uifw/libtpl-egl
 * Change summary: fix memory leak.
 */
1 #define inline __inline__
2 #undef inline
3
4 #include "tpl_internal.h"
5
6 #include <string.h>
7 #include <fcntl.h>
8 #include <unistd.h>
9 #include <sys/eventfd.h>
10
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
15
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
19
20 #include <tdm_client.h>
21
22 #ifndef TIZEN_FEATURE_ENABLE
23 #define TIZEN_FEATURE_ENABLE 1
24 #endif
25
26 #if TIZEN_FEATURE_ENABLE
27 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
28 #endif
29
30 #include "tpl_utils_gthread.h"
31
32 #define BUFFER_ARRAY_SIZE 10
33 #define VK_CLIENT_QUEUE_SIZE 3
34
35 static int wl_vk_buffer_key;
36 #define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
37
38 typedef struct _tpl_wl_vk_display       tpl_wl_vk_display_t;
39 typedef struct _tpl_wl_vk_surface       tpl_wl_vk_surface_t;
40 typedef struct _tpl_wl_vk_swapchain     tpl_wl_vk_swapchain_t;
41 typedef struct _tpl_wl_vk_buffer        tpl_wl_vk_buffer_t;
42
/* Per-display backend state. All wayland event handling for this display
 * runs on a dedicated tpl thread ("wl_vk_thread"); see disp_source. */
struct _tpl_wl_vk_display {
        tpl_gsource                  *disp_source;   /* wl_display fd source on the tpl thread */
        tpl_gthread                  *thread;        /* dedicated event thread */
        tpl_gmutex                    wl_event_mutex; /* serializes ev_queue dispatch with other threads */

        struct wl_display            *wl_display;
        struct wl_event_queue        *ev_queue;      /* private queue for backend-owned proxies */
        struct wayland_tbm_client    *wl_tbm_client;
        int                           last_error; /* errno of the last wl_display error*/

        tpl_bool_t                    wl_initialized;

        /* vblank provider (tdm-client) state */
        struct {
                tdm_client                   *tdm_client;
                tpl_gsource                  *tdm_source;
                int                           tdm_display_fd;
                tpl_bool_t                    tdm_initialized;
                /* To make sure that tpl_gsource has been successfully finalized. */
                tpl_bool_t                    gsource_finalized;
                tpl_gmutex                    tdm_mutex;
                tpl_gcond                     tdm_cond;
        } tdm;

        tpl_bool_t                    use_wait_vblank;   /* disabled via TPL_WAIT_VBLANK=0 */
        tpl_bool_t                    use_explicit_sync; /* set when explicit-sync global is bound */
        tpl_bool_t                    prepared;          /* TRUE between prepare_read and read/cancel */

        /* To make sure that tpl_gsource has been successfully finalized. */
        tpl_bool_t                    gsource_finalized;
        tpl_gmutex                    disp_mutex;
        tpl_gcond                     disp_cond;

        /* device surface capabilities */
        int                           min_buffer;
        int                           max_buffer;
        int                           present_modes;
#if TIZEN_FEATURE_ENABLE
        struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
#endif
};
83
/* Swapchain state shared between the Vulkan frontend and the wl_vk thread.
 * Ref-counted (ref_cnt) because the thread may still reference it while the
 * frontend is tearing it down. */
struct _tpl_wl_vk_swapchain {
        tpl_wl_vk_surface_t          *wl_vk_surface;

        tbm_surface_queue_h           tbm_queue;  /* buffer queue backing this swapchain */
        tpl_result_t                  result;     /* result of the thread-side queue request */

        tpl_bool_t                    create_done; /* presumably set when CREATE_QUEUE completed — confirm in queue-create path */

        /* requested swapchain properties */
        struct {
                int                       width;
                int                       height;
                tbm_format                format;
                int                       buffer_count;
                int                       present_mode;
        } properties;

        tbm_surface_h                *swapchain_buffers; /* buffer array exposed to the client */

        tpl_util_atomic_uint          ref_cnt;
};
104
/* Request codes sent from the frontend to the surface gsource running on
 * the wl_vk thread (stored in wl_vk_surface->sent_message). */
typedef enum surf_message {
        NONE_MESSAGE = 0,  /* no pending request */
        INIT_SURFACE,      /* initialize surface state in the thread */
        CREATE_QUEUE,      /* create the swapchain tbm_queue */
        DESTROY_QUEUE,     /* destroy the swapchain tbm_queue */
        ACQUIRABLE,        /* a buffer became acquirable from the queue */
} surf_message;
112
/* Per-surface backend state. Buffer tracking, vblank waiting, and commits
 * happen on the wl_vk thread via surf_source. */
struct _tpl_wl_vk_surface {
        tpl_gsource                  *surf_source;  /* surface message source on the tpl thread */

        tpl_wl_vk_swapchain_t        *swapchain;

        struct wl_surface            *wl_surface;
#if TIZEN_FEATURE_ENABLE
        struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
#endif
        tdm_client_vblank            *vblank;       /* per-surface vblank object */

        /* surface information */
        int                           render_done_cnt;

        tpl_wl_vk_display_t          *wl_vk_display;
        tpl_surface_t                *tpl_surface;

        /* wl_vk_buffer array for buffer tracing */
        tpl_wl_vk_buffer_t           *buffers[BUFFER_ARRAY_SIZE];
        int                           buffer_cnt; /* the number of using wl_vk_buffers */
        tpl_gmutex                    buffers_mutex; /* guards buffers[] and buffer_cnt */

        tpl_list_t                   *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */

        tpl_gmutex                    surf_mutex;
        tpl_gcond                     surf_cond;

        /* for waiting draw done */
        tpl_bool_t                    is_activated;
        tpl_bool_t                    reset; /* TRUE if queue reseted by external  */
        tpl_bool_t                    vblank_done;
        tpl_bool_t                    initialized_in_thread;

        /* To make sure that tpl_gsource has been successfully finalized. */
        tpl_bool_t                    gsource_finalized;

        surf_message                  sent_message; /* pending request for the thread; see surf_message */

        int                           post_interval;
};
153
/* Life-cycle states of a wl_vk_buffer. Keep index-aligned with
 * status_to_string[] below. */
typedef enum buffer_status {
        RELEASED = 0,             // 0
        DEQUEUED,                 // 1
        ENQUEUED,                 // 2
        ACQUIRED,                 // 3
        WAITING_SIGNALED,         // 4
        WAITING_VBLANK,           // 5
        COMMITTED,                // 6
} buffer_status_t;
163
/* Printable names for buffer_status_t — must stay index-aligned with the
 * enum above (7 entries, RELEASED..COMMITTED). */
static const char *status_to_string[7] = {
        "RELEASED",                 // 0
        "DEQUEUED",                 // 1
        "ENQUEUED",                 // 2
        "ACQUIRED",                 // 3
        "WAITING_SIGNALED",         // 4
        "WAITING_VBLANK",           // 5
        "COMMITTED",                // 6
};
173
/* Per-buffer tracking record, attached to a tbm_surface via user data
 * (KEY_WL_VK_BUFFER) and indexed into wl_vk_surface->buffers[]. */
struct _tpl_wl_vk_buffer {
        tbm_surface_h                 tbm_surface;
        int                           bo_name;     /* tbm bo name, used for tracing/logging */

        struct wl_buffer             *wl_buffer;
        int                           dx, dy; /* position to attach to wl_surface */
        int                           width, height; /* size to attach to wl_surface */

        buffer_status_t               status; /* for tracing buffer status */
        int                           idx; /* position index in buffers array of wl_vk_surface */

        /* for damage region */
        int                           num_rects;
        int                          *rects;

        /* for checking need_to_commit (frontbuffer mode) */
        tpl_bool_t                    need_to_commit;

#if TIZEN_FEATURE_ENABLE
        /* to get release event via zwp_linux_buffer_release_v1 */
        struct zwp_linux_buffer_release_v1 *buffer_release;
#endif

        /* each buffers own its release_fence_fd, until it passes ownership
         * to it to EGL */
        int32_t                       release_fence_fd;

        /* each buffers own its acquire_fence_fd.
         * If it use zwp_linux_buffer_release_v1 the ownership of this fd
         * will be passed to display server
         * Otherwise it will be used as a fence waiting for render done
         * on tpl thread */
        int32_t                       acquire_fence_fd;

        tpl_gmutex                    mutex;
        tpl_gcond                     cond;

        tpl_wl_vk_surface_t          *wl_vk_surface; /* owning surface (back-pointer) */
};
213
214 static void
215 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
216 static int
217 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
218 static void
219 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
220 static void
221 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
222 static tpl_result_t
223 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
224 static void
225 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
226 static tpl_result_t
227 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
228 static void
229 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
230                                                   tpl_wl_vk_buffer_t *wl_vk_buffer);
231
232 static tpl_bool_t
233 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
234 {
235         struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
236
237         if (!wl_vk_native_dpy) {
238                 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
239                 return TPL_FALSE;
240         }
241
242         /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
243            is a memory address pointing the structure of wl_display_interface. */
244         if (wl_vk_native_dpy == &wl_display_interface)
245                 return TPL_TRUE;
246
247         if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
248                                 strlen(wl_display_interface.name)) == 0) {
249                 return TPL_TRUE;
250         }
251
252         return TPL_FALSE;
253 }
254
255 static tpl_bool_t
256 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
257 {
258         tpl_wl_vk_display_t        *wl_vk_display = NULL;
259         tdm_error                   tdm_err = TDM_ERROR_NONE;
260
261         TPL_IGNORE(message);
262
263         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
264         if (!wl_vk_display) {
265                 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
266                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
267                 return TPL_FALSE;
268         }
269
270         tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client);
271
272         /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
273          * When tdm_source is no longer available due to an unexpected situation,
274          * wl_vk_thread must remove it from the thread and destroy it.
275          * In that case, tdm_vblank can no longer be used for surfaces and displays
276          * that used this tdm_source. */
277         if (tdm_err != TDM_ERROR_NONE) {
278                 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
279                                 tdm_err);
280                 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
281
282                 tpl_gsource_destroy(gsource, TPL_FALSE);
283
284                 wl_vk_display->tdm.tdm_source = NULL;
285
286                 return TPL_FALSE;
287         }
288
289         return TPL_TRUE;
290 }
291
292 static void
293 __thread_func_tdm_finalize(tpl_gsource *gsource)
294 {
295         tpl_wl_vk_display_t *wl_vk_display = NULL;
296
297         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
298
299         tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
300
301         TPL_INFO("[TDM_CLIENT_FINI]",
302                          "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
303                          wl_vk_display, wl_vk_display->tdm.tdm_client,
304                          wl_vk_display->tdm.tdm_display_fd);
305
306         if (wl_vk_display->tdm.tdm_client) {
307                 tdm_client_destroy(wl_vk_display->tdm.tdm_client);
308                 wl_vk_display->tdm.tdm_client = NULL;
309                 wl_vk_display->tdm.tdm_display_fd = -1;
310         }
311
312         wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
313         wl_vk_display->tdm.gsource_finalized = TPL_TRUE;
314
315         tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond);
316         tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
317 }
318
/* gsource callbacks for the tdm-client fd (vblank event delivery). */
static tpl_gsource_functions tdm_funcs = {
        .prepare  = NULL,
        .check    = NULL,
        .dispatch = __thread_func_tdm_dispatch,
        .finalize = __thread_func_tdm_finalize,
};
325
326 static tpl_result_t
327 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
328 {
329         tdm_client       *tdm_client = NULL;
330         int               tdm_display_fd = -1;
331         tdm_error         tdm_err = TDM_ERROR_NONE;
332
333         tdm_client = tdm_client_create(&tdm_err);
334         if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
335                 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
336                 return TPL_ERROR_INVALID_OPERATION;
337         }
338
339         tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
340         if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
341                 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
342                 tdm_client_destroy(tdm_client);
343                 return TPL_ERROR_INVALID_OPERATION;
344         }
345
346         wl_vk_display->tdm.tdm_display_fd  = tdm_display_fd;
347         wl_vk_display->tdm.tdm_client      = tdm_client;
348         wl_vk_display->tdm.tdm_source      = NULL;
349         wl_vk_display->tdm.tdm_initialized = TPL_TRUE;
350
351         TPL_INFO("[TDM_CLIENT_INIT]",
352                          "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
353                          wl_vk_display, tdm_client, tdm_display_fd);
354
355         return TPL_ERROR_NONE;
356 }
357
358 static void
359 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
360                                                           uint32_t name, const char *interface,
361                                                           uint32_t version)
362 {
363 #if TIZEN_FEATURE_ENABLE
364         tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
365
366         if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
367                 char *env = tpl_getenv("TPL_EFS");
368                 if (env && !atoi(env)) {
369                         wl_vk_display->use_explicit_sync = TPL_FALSE;
370                 } else {
371                         wl_vk_display->explicit_sync =
372                                         wl_registry_bind(wl_registry, name,
373                                                                          &zwp_linux_explicit_synchronization_v1_interface, 1);
374                         wl_vk_display->use_explicit_sync = TPL_TRUE;
375                         TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
376                 }
377         }
378 #endif
379 }
380
/* wl_registry global remove handler — intentionally empty; this backend
 * does not react to globals disappearing. */
static void
__cb_wl_resistry_global_remove_callback(void *data,
                                                                                struct wl_registry *wl_registry,
                                                                                uint32_t name)
{
}
387
/* Listener pairing the announce/remove callbacks above. */
static const struct wl_registry_listener registry_listener = {
        __cb_wl_resistry_global_callback,
        __cb_wl_resistry_global_remove_callback
};
392
393 static void
394 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
395                                           const char *func_name)
396 {
397         int dpy_err;
398         char buf[1024];
399         strerror_r(errno, buf, sizeof(buf));
400
401         if (wl_vk_display->last_error == errno)
402                 return;
403
404         TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
405
406         dpy_err = wl_display_get_error(wl_vk_display->wl_display);
407         if (dpy_err == EPROTO) {
408                 const struct wl_interface *err_interface;
409                 uint32_t err_proxy_id, err_code;
410                 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
411                                                                                                  &err_interface,
412                                                                                                  &err_proxy_id);
413                 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
414                                 err_interface->name, err_code, err_proxy_id);
415         }
416
417         wl_vk_display->last_error = errno;
418 }
419
420 static tpl_result_t
421 _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
422 {
423         struct wl_registry *registry                = NULL;
424         struct wl_event_queue *queue                = NULL;
425         struct wl_display *display_wrapper          = NULL;
426         struct wl_proxy *wl_tbm                     = NULL;
427         struct wayland_tbm_client *wl_tbm_client    = NULL;
428         int ret;
429         tpl_result_t result = TPL_ERROR_NONE;
430
431         queue = wl_display_create_queue(wl_vk_display->wl_display);
432         if (!queue) {
433                 TPL_ERR("Failed to create wl_queue wl_display(%p)",
434                                 wl_vk_display->wl_display);
435                 result = TPL_ERROR_INVALID_OPERATION;
436                 goto fini;
437         }
438
439         wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
440         if (!wl_vk_display->ev_queue) {
441                 TPL_ERR("Failed to create wl_queue wl_display(%p)",
442                                 wl_vk_display->wl_display);
443                 result = TPL_ERROR_INVALID_OPERATION;
444                 goto fini;
445         }
446
447         display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
448         if (!display_wrapper) {
449                 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
450                                 wl_vk_display->wl_display);
451                 result = TPL_ERROR_INVALID_OPERATION;
452                 goto fini;
453         }
454
455         wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
456
457         registry = wl_display_get_registry(display_wrapper);
458         if (!registry) {
459                 TPL_ERR("Failed to create wl_registry");
460                 result = TPL_ERROR_INVALID_OPERATION;
461                 goto fini;
462         }
463
464         wl_proxy_wrapper_destroy(display_wrapper);
465         display_wrapper = NULL;
466
467         wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
468         if (!wl_tbm_client) {
469                 TPL_ERR("Failed to initialize wl_tbm_client.");
470                 result = TPL_ERROR_INVALID_CONNECTION;
471                 goto fini;
472         }
473
474         wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
475         if (!wl_tbm) {
476                 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
477                 result = TPL_ERROR_INVALID_CONNECTION;
478                 goto fini;
479         }
480
481         wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
482         wl_vk_display->wl_tbm_client = wl_tbm_client;
483
484         if (wl_registry_add_listener(registry, &registry_listener,
485                                                                  wl_vk_display)) {
486                 TPL_ERR("Failed to wl_registry_add_listener");
487                 result = TPL_ERROR_INVALID_OPERATION;
488                 goto fini;
489         }
490
491         ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
492         if (ret == -1) {
493                 _wl_display_print_err(wl_vk_display, "roundtrip_queue");
494                 result = TPL_ERROR_INVALID_OPERATION;
495                 goto fini;
496         }
497
498 #if TIZEN_FEATURE_ENABLE
499         if (wl_vk_display->explicit_sync) {
500                 wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
501                                                    wl_vk_display->ev_queue);
502                 TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
503                                   wl_vk_display->explicit_sync);
504         }
505 #endif
506
507         wl_vk_display->wl_initialized = TPL_TRUE;
508
509         TPL_INFO("[WAYLAND_INIT]",
510                          "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
511                          wl_vk_display, wl_vk_display->wl_display,
512                          wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
513 #if TIZEN_FEATURE_ENABLE
514         TPL_INFO("[WAYLAND_INIT]",
515                          "explicit_sync(%p)",
516                          wl_vk_display->explicit_sync);
517 #endif
518 fini:
519         if (display_wrapper)
520                 wl_proxy_wrapper_destroy(display_wrapper);
521         if (registry)
522                 wl_registry_destroy(registry);
523         if (queue)
524                 wl_event_queue_destroy(queue);
525
526         return result;
527 }
528
/* Tears down the wayland side of wl_vk_display on the wl_vk thread.
 * Order matters: cancel any outstanding prepare_read, drain pending
 * events, then destroy protocol objects before the queue they live on. */
static void
_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
{
        /* If wl_vk_display is in prepared state, cancel it */
        if (wl_vk_display->prepared) {
                wl_display_cancel_read(wl_vk_display->wl_display);
                wl_vk_display->prepared = TPL_FALSE;
        }

        /* Drain events already queued on ev_queue so their handlers run
         * before the objects involved are destroyed below. */
        if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
                                                                                  wl_vk_display->ev_queue) == -1) {
                _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
        }

#if TIZEN_FEATURE_ENABLE
        if (wl_vk_display->explicit_sync) {
                TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
                                 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
                                 wl_vk_display, wl_vk_display->explicit_sync);
                zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
                wl_vk_display->explicit_sync = NULL;
        }
#endif

        if (wl_vk_display->wl_tbm_client) {
                struct wl_proxy *wl_tbm = NULL;

                /* Detach wl_tbm from ev_queue before deinit so it is not
                 * left pointing at a queue we are about to destroy. */
                wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
                                                                                wl_vk_display->wl_tbm_client);
                if (wl_tbm) {
                        wl_proxy_set_queue(wl_tbm, NULL);
                }

                TPL_INFO("[WL_TBM_DEINIT]",
                                 "wl_vk_display(%p) wl_tbm_client(%p)",
                                 wl_vk_display, wl_vk_display->wl_tbm_client);
                wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
                wl_vk_display->wl_tbm_client = NULL;
        }

        /* Destroy the queue last, after every proxy using it is gone. */
        wl_event_queue_destroy(wl_vk_display->ev_queue);

        wl_vk_display->wl_initialized = TPL_FALSE;

        TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
                         wl_vk_display, wl_vk_display->wl_display);
}
576
577 static void*
578 _thread_init(void *data)
579 {
580         tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
581
582         if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
583                 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
584                                 wl_vk_display, wl_vk_display->wl_display);
585         }
586
587         if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
588                 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
589         }
590
591         return wl_vk_display;
592 }
593
/* gsource prepare callback for the wl_display fd: implements the wayland
 * prepare-read protocol. Returns TPL_TRUE to skip polling and dispatch
 * immediately, TPL_FALSE to go on to poll/check. */
static tpl_bool_t
__thread_func_disp_prepare(tpl_gsource *gsource)
{
        tpl_wl_vk_display_t *wl_vk_display =
                (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

        /* If this wl_vk_display is already prepared,
         * do nothing in this function. */
        if (wl_vk_display->prepared)
                return TPL_FALSE;

        /* If there is a last_error, there is no need to poll,
         * so skip directly to dispatch.
         * prepare -> dispatch */
        if (wl_vk_display->last_error)
                return TPL_TRUE;

        /* wl_display_prepare_read_queue fails while events are still queued;
         * dispatch them until the queue is empty and the read lock is held. */
        while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
                                                                                 wl_vk_display->ev_queue) != 0) {
                if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
                                                                                          wl_vk_display->ev_queue) == -1) {
                        _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
                }
        }

        /* prepared stays TRUE until check() reads or cancels the read */
        wl_vk_display->prepared = TPL_TRUE;

        wl_display_flush(wl_vk_display->wl_display);

        return TPL_FALSE;
}
625
626 static tpl_bool_t
627 __thread_func_disp_check(tpl_gsource *gsource)
628 {
629         tpl_wl_vk_display_t *wl_vk_display =
630                 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
631         tpl_bool_t ret = TPL_FALSE;
632
633         if (!wl_vk_display->prepared)
634                 return ret;
635
636         /* If prepared, but last_error is set,
637          * cancel_read is executed and FALSE is returned.
638          * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
639          * and skipping disp_check from prepare to disp_dispatch.
640          * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
641         if (wl_vk_display->prepared && wl_vk_display->last_error) {
642                 wl_display_cancel_read(wl_vk_display->wl_display);
643                 return ret;
644         }
645
646         if (tpl_gsource_check_io_condition(gsource)) {
647                 if (wl_display_read_events(wl_vk_display->wl_display) == -1)
648                         _wl_display_print_err(wl_vk_display, "read_event");
649                 ret = TPL_TRUE;
650         } else {
651                 wl_display_cancel_read(wl_vk_display->wl_display);
652                 ret = TPL_FALSE;
653         }
654
655         wl_vk_display->prepared = TPL_FALSE;
656
657         return ret;
658 }
659
660 static tpl_bool_t
661 __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
662 {
663         tpl_wl_vk_display_t *wl_vk_display =
664                 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
665
666         TPL_IGNORE(message);
667
668         /* If there is last_error, SOURCE_REMOVE should be returned
669          * to remove the gsource from the main loop.
670          * This is because wl_vk_display is not valid since last_error was set.*/
671         if (wl_vk_display->last_error) {
672                 return TPL_FALSE;
673         }
674
675         tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
676         if (tpl_gsource_check_io_condition(gsource)) {
677                 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
678                                                                                           wl_vk_display->ev_queue) == -1) {
679                         _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
680                 }
681         }
682
683         wl_display_flush(wl_vk_display->wl_display);
684         tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
685
686         return TPL_TRUE;
687 }
688
/* gsource finalize callback: tears down the wayland state (if initialized)
 * and signals disp_cond so the thread waiting on gsource destruction in
 * display fini can proceed. */
static void
__thread_func_disp_finalize(tpl_gsource *gsource)
{
        tpl_wl_vk_display_t *wl_vk_display =
                (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

        tpl_gmutex_lock(&wl_vk_display->disp_mutex);
        TPL_DEBUG("[FINALIZE] wl_vk_display(%p) tpl_gsource(%p)",
                          wl_vk_display, gsource);

        if (wl_vk_display->wl_initialized)
                _thread_wl_display_fini(wl_vk_display);

        /* Mark finalization complete for waiters before signalling. */
        wl_vk_display->gsource_finalized = TPL_TRUE;

        tpl_gcond_signal(&wl_vk_display->disp_cond);
        tpl_gmutex_unlock(&wl_vk_display->disp_mutex);

        return;
}
709
710
/* gsource callbacks for the wl_display fd (wayland event loop integration). */
static tpl_gsource_functions disp_funcs = {
        .prepare  = __thread_func_disp_prepare,
        .check    = __thread_func_disp_check,
        .dispatch = __thread_func_disp_dispatch,
        .finalize = __thread_func_disp_finalize,
};
717
718 static tpl_result_t
719 __tpl_wl_vk_display_init(tpl_display_t *display)
720 {
721         TPL_ASSERT(display);
722
723         tpl_wl_vk_display_t *wl_vk_display = NULL;
724
725         /* Do not allow default display in wayland */
726         if (!display->native_handle) {
727                 TPL_ERR("Invalid native handle for display.");
728                 return TPL_ERROR_INVALID_PARAMETER;
729         }
730
731         if (!_check_native_handle_is_wl_display(display->native_handle)) {
732                 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
733                 return TPL_ERROR_INVALID_PARAMETER;
734         }
735
736         wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
737                                                         sizeof(tpl_wl_vk_display_t));
738         if (!wl_vk_display) {
739                 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
740                 return TPL_ERROR_OUT_OF_MEMORY;
741         }
742
743         display->backend.data             = wl_vk_display;
744         display->bufmgr_fd                = -1;
745
746         wl_vk_display->tdm.tdm_initialized    = TPL_FALSE;
747         wl_vk_display->wl_initialized     = TPL_FALSE;
748
749         wl_vk_display->ev_queue           = NULL;
750         wl_vk_display->wl_display         = (struct wl_display *)display->native_handle;
751         wl_vk_display->last_error         = 0;
752         wl_vk_display->use_explicit_sync  = TPL_FALSE;   // default disabled
753         wl_vk_display->prepared           = TPL_FALSE;
754
755         /* Wayland Interfaces */
756 #if TIZEN_FEATURE_ENABLE
757         wl_vk_display->explicit_sync      = NULL;
758 #endif
759         wl_vk_display->wl_tbm_client      = NULL;
760
761         /* Vulkan specific surface capabilities */
762         wl_vk_display->min_buffer         = 2;
763         wl_vk_display->max_buffer         = VK_CLIENT_QUEUE_SIZE;
764         wl_vk_display->present_modes      = TPL_DISPLAY_PRESENT_MODE_FIFO;
765
766         wl_vk_display->use_wait_vblank    = TPL_TRUE;   // default enabled
767         {
768                 char *env = tpl_getenv("TPL_WAIT_VBLANK");
769                 if (env && !atoi(env)) {
770                         wl_vk_display->use_wait_vblank = TPL_FALSE;
771                 }
772         }
773
774         tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
775
776         tpl_gmutex_init(&wl_vk_display->disp_mutex);
777         tpl_gcond_init(&wl_vk_display->disp_cond);
778
779         /* Create gthread */
780         wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
781                                                                                            (tpl_gthread_func)_thread_init,
782                                                                                            (void *)wl_vk_display);
783         if (!wl_vk_display->thread) {
784                 TPL_ERR("Failed to create wl_vk_thread");
785                 goto free_display;
786         }
787
788         wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
789                                                                                                         (void *)wl_vk_display,
790                                                                                                         wl_display_get_fd(wl_vk_display->wl_display),
791                                                                                                         &disp_funcs, SOURCE_TYPE_NORMAL);
792         if (!wl_vk_display->disp_source) {
793                 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
794                                 display->native_handle,
795                                 wl_vk_display->thread);
796                 goto free_display;
797         }
798
799         tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex);
800         tpl_gcond_init(&wl_vk_display->tdm.tdm_cond);
801
802         wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread,
803                                                                                                    (void *)wl_vk_display,
804                                                                                                    wl_vk_display->tdm.tdm_display_fd,
805                                                                                                    &tdm_funcs, SOURCE_TYPE_NORMAL);
806         if (!wl_vk_display->tdm.tdm_source) {
807                 TPL_ERR("Failed to create tdm_gsource\n");
808                 goto free_display;
809         }
810
811         TPL_INFO("[DISPLAY_INIT]",
812                          "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
813                          wl_vk_display,
814                          wl_vk_display->thread,
815                          wl_vk_display->wl_display);
816
817         TPL_INFO("[DISPLAY_INIT]",
818                          "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
819                          wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
820                          wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
821
822         return TPL_ERROR_NONE;
823
824 free_display:
825         if (wl_vk_display->tdm.tdm_source) {
826                 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
827                 while (!wl_vk_display->tdm.gsource_finalized) {
828                         tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
829                         tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
830                 }
831                 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
832         }
833
834         if (wl_vk_display->disp_source) {
835                 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
836                 while (!wl_vk_display->gsource_finalized) {
837                         tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
838                         tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
839                 }
840                 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
841         }
842
843         if (wl_vk_display->thread) {
844                 tpl_gthread_destroy(wl_vk_display->thread);
845         }
846
847         tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
848         tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
849         tpl_gcond_clear(&wl_vk_display->disp_cond);
850         tpl_gmutex_clear(&wl_vk_display->disp_mutex);
851
852         wl_vk_display->thread = NULL;
853         free(wl_vk_display);
854
855         display->backend.data = NULL;
856         return TPL_ERROR_INVALID_OPERATION;
857 }
858
/* Finalize the wl_vk display backend.
 *
 * Teardown must follow a strict order: destroy the tdm gsource, then the
 * display gsource (both from inside the worker thread), then the thread
 * itself, and only afterwards release the cond/mutex primitives and free
 * the backend struct. display->backend.data is reset to NULL in all cases.
 */
static void
__tpl_wl_vk_display_fini(tpl_display_t *display)
{
	tpl_wl_vk_display_t *wl_vk_display;

	TPL_ASSERT(display);

	wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
	if (wl_vk_display) {
		TPL_INFO("[DISPLAY_FINI]",
				 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
				 wl_vk_display,
				 wl_vk_display->thread,
				 wl_vk_display->wl_display);

		if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) {
			/* This is a protection to prevent problems that arise in unexpected situations
			 * that g_cond_wait cannot work normally.
			 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
			 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
			 * */
			tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
			while (!wl_vk_display->tdm.gsource_finalized) {
				tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
				tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
			}
			wl_vk_display->tdm.tdm_source = NULL;
			tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
		}

		/* This is a protection to prevent problems that arise in unexpected situations
		 * that g_cond_wait cannot work normally.
		 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
		 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
		 * */
		tpl_gmutex_lock(&wl_vk_display->disp_mutex);
		while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) {
			tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
			tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
		}
		wl_vk_display->disp_source = NULL;
		tpl_gmutex_unlock(&wl_vk_display->disp_mutex);

		if (wl_vk_display->thread) {
			tpl_gthread_destroy(wl_vk_display->thread);
			wl_vk_display->thread = NULL;
		}

		/* Both gsources are finalized and the thread is joined;
		 * releasing the sync primitives is now safe. */
		tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
		tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
		tpl_gcond_clear(&wl_vk_display->disp_cond);
		tpl_gmutex_clear(&wl_vk_display->disp_mutex);

		tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);

		free(wl_vk_display);
	}

	display->backend.data = NULL;
}
919
920 static tpl_result_t
921 __tpl_wl_vk_display_query_config(tpl_display_t *display,
922                 tpl_surface_type_t surface_type,
923                 int red_size, int green_size,
924                 int blue_size, int alpha_size,
925                 int color_depth, int *native_visual_id,
926                 tpl_bool_t *is_slow)
927 {
928         TPL_ASSERT(display);
929
930         if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
931                         green_size == 8 && blue_size == 8 &&
932                         (color_depth == 32 || color_depth == 24)) {
933
934                 if (alpha_size == 8) {
935                         if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
936                         if (is_slow) *is_slow = TPL_FALSE;
937                         return TPL_ERROR_NONE;
938                 }
939                 if (alpha_size == 0) {
940                         if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
941                         if (is_slow) *is_slow = TPL_FALSE;
942                         return TPL_ERROR_NONE;
943                 }
944         }
945
946         return TPL_ERROR_INVALID_PARAMETER;
947 }
948
949 static tpl_result_t
950 __tpl_wl_vk_display_filter_config(tpl_display_t *display,
951                                                                           int *visual_id,
952                                                                           int alpha_size)
953 {
954         TPL_IGNORE(display);
955         TPL_IGNORE(visual_id);
956         TPL_IGNORE(alpha_size);
957         return TPL_ERROR_NONE;
958 }
959
960 static tpl_result_t
961 __tpl_wl_vk_display_query_window_supported_buffer_count(
962         tpl_display_t *display,
963         tpl_handle_t window, int *min, int *max)
964 {
965         tpl_wl_vk_display_t *wl_vk_display = NULL;
966
967         TPL_ASSERT(display);
968         TPL_ASSERT(window);
969
970         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
971         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
972
973         if (min) *min = wl_vk_display->min_buffer;
974         if (max) *max = wl_vk_display->max_buffer;
975
976         return TPL_ERROR_NONE;
977 }
978
979 static tpl_result_t
980 __tpl_wl_vk_display_query_window_supported_present_modes(
981         tpl_display_t *display,
982         tpl_handle_t window, int *present_modes)
983 {
984         tpl_wl_vk_display_t *wl_vk_display = NULL;
985
986         TPL_ASSERT(display);
987         TPL_ASSERT(window);
988
989         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
990         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
991
992         if (present_modes) {
993                 *present_modes = wl_vk_display->present_modes;
994         }
995
996         return TPL_ERROR_NONE;
997 }
998
/* Drain the surface's buffer tracking array, forcing every still-tracked
 * buffer back to RELEASED state.
 *
 * For each slot: detach the buffer from the array (under buffers_mutex),
 * then, depending on its status, optionally wait briefly for in-flight
 * pipeline stages to settle, release or cancel-dequeue it on the
 * tbm_surface_queue, and drop the extra tbm reference.
 *
 * Lock ordering here is wl_event_mutex -> buffers_mutex -> buffer mutex;
 * wl_event_mutex is dropped around the timed wait so the worker thread
 * can make progress and signal the buffer's cond. */
static void
_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display      = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain        = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer        = NULL;
	tpl_bool_t need_to_release              = TPL_FALSE;
	tpl_bool_t need_to_cancel               = TPL_FALSE;
	buffer_status_t status                  = RELEASED;
	int idx                                 = 0;

	while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
		tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		wl_vk_buffer = wl_vk_surface->buffers[idx];

		if (wl_vk_buffer) {
			/* Claim the slot; this function now owns the cleanup. */
			wl_vk_surface->buffers[idx] = NULL;
			wl_vk_surface->buffer_cnt--;
		} else {
			tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
			tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
			idx++;
			continue;
		}

		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		status = wl_vk_buffer->status;

		TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
				  idx, wl_vk_buffer,
				  wl_vk_buffer->tbm_surface,
				  status_to_string[status]);

		if (status >= ENQUEUED) {
			tpl_bool_t need_to_wait  = TPL_FALSE;
			tpl_result_t wait_result = TPL_ERROR_NONE;

			/* Without explicit sync the buffer settles at WAITING_VBLANK;
			 * with explicit sync it settles at COMMITTED. Wait only while
			 * it has not yet reached the settled state. */
			if (!wl_vk_display->use_explicit_sync &&
				status < WAITING_VBLANK)
				need_to_wait = TPL_TRUE;

			if (wl_vk_display->use_explicit_sync &&
				status < COMMITTED)
				need_to_wait = TPL_TRUE;

			if (need_to_wait) {
				/* Drop wl_event_mutex so the worker thread can advance
				 * the buffer state and signal its cond. */
				tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
				wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond,
												  &wl_vk_buffer->mutex,
												  16); /* 16ms */
				tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

				status = wl_vk_buffer->status;

				if (wait_result == TPL_ERROR_TIME_OUT)
					TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
							 wl_vk_buffer);
			}
		}

		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
		/* It has been acquired but has not yet been released, so this
		 * buffer must be released. */
		need_to_release = (status >= ACQUIRED && status <= COMMITTED);

		/* After dequeue, it has not been enqueued yet
		 * so cancel_dequeue must be performed. */
		need_to_cancel = (status == DEQUEUED);

		if (swapchain && swapchain->tbm_queue) {
			if (need_to_release) {
				tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
													wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}

			if (need_to_cancel) {
				tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
														   wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}
		}

		wl_vk_buffer->status = RELEASED;

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		/* Drop the extra reference held while the buffer was in flight. */
		if (need_to_release || need_to_cancel)
			tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);

		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

		idx++;
	}
}
1103
1104 static tdm_client_vblank*
1105 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1106 {
1107         tdm_client_vblank *vblank = NULL;
1108         tdm_client_output *tdm_output = NULL;
1109         tdm_error tdm_err = TDM_ERROR_NONE;
1110
1111         if (!tdm_client) {
1112                 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1113                 return NULL;
1114         }
1115
1116         tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1117         if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1118                 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1119                 return NULL;
1120         }
1121
1122         vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1123         if (!vblank || tdm_err != TDM_ERROR_NONE) {
1124                 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1125                 return NULL;
1126         }
1127
1128         tdm_client_vblank_set_enable_fake(vblank, 1);
1129         tdm_client_vblank_set_sync(vblank, 0);
1130
1131         return vblank;
1132 }
1133
/* Per-surface initialization executed on the wl_vk worker thread
 * (triggered by the INIT_SURFACE message).
 *
 * Creates the tdm vblank object and, when explicit sync is available and
 * enabled, the per-surface zwp_linux surface synchronization object.
 * The tbm_surface_queue itself is created later, at swapchain creation. */
static void
_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	/* tbm_surface_queue will be created at swapchain_create */

	/* vblank creation failure is tolerated (vblank stays NULL). */
	wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
								wl_vk_display->tdm.tdm_client);
	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_INIT]",
				 "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_display->tdm.tdm_client,
				 wl_vk_surface->vblank);
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
		wl_vk_surface->surface_sync =
			zwp_linux_explicit_synchronization_v1_get_synchronization(
					wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
		if (wl_vk_surface->surface_sync) {
			TPL_INFO("[EXPLICIT_SYNC_INIT]",
					 "wl_vk_surface(%p) surface_sync(%p)",
					 wl_vk_surface, wl_vk_surface->surface_sync);
		} else {
			TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
					 wl_vk_surface);
			/* Fall back to non-explicit-sync for the whole display. */
			wl_vk_display->use_explicit_sync = TPL_FALSE;
		}
	}
#endif
	/* NOTE(review): __tpl_list_alloc() result is not checked; a NULL list
	 * here would surface later in the vblank wait path — confirm intended. */
	wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
}
1168
/* Per-surface teardown executed on the wl_vk worker thread (from the
 * gsource finalize callback). Releases, in order: the vblank waiting
 * list, the explicit-sync surface object (if any), and the vblank. */
static void
_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
{
	TPL_INFO("[SURFACE_FINI]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->vblank_waiting_buffers) {
		__tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
		wl_vk_surface->vblank_waiting_buffers = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_surface->surface_sync) {
		TPL_INFO("[SURFACE_SYNC_DESTROY]",
				 "wl_vk_surface(%p) surface_sync(%p)",
				  wl_vk_surface, wl_vk_surface->surface_sync);
		zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
		wl_vk_surface->surface_sync = NULL;
	}
#endif

	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_DESTROY]",
				 "wl_vk_surface(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_surface->vblank);
		tdm_client_vblank_destroy(wl_vk_surface->vblank);
		wl_vk_surface->vblank = NULL;
	}
}
1199
1200 static tpl_bool_t
1201 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1202 {
1203         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1204
1205         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1206
1207         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1208         if (message == INIT_SURFACE) { /* Initialize surface */
1209                 TPL_DEBUG("wl_vk_surface(%p) initialize message received!",
1210                                   wl_vk_surface);
1211                 _thread_wl_vk_surface_init(wl_vk_surface);
1212                 wl_vk_surface->initialized_in_thread = TPL_TRUE;
1213                 tpl_gcond_signal(&wl_vk_surface->surf_cond);    
1214         } else if (message == CREATE_QUEUE) { /* Create tbm_surface_queue */
1215                 TPL_DEBUG("wl_vk_surface(%p) queue creation message received!",
1216                                   wl_vk_surface);
1217                 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1218                         != TPL_ERROR_NONE) {
1219                         TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1220                                         wl_vk_surface);
1221                 }
1222                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1223         } else if (message == DESTROY_QUEUE) { /* swapchain destroy */
1224                 TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!",
1225                                   wl_vk_surface);
1226                 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1227                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1228         } else if (message == ACQUIRABLE) { /* Acquirable message */
1229                 TPL_DEBUG("wl_vk_surface(%p) acquirable message received!",
1230                                   wl_vk_surface);
1231                 if (_thread_surface_queue_acquire(wl_vk_surface)
1232                         != TPL_ERROR_NONE) {
1233                         TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1234                                         wl_vk_surface);
1235                 }
1236         }
1237
1238         /* init to NONE_MESSAGE */
1239         wl_vk_surface->sent_message = NONE_MESSAGE;
1240
1241         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1242
1243         return TPL_TRUE;
1244 }
1245
/* gsource finalize callback for the per-surface source.
 *
 * Runs the in-thread teardown, then sets gsource_finalized and signals
 * surf_cond so the API thread looping in __tpl_wl_vk_surface_fini()
 * (tpl_gsource_destroy + tpl_gcond_wait) can proceed. */
static void
__thread_func_surf_finalize(tpl_gsource *gsource)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;

	wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)",
			  wl_vk_surface, gsource);

	_thread_wl_vk_surface_fini(wl_vk_surface);

	/* Flag checked by the destroy loop on the API thread. */
	wl_vk_surface->gsource_finalized = TPL_TRUE;

	tpl_gcond_signal(&wl_vk_surface->surf_cond);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
1265
/* Callback table for the per-surface gsource running on the wl_vk
 * worker thread; only dispatch and finalize are used. */
static tpl_gsource_functions surf_funcs = {
	.prepare = NULL,
	.check = NULL,
	.dispatch = __thread_func_surf_dispatch,
	.finalize = __thread_func_surf_finalize,
};
1272
1273
/* Backend surface initializer.
 *
 * Allocates the tpl_wl_vk_surface_t, attaches a gsource to the display's
 * worker thread, then sends INIT_SURFACE and blocks on surf_cond until
 * the thread-side initialization (_thread_wl_vk_surface_init) completes.
 *
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_OUT_OF_MEMORY on
 * allocation failure, TPL_ERROR_INVALID_OPERATION if the gsource cannot
 * be created (the surface struct is freed in that case). */
static tpl_result_t
__tpl_wl_vk_surface_init(tpl_surface_t *surface)
{
	tpl_wl_vk_surface_t *wl_vk_surface      = NULL;
	tpl_wl_vk_display_t *wl_vk_display      = NULL;
	tpl_gsource *surf_source                = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
	TPL_ASSERT(surface->native_handle);

	wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
							 sizeof(tpl_wl_vk_surface_t));
	if (!wl_vk_surface) {
		TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
		return TPL_ERROR_OUT_OF_MEMORY;
	}

	/* fd -1: message-driven source, no fd to poll. */
	surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
									 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
	if (!surf_source) {
		TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
				wl_vk_surface);
		free(wl_vk_surface);
		surface->backend.data = NULL;
		return TPL_ERROR_INVALID_OPERATION;
	}

	surface->backend.data                  = (void *)wl_vk_surface;
	surface->width                                 = -1;
	surface->height                        = -1;

	wl_vk_surface->surf_source             = surf_source;
	wl_vk_surface->swapchain               = NULL;

	wl_vk_surface->wl_vk_display           = wl_vk_display;
	wl_vk_surface->wl_surface              = (struct wl_surface *)surface->native_handle;
	wl_vk_surface->tpl_surface             = surface;

	wl_vk_surface->reset                   = TPL_FALSE;
	wl_vk_surface->is_activated            = TPL_FALSE;
	wl_vk_surface->vblank_done             = TPL_TRUE;
	wl_vk_surface->initialized_in_thread   = TPL_FALSE;

	wl_vk_surface->render_done_cnt         = 0;

	wl_vk_surface->vblank                  = NULL;
#if TIZEN_FEATURE_ENABLE
	wl_vk_surface->surface_sync            = NULL;
#endif

	wl_vk_surface->sent_message            = NONE_MESSAGE;

	wl_vk_surface->post_interval           = surface->post_interval;

	/* Explicitly reset the buffer tracking array and count. */
	{
		int i = 0;
		for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
			wl_vk_surface->buffers[i]     = NULL;
		wl_vk_surface->buffer_cnt         = 0;
	}

	tpl_gmutex_init(&wl_vk_surface->surf_mutex);
	tpl_gcond_init(&wl_vk_surface->surf_cond);

	tpl_gmutex_init(&wl_vk_surface->buffers_mutex);

	/* Initialize in thread */
	/* Handshake: send INIT_SURFACE and wait until the worker thread sets
	 * initialized_in_thread and signals surf_cond. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->sent_message = INIT_SURFACE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	while (!wl_vk_surface->initialized_in_thread)
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	TPL_INFO("[SURFACE_INIT]",
			  "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
			  surface, wl_vk_surface, wl_vk_surface->surf_source);

	return TPL_ERROR_NONE;
}
1360
1361 static void
1362 __tpl_wl_vk_surface_fini(tpl_surface_t *surface)
1363 {
1364         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1365         tpl_wl_vk_display_t *wl_vk_display = NULL;
1366
1367         TPL_ASSERT(surface);
1368         TPL_ASSERT(surface->display);
1369
1370         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1371         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1372
1373         wl_vk_display = (tpl_wl_vk_display_t *)
1374                                                          surface->display->backend.data;
1375         TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1376
1377         TPL_INFO("[SURFACE_FINI][BEGIN]",
1378                          "wl_vk_surface(%p) wl_surface(%p)",
1379                          wl_vk_surface, wl_vk_surface->wl_surface);
1380
1381         if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
1382                 /* finalize swapchain */
1383
1384         }
1385
1386         wl_vk_surface->swapchain        = NULL;
1387
1388         /* This is a protection to prevent problems that arise in unexpected situations
1389          * that g_cond_wait cannot work normally.
1390          * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
1391          * caller should use tpl_gcond_wait() in the loop with checking finalized flag
1392          * */
1393         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1394         while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) {
1395                 tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
1396                 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1397         }
1398         wl_vk_surface->surf_source = NULL;
1399         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1400
1401         _print_buffer_lists(wl_vk_surface);
1402
1403         wl_vk_surface->wl_surface       = NULL;
1404         wl_vk_surface->wl_vk_display    = NULL;
1405         wl_vk_surface->tpl_surface      = NULL;
1406
1407         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1408         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1409         tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
1410         tpl_gcond_clear(&wl_vk_surface->surf_cond);
1411
1412         TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);
1413
1414         free(wl_vk_surface);
1415         surface->backend.data = NULL;
1416 }
1417
1418 static tpl_result_t
1419 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1420                                                                                   int post_interval)
1421 {
1422         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1423
1424         TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1425
1426         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1427
1428         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1429
1430         TPL_INFO("[SET_POST_INTERVAL]",
1431                          "wl_vk_surface(%p) post_interval(%d -> %d)",
1432                          wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1433
1434         wl_vk_surface->post_interval = post_interval;
1435
1436         return TPL_ERROR_NONE;
1437 }
1438
1439 static tpl_bool_t
1440 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1441 {
1442         TPL_ASSERT(surface);
1443         TPL_ASSERT(surface->backend.data);
1444
1445         tpl_wl_vk_surface_t *wl_vk_surface =
1446                 (tpl_wl_vk_surface_t *)surface->backend.data;
1447
1448         return !(wl_vk_surface->reset);
1449 }
1450
/* tbm_surface_queue reset callback.
 *
 * Invoked when the queue is reset (e.g. on resize or activation state
 * change). Logs the size/activation transition, marks the surface with
 * reset = TPL_TRUE so the next frame picks up the new state, and
 * forwards the event to the user's reset callback if registered. */
static void
__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
							  void *data)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;
	tpl_wl_vk_swapchain_t *swapchain   = NULL;
	tpl_surface_t *surface             = NULL;
	tpl_bool_t is_activated            = TPL_FALSE;
	int width, height;

	wl_vk_surface = (tpl_wl_vk_surface_t *)data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	wl_vk_display = wl_vk_surface->wl_vk_display;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

	surface = wl_vk_surface->tpl_surface;
	TPL_CHECK_ON_NULL_RETURN(surface);

	swapchain = wl_vk_surface->swapchain;
	TPL_CHECK_ON_NULL_RETURN(swapchain);

	/* When the queue is resized, change the reset flag to TPL_TRUE to reflect
	 * the changed window size at the next frame. */
	width = tbm_surface_queue_get_width(tbm_queue);
	height = tbm_surface_queue_get_height(tbm_queue);
	if (surface->width != width || surface->height != height) {
		TPL_INFO("[QUEUE_RESIZE]",
				 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
				 wl_vk_surface, tbm_queue,
				 surface->width, surface->height, width, height);
	}

	/* When queue_reset_callback is called, if is_activated is different from
	 * its previous state change the reset flag to TPL_TRUE to get a new buffer
	 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
	is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
														   swapchain->tbm_queue);
	if (wl_vk_surface->is_activated != is_activated) {
		if (is_activated) {
			TPL_INFO("[ACTIVATED]",
					  "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
					  wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
		} else {
			TPL_LOG_T("[DEACTIVATED]",
					  " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
					  wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
		}
	}

	wl_vk_surface->reset = TPL_TRUE;

	if (surface->reset_cb)
		surface->reset_cb(surface->reset_data);
}
1507
1508 static void
1509 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1510                                                                    void *data)
1511 {
1512         TPL_IGNORE(tbm_queue);
1513
1514         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1515         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1516
1517         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1518         if (wl_vk_surface->sent_message == NONE_MESSAGE) {
1519                 wl_vk_surface->sent_message = ACQUIRABLE;
1520                 tpl_gsource_send_message(wl_vk_surface->surf_source,
1521                                                                  wl_vk_surface->sent_message);
1522         }
1523         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1524 }
1525
/* Runs on the surface thread in response to the CREATE_QUEUE message.
 * Validates the requested swapchain properties, then either reuses the
 * existing tbm_queue (resizing it if needed) or creates a new one and
 * registers the reset/acquirable callbacks.
 * On failure the error is also stored in swapchain->result so the caller
 * waiting in __tpl_wl_vk_surface_create_swapchain can observe it. */
static tpl_result_t
_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
{
	TPL_ASSERT (wl_vk_surface);

	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tbm_surface_queue_h tbm_queue      = NULL;
	tbm_bufmgr bufmgr = NULL;
	unsigned int capability;

	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

	/* Requested buffer count must fall within the display's advertised
	 * [min_buffer, max_buffer] range. */
	if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
		TPL_ERR("buffer count(%d) must be higher than (%d)",
				swapchain->properties.buffer_count,
				wl_vk_display->min_buffer);
		swapchain->result = TPL_ERROR_INVALID_PARAMETER;
		return TPL_ERROR_INVALID_PARAMETER;
	}

	if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
		TPL_ERR("buffer count(%d) must be lower than (%d)",
				swapchain->properties.buffer_count,
				wl_vk_display->max_buffer);
		swapchain->result = TPL_ERROR_INVALID_PARAMETER;
		return TPL_ERROR_INVALID_PARAMETER;
	}

	/* The requested present mode must be one of the modes the display
	 * supports (bitmask test). */
	if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
		TPL_ERR("Unsupported present_mode(%d)",
				swapchain->properties.present_mode);
		swapchain->result = TPL_ERROR_INVALID_PARAMETER;
		return TPL_ERROR_INVALID_PARAMETER;
	}

	/* Reuse path: a tbm_queue already exists from a previous swapchain. */
	if (swapchain->tbm_queue) {
		int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
		int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);

		/* Drop the references taken in get_swapchain_buffers on the
		 * previously exported buffers before the queue is reset. */
		if (swapchain->swapchain_buffers) {
			int i;
			for (i = 0; i < swapchain->properties.buffer_count; i++) {
				if (swapchain->swapchain_buffers[i]) {
					TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]);
					tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
					swapchain->swapchain_buffers[i] = NULL;
				}
			}

			free(swapchain->swapchain_buffers);
			swapchain->swapchain_buffers = NULL;
		}

		/* Resize the queue only when the requested size actually changed. */
		if (old_width != swapchain->properties.width ||
			old_height != swapchain->properties.height) {
			tbm_surface_queue_reset(swapchain->tbm_queue,
									swapchain->properties.width,
									swapchain->properties.height,
									TBM_FORMAT_ARGB8888);
			TPL_INFO("[RESIZE]",
					 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
					 wl_vk_surface, swapchain, swapchain->tbm_queue,
					 old_width, old_height,
					 swapchain->properties.width,
					 swapchain->properties.height);
		}

		/* Report back the actual queue size, which may differ from the
		 * requested buffer_count. */
		swapchain->properties.buffer_count =
			tbm_surface_queue_get_size(swapchain->tbm_queue);

		wl_vk_surface->reset = TPL_FALSE;

		/* The reused swapchain gains one more owner. */
		__tpl_util_atomic_inc(&swapchain->ref_cnt);
		/* create_done wakes the caller blocked in
		 * __tpl_wl_vk_surface_create_swapchain. */
		swapchain->create_done = TPL_TRUE;

		TPL_INFO("[SWAPCHAIN_REUSE]",
				 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
				 wl_vk_surface, swapchain, swapchain->tbm_queue,
				 swapchain->properties.buffer_count);

		return TPL_ERROR_NONE;
	}

	/* Create path: query the bufmgr capability only to decide whether a
	 * tiled-memory queue can be used; the bufmgr itself is not kept. */
	bufmgr = tbm_bufmgr_init(-1);
	capability = tbm_bufmgr_get_capability(bufmgr);
	tbm_bufmgr_deinit(bufmgr);

	if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
		tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
													wl_vk_display->wl_tbm_client,
													wl_vk_surface->wl_surface,
													swapchain->properties.buffer_count,
													swapchain->properties.width,
													swapchain->properties.height,
													TBM_FORMAT_ARGB8888);
	} else {
		tbm_queue = wayland_tbm_client_create_surface_queue(
													wl_vk_display->wl_tbm_client,
													wl_vk_surface->wl_surface,
													swapchain->properties.buffer_count,
													swapchain->properties.width,
													swapchain->properties.height,
													TBM_FORMAT_ARGB8888);
	}

	if (!tbm_queue) {
		TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
				wl_vk_surface);
		swapchain->result = TPL_ERROR_OUT_OF_MEMORY;
		return TPL_ERROR_OUT_OF_MEMORY;
	}

	/* GUARANTEE_CYCLE keeps dequeue order deterministic, as Vulkan
	 * swapchain semantics require. On any setup failure the queue is
	 * destroyed again so nothing leaks. */
	if (tbm_surface_queue_set_modes(
			tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
			TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
				tbm_queue);
		tbm_surface_queue_destroy(tbm_queue);
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	if (tbm_surface_queue_add_reset_cb(
			tbm_queue,
			__cb_tbm_queue_reset_callback,
			(void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
				tbm_queue);
		tbm_surface_queue_destroy(tbm_queue);
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	if (tbm_surface_queue_add_acquirable_cb(
			tbm_queue,
			__cb_tbm_queue_acquirable_callback,
			(void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
				tbm_queue);
		tbm_surface_queue_destroy(tbm_queue);
		swapchain->result = TPL_ERROR_INVALID_OPERATION;
		return TPL_ERROR_INVALID_OPERATION;
	}

	swapchain->tbm_queue = tbm_queue;
	/* create_done wakes the caller blocked in
	 * __tpl_wl_vk_surface_create_swapchain. */
	swapchain->create_done = TPL_TRUE;

	TPL_INFO("[TBM_QUEUE_CREATED]",
			 "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)",
			 wl_vk_surface, swapchain, tbm_queue);

	return TPL_ERROR_NONE;
}
1681
1682 static tpl_result_t
1683 __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
1684                 tbm_format format, int width,
1685                 int height, int buffer_count, int present_mode)
1686 {
1687         tpl_wl_vk_surface_t *wl_vk_surface              = NULL;
1688         tpl_wl_vk_display_t *wl_vk_display      = NULL;
1689         tpl_wl_vk_swapchain_t *swapchain  = NULL;
1690
1691         TPL_ASSERT(surface);
1692         TPL_ASSERT(surface->display);
1693
1694         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1695         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1696
1697         wl_vk_display = (tpl_wl_vk_display_t *)
1698                                                          surface->display->backend.data;
1699         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1700
1701         swapchain = wl_vk_surface->swapchain;
1702
1703         if (swapchain == NULL) {
1704                 swapchain =
1705                         (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
1706                         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
1707                 swapchain->tbm_queue           = NULL;
1708         }
1709
1710         swapchain->properties.buffer_count = buffer_count;
1711         swapchain->properties.width        = width;
1712         swapchain->properties.height       = height;
1713         swapchain->properties.present_mode = present_mode;
1714         swapchain->wl_vk_surface           = wl_vk_surface;
1715         swapchain->properties.format       = format;
1716
1717         swapchain->result                  = TPL_ERROR_NONE;
1718         swapchain->create_done             = TPL_FALSE;
1719
1720         wl_vk_surface->swapchain           = swapchain;
1721
1722         __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
1723
1724         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1725         /* send swapchain create tbm_queue message */
1726         wl_vk_surface->sent_message = CREATE_QUEUE;
1727         tpl_gsource_send_message(wl_vk_surface->surf_source,
1728                                                          wl_vk_surface->sent_message);
1729         while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE)
1730                 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1731         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1732
1733         TPL_CHECK_ON_FALSE_ASSERT_FAIL(
1734                 swapchain->tbm_queue != NULL,
1735                 "[CRITICAL FAIL] Failed to create tbm_surface_queue");
1736
1737         wl_vk_surface->reset = TPL_FALSE;
1738
1739         return TPL_ERROR_NONE;
1740 }
1741
1742 static void
1743 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1744 {
1745         TPL_ASSERT(wl_vk_surface);
1746
1747         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1748
1749         TPL_CHECK_ON_NULL_RETURN(swapchain);
1750
1751         if (swapchain->tbm_queue) {
1752                 TPL_INFO("[TBM_QUEUE_DESTROY]",
1753                                  "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1754                                  wl_vk_surface, swapchain, swapchain->tbm_queue);
1755                 tbm_surface_queue_destroy(swapchain->tbm_queue);
1756                 swapchain->tbm_queue = NULL;
1757         }
1758 }
1759
/* Destroys the swapchain of @surface.
 * Drops one reference; only when the last reference is gone are the exported
 * buffers unreferenced, the buffer tracking list cleared, and the surface
 * thread asked (DESTROY_QUEUE message) to destroy the tbm_queue before the
 * swapchain container itself is freed. */
static tpl_result_t
__tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
{
	tpl_wl_vk_swapchain_t *swapchain   = NULL;
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	swapchain = wl_vk_surface->swapchain;
	if (!swapchain) {
		TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
				wl_vk_surface);
		return TPL_ERROR_INVALID_OPERATION;
	}

	if (!swapchain->tbm_queue) {
		TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.",
				wl_vk_surface, wl_vk_surface->swapchain);
		return TPL_ERROR_INVALID_OPERATION;
	}

	/* Other owners may still hold the swapchain (see SWAPCHAIN_REUSE);
	 * only the final release tears it down. */
	if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
		TPL_INFO("[DESTROY_SWAPCHAIN]",
				 "wl_vk_surface(%p) swapchain(%p) still valid.",
				 wl_vk_surface, swapchain);
		return TPL_ERROR_NONE;
	}

	TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
			 "wl_vk_surface(%p) swapchain(%p)",
			 wl_vk_surface, wl_vk_surface->swapchain);

	/* Drop the references taken in get_swapchain_buffers and free the
	 * exported buffer array. */
	if (swapchain->swapchain_buffers) {
		for (int i = 0; i < swapchain->properties.buffer_count; i++) {
			if (swapchain->swapchain_buffers[i]) {
				TPL_DEBUG("Stop tracking tbm_surface(%p)",
						  swapchain->swapchain_buffers[i]);
				tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
				swapchain->swapchain_buffers[i] = NULL;
			}
		}

		free(swapchain->swapchain_buffers);
		swapchain->swapchain_buffers = NULL;
	}

	_tpl_wl_vk_surface_buffer_clear(wl_vk_surface);

	/* Ask the surface thread to destroy the tbm_queue and wait until it
	 * clears swapchain->tbm_queue (the handshake condition). */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_surface->sent_message = DESTROY_QUEUE;
	tpl_gsource_send_message(wl_vk_surface->surf_source,
							 wl_vk_surface->sent_message);
	while (swapchain->tbm_queue)
		tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	_print_buffer_lists(wl_vk_surface);

	free(swapchain);
	wl_vk_surface->swapchain = NULL;

	return TPL_ERROR_NONE;
}
1831
1832 static tpl_result_t
1833 __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
1834                                                                                           tbm_surface_h **buffers,
1835                                                                                           int *buffer_count)
1836 {
1837         TPL_ASSERT(surface);
1838         TPL_ASSERT(surface->backend.data);
1839         TPL_ASSERT(surface->display);
1840         TPL_ASSERT(surface->display->backend.data);
1841
1842         tpl_wl_vk_surface_t *wl_vk_surface =
1843                 (tpl_wl_vk_surface_t *)surface->backend.data;
1844         tpl_wl_vk_display_t *wl_vk_display =
1845                 (tpl_wl_vk_display_t *)surface->display->backend.data;
1846         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1847         tpl_result_t ret                   = TPL_ERROR_NONE;
1848         int i;
1849
1850         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1851         TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
1852
1853         tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1854
1855         if (!buffers) {
1856                 *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
1857                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1858                 return TPL_ERROR_NONE;
1859         }
1860
1861         swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
1862                                                                                 *buffer_count,
1863                                                                                 sizeof(tbm_surface_h));
1864         if (!swapchain->swapchain_buffers) {
1865                 TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
1866                                 *buffer_count);
1867                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1868                 return TPL_ERROR_OUT_OF_MEMORY;
1869         }
1870
1871         ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
1872                                                                                                 swapchain->tbm_queue,
1873                                                                                                 swapchain->swapchain_buffers,
1874                                                                                                 buffer_count);
1875         if (!ret) {
1876                 TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
1877                                 wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
1878                 free(swapchain->swapchain_buffers);
1879                 swapchain->swapchain_buffers = NULL;
1880                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1881                 return TPL_ERROR_INVALID_OPERATION;
1882         }
1883
1884         for (i = 0; i < *buffer_count; i++) {
1885                 if (swapchain->swapchain_buffers[i]) {
1886                         TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
1887                                           i, swapchain->swapchain_buffers[i],
1888                                           _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1889                         tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
1890                 }
1891         }
1892
1893         *buffers = swapchain->swapchain_buffers;
1894
1895         tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1896
1897         return TPL_ERROR_NONE;
1898 }
1899
/* tbm user-data free callback registered in _wl_vk_buffer_create().
 * Invoked when the tbm_surface's user data is destroyed; detaches the
 * wl_vk_buffer from the surface's tracking array and releases everything it
 * owns (wl_buffer, buffer_release, release fence fd, damage rects), then
 * frees the struct itself. */
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
			 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);

	/* Remove this buffer from the surface's tracking array (idx == -1
	 * afterwards means "not tracked"). */
	tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
	if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
		wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
		wl_vk_surface->buffer_cnt--;

		wl_vk_buffer->idx = -1;
	}
	tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

	/* Flush pending protocol requests before tearing down the wl_buffer. */
	wl_display_flush(wl_vk_display->wl_display);

	if (wl_vk_buffer->wl_buffer) {
		wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
										  wl_vk_buffer->wl_buffer);
		wl_vk_buffer->wl_buffer = NULL;
	}

#if TIZEN_FEATURE_ENABLE
	/* Explicit-sync release object, if one was still outstanding. */
	if (wl_vk_buffer->buffer_release) {
		zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
		wl_vk_buffer->buffer_release = NULL;
	}
#endif

	/* -1 is the "no fence" sentinel for the release fence fd. */
	if (wl_vk_buffer->release_fence_fd != -1) {
		close(wl_vk_buffer->release_fence_fd);
		wl_vk_buffer->release_fence_fd = -1;
	}

	if (wl_vk_buffer->rects) {
		free(wl_vk_buffer->rects);
		wl_vk_buffer->rects = NULL;
		wl_vk_buffer->num_rects = 0;
	}

	wl_vk_buffer->tbm_surface = NULL;
	wl_vk_buffer->bo_name = -1;

	free(wl_vk_buffer);
}
1949
1950 static tpl_wl_vk_buffer_t *
1951 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1952 {
1953         tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1954         tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1955                                                                            (void **)&wl_vk_buffer);
1956         return wl_vk_buffer;
1957 }
1958
/* Returns the wl_vk_buffer wrapper for @tbm_surface, creating it on first
 * use. The wrapper is stored as tbm user data (freed via
 * __cb_wl_vk_buffer_free when the tbm_surface drops it) and registered in
 * the surface's fixed-size tracking array. */
static tpl_wl_vk_buffer_t *
_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
					  tbm_surface_h tbm_surface)
{
	tpl_wl_vk_buffer_t  *wl_vk_buffer  = NULL;

	wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);

	if (!wl_vk_buffer) {
		wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
		TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);

		/* Attach the wrapper to the tbm_surface; the free callback below
		 * owns the wrapper's lifetime from here on. */
		tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   (tbm_data_free)__cb_wl_vk_buffer_free);
		tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   wl_vk_buffer);

		wl_vk_buffer->wl_buffer                = NULL;
		wl_vk_buffer->tbm_surface              = tbm_surface;
		wl_vk_buffer->bo_name                  = _get_tbm_surface_bo_name(tbm_surface);
		wl_vk_buffer->wl_vk_surface            = wl_vk_surface;

		wl_vk_buffer->status                   = RELEASED;

		/* -1 == "no fence". */
		wl_vk_buffer->acquire_fence_fd         = -1;
		wl_vk_buffer->release_fence_fd         = -1;

		wl_vk_buffer->dx                       = 0;
		wl_vk_buffer->dy                       = 0;
		wl_vk_buffer->width                    = tbm_surface_get_width(tbm_surface);
		wl_vk_buffer->height                   = tbm_surface_get_height(tbm_surface);

		wl_vk_buffer->rects                    = NULL;
		wl_vk_buffer->num_rects                = 0;

		wl_vk_buffer->need_to_commit = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
		wl_vk_buffer->buffer_release = NULL;
#endif
		tpl_gmutex_init(&wl_vk_buffer->mutex);
		tpl_gcond_init(&wl_vk_buffer->cond);

		/* Register the wrapper in the first free slot of the surface's
		 * tracking array. */
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		{
			int i;
			for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
				if (wl_vk_surface->buffers[i] == NULL) break;

			/* If this exception is reached,
			 * it may be a critical memory leak problem. */
			if (i == BUFFER_ARRAY_SIZE) {
				tpl_wl_vk_buffer_t *evicted_buffer = NULL;
				int evicted_idx = 0; /* evict the frontmost buffer */

				evicted_buffer = wl_vk_surface->buffers[evicted_idx];

				TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
						 wl_vk_surface);
				TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
						 evicted_buffer, evicted_buffer->tbm_surface,
						 status_to_string[evicted_buffer->status]);

				/* [TODO] need to think about whether there will be
				 * better modifications */
				wl_vk_surface->buffer_cnt--;
				wl_vk_surface->buffers[evicted_idx]      = NULL;

				i = evicted_idx;
			}

			wl_vk_surface->buffer_cnt++;
			wl_vk_surface->buffers[i]          = wl_vk_buffer;
			wl_vk_buffer->idx                  = i;
		}
		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		TPL_INFO("[WL_VK_BUFFER_CREATE]",
				 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				 wl_vk_surface, wl_vk_buffer, tbm_surface,
				 wl_vk_buffer->bo_name);
	}

	return wl_vk_buffer;
}
2043
/* Dequeues the next renderable buffer from the swapchain's tbm_queue.
 * Blocks (with the TPL object lock temporarily released) until a buffer is
 * dequeueable or @timeout_ns expires. Returns the tbm_surface with an extra
 * internal reference taken, or NULL on timeout/error/pending reset. When
 * @release_fence is non-NULL it receives the explicit-sync release fence fd
 * (ownership transferred to the caller) or -1. */
static tbm_surface_h
__tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
								   uint64_t timeout_ns,
								   int32_t *release_fence)
{
	TPL_ASSERT(surface);
	TPL_ASSERT(surface->backend.data);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->display->backend.data);
	TPL_OBJECT_CHECK_RETURN(surface, NULL);

	tpl_wl_vk_surface_t *wl_vk_surface =
		(tpl_wl_vk_surface_t *)surface->backend.data;
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)surface->display->backend.data;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;

	tbm_surface_h tbm_surface          = NULL;
	tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_NONE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);

	/* Drop the TPL object lock while blocking so other surface operations
	 * can proceed; UINT64_MAX means wait indefinitely. */
	TPL_OBJECT_UNLOCK(surface);
	TRACE_BEGIN("WAIT_DEQUEUEABLE");
	if (timeout_ns != UINT64_MAX) {
		tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
						swapchain->tbm_queue, timeout_ns/1000);
	} else {
		/* NOTE(review): the return value of the indefinite wait is ignored
		 * here; failures surface later via tbm_surface_queue_dequeue —
		 * confirm this is intentional. */
		tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
	}
	TRACE_END();
	TPL_OBJECT_LOCK(surface);

	if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
		TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
				timeout_ns);
		return NULL;
	} else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
				wl_vk_surface, swapchain->tbm_queue, tsq_err);
		return NULL;
	}

	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

	/* A pending reset means the swapchain is stale; the caller must
	 * re-create it before dequeuing. */
	if (wl_vk_surface->reset) {
		TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
				  swapchain, swapchain->tbm_queue);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
										&tbm_surface);
	if (!tbm_surface) {
		TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
				swapchain->tbm_queue, wl_vk_surface, tsq_err);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	/* Hold a reference for the caller; released when the buffer cycles
	 * back through the queue. */
	tbm_surface_internal_ref(tbm_surface);

	wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");

	tpl_gmutex_lock(&wl_vk_buffer->mutex);
	wl_vk_buffer->status = DEQUEUED;

	if (release_fence) {
#if TIZEN_FEATURE_ENABLE
		/* Hand the explicit-sync release fence fd to the caller and drop
		 * our copy (ownership transfer). */
		if (wl_vk_surface->surface_sync) {
			*release_fence = wl_vk_buffer->release_fence_fd;
			TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
					  wl_vk_surface, wl_vk_buffer, *release_fence);
			wl_vk_buffer->release_fence_fd = -1;
		} else
#endif
		{
			*release_fence = -1;
		}
	}

	wl_vk_surface->reset = TPL_FALSE;

	TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
			  wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
			  release_fence ? *release_fence : -1);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return tbm_surface;
}
2140
2141 static tpl_result_t
2142 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
2143                                                                           tbm_surface_h tbm_surface)
2144 {
2145         TPL_ASSERT(surface);
2146         TPL_ASSERT(surface->backend.data);
2147
2148         tpl_wl_vk_surface_t *wl_vk_surface  =
2149                 (tpl_wl_vk_surface_t *)surface->backend.data;
2150         tpl_wl_vk_swapchain_t *swapchain    = NULL;
2151         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2152         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2153
2154         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2155                                                                   TPL_ERROR_INVALID_PARAMETER);
2156
2157         swapchain = wl_vk_surface->swapchain;
2158         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2159         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2160                                                                  TPL_ERROR_INVALID_PARAMETER);
2161
2162         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2163         if (wl_vk_buffer) {
2164                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2165                 wl_vk_buffer->status = RELEASED;
2166                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2167         }
2168
2169         tbm_surface_internal_unref(tbm_surface);
2170
2171         TPL_INFO("[CANCEL BUFFER]",
2172                          "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2173                           wl_vk_surface, swapchain, tbm_surface,
2174                           _get_tbm_surface_bo_name(tbm_surface));
2175
2176         tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2177                                                                                            tbm_surface);
2178         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2179                 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2180                 return TPL_ERROR_INVALID_OPERATION;
2181         }
2182
2183         return TPL_ERROR_NONE;
2184 }
2185
2186 static tpl_result_t
2187 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2188                                                                            tbm_surface_h tbm_surface,
2189                                                                            int num_rects, const int *rects,
2190                                                                            int32_t acquire_fence)
2191 {
2192         TPL_ASSERT(surface);
2193         TPL_ASSERT(surface->display);
2194         TPL_ASSERT(surface->backend.data);
2195         TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2196
2197         tpl_wl_vk_surface_t *wl_vk_surface  =
2198                 (tpl_wl_vk_surface_t *) surface->backend.data;
2199         tpl_wl_vk_swapchain_t *swapchain    = wl_vk_surface->swapchain;
2200         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2201         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2202         int bo_name                         = -1;
2203
2204         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2205         TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2206         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2207                                                                   TPL_ERROR_INVALID_PARAMETER);
2208
2209         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2210         if (!wl_vk_buffer) {
2211                 TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
2212                 return TPL_ERROR_INVALID_PARAMETER;
2213         }
2214
2215         bo_name = wl_vk_buffer->bo_name;
2216
2217         tpl_gmutex_lock(&wl_vk_buffer->mutex);
2218
2219         /* If there are received region information, save it to wl_vk_buffer */
2220         if (num_rects && rects) {
2221                 if (wl_vk_buffer->rects != NULL) {
2222                         free(wl_vk_buffer->rects);
2223                         wl_vk_buffer->rects = NULL;
2224                         wl_vk_buffer->num_rects = 0;
2225                 }
2226
2227                 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2228                 wl_vk_buffer->num_rects = num_rects;
2229
2230                 if (wl_vk_buffer->rects) {
2231                         memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2232                                    sizeof(int) * 4 * num_rects);
2233                 } else {
2234                         TPL_ERR("Failed to allocate memory for rects info.");
2235                 }
2236         }
2237
2238         if (wl_vk_buffer->acquire_fence_fd != -1)
2239                 close(wl_vk_buffer->acquire_fence_fd);
2240
2241         wl_vk_buffer->acquire_fence_fd = acquire_fence;
2242
2243         wl_vk_buffer->status = ENQUEUED;
2244         TPL_LOG_T("WL_VK",
2245                           "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2246                           wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2247
2248         tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2249
2250         tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2251                                                                                 tbm_surface);
2252         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2253                 tbm_surface_internal_unref(tbm_surface);
2254                 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2255                                 tbm_surface, wl_vk_surface, tsq_err);
2256                 return TPL_ERROR_INVALID_OPERATION;
2257         }
2258
2259         tbm_surface_internal_unref(tbm_surface);
2260
2261         return TPL_ERROR_NONE;
2262 }
2263
/* wl_buffer listener used when explicit sync is not in effect: the
 * compositor's release event is routed to __cb_wl_buffer_release.
 * The cast silences the signature mismatch (the callback takes a
 * struct wl_proxy * instead of struct wl_buffer *). */
static const struct wl_buffer_listener wl_buffer_release_listener = {
	(void *)__cb_wl_buffer_release,
};
2267
/* Drain every acquirable buffer from the swapchain's tbm_queue and commit
 * it to the wl_surface (or park it until the next vblank).
 *
 * Runs on the backend thread. For each acquired buffer:
 *  - takes an extra tbm reference (released later on the release path),
 *  - lazily creates its wl_buffer via wayland-tbm,
 *  - commits immediately, or queues it on vblank_waiting_buffers when
 *    vblank pacing is enabled and the previous vblank has not arrived.
 *
 * @param wl_vk_surface  Surface whose swapchain queue is drained.
 * @return TPL_ERROR_NONE, or TPL_ERROR_INVALID_PARAMETER /
 *         TPL_ERROR_INVALID_OPERATION on failure.
 */
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_h tbm_surface            = NULL;
	tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display   = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain     = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer     = NULL;
	tpl_bool_t ready_to_commit           = TPL_TRUE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

	/* Loop until the queue has nothing left to acquire (timeout 0 = poll). */
	while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
		tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
											&tbm_surface);
		if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
			TPL_ERR("Failed to acquire from tbm_queue(%p)",
					swapchain->tbm_queue);
			return TPL_ERROR_INVALID_OPERATION;
		}

		/* Hold the surface until the compositor releases it. */
		tbm_surface_internal_ref(tbm_surface);

		wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
									   "wl_vk_buffer sould be not NULL");

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		wl_vk_buffer->status = ACQUIRED;

		TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				  wl_vk_buffer, tbm_surface,
				  _get_tbm_surface_bo_name(tbm_surface));

		/* Create the wl_buffer once per tbm_surface and cache it. */
		if (wl_vk_buffer->wl_buffer == NULL) {
			wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
						wl_vk_display->wl_tbm_client, tbm_surface);

			if (!wl_vk_buffer->wl_buffer) {
				TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
						 wl_vk_display->wl_tbm_client, tbm_surface);
			} else {
				/* The wl_buffer.release listener is only needed when
				 * explicit sync will not deliver release events. */
				if (wl_vk_buffer->acquire_fence_fd == -1 ||
					wl_vk_display->use_explicit_sync == TPL_FALSE) {
					wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
										   &wl_buffer_release_listener, wl_vk_buffer);
				}

				TPL_LOG_T("WL_VK",
						  "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
						  wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
			}
		}

		/* With vblank pacing on and no vblank seen yet, defer the commit
		 * to __cb_tdm_client_vblank via the waiting list. */
		if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done)
			ready_to_commit = TPL_TRUE;
		else {
			wl_vk_buffer->status = WAITING_VBLANK;
			__tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
			ready_to_commit = TPL_FALSE;
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		if (ready_to_commit)
			_thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
	}

	return TPL_ERROR_NONE;
}
2339
2340 #if TIZEN_FEATURE_ENABLE
2341 static void
2342 __cb_buffer_fenced_release(void *data,
2343                                                    struct zwp_linux_buffer_release_v1 *release,
2344                                                    int32_t fence)
2345 {
2346         tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
2347         tbm_surface_h tbm_surface         = NULL;
2348
2349         TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2350
2351         tbm_surface = wl_vk_buffer->tbm_surface;
2352
2353         if (tbm_surface_internal_is_valid(tbm_surface)) {
2354                 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2355                 tpl_wl_vk_swapchain_t *swapchain   = NULL;
2356
2357                 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2358                         TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2359                         tbm_surface_internal_unref(tbm_surface);
2360                         return;
2361                 }
2362
2363                 swapchain = wl_vk_surface->swapchain;
2364
2365                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2366                 if (wl_vk_buffer->status == COMMITTED) {
2367                         tbm_surface_queue_error_e tsq_err;
2368
2369                         zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2370                         wl_vk_buffer->buffer_release = NULL;
2371
2372                         wl_vk_buffer->release_fence_fd = fence;
2373                         wl_vk_buffer->status = RELEASED;
2374
2375                         TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2376                                            wl_vk_buffer->bo_name,
2377                                            fence);
2378                         TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2379                                                         wl_vk_buffer->bo_name);
2380
2381                         TPL_LOG_T("WL_VK",
2382                                           "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2383                                           wl_vk_buffer, tbm_surface,
2384                                           wl_vk_buffer->bo_name,
2385                                           fence);
2386
2387                         tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2388                                                                                                 tbm_surface);
2389                         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2390                                 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2391
2392                         tbm_surface_internal_unref(tbm_surface);
2393                 }
2394
2395                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2396
2397         } else {
2398                 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2399         }
2400 }
2401
/* zwp_linux_buffer_release_v1 "immediate_release" handler.
 *
 * The compositor has finished with the buffer and no fence is required:
 * the buffer is immediately reusable. Mirrors __cb_buffer_fenced_release
 * with release_fence_fd forced to -1.
 *
 * @param data     wl_vk_buffer registered with the listener.
 * @param release  The per-commit release object; destroyed here.
 */
static void
__cb_buffer_immediate_release(void *data,
							  struct zwp_linux_buffer_release_v1 *release)
{
	tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface           = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain   = NULL;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);
			return;
		}

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);
		/* Only a COMMITTED buffer is returned to the queue; other states
		 * mean the release raced with a swapchain teardown/cancel. */
		if (wl_vk_buffer->status == COMMITTED) {
			tbm_surface_queue_error_e tsq_err;

			/* The release object is one-shot; destroy it now. */
			zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
			wl_vk_buffer->buffer_release = NULL;

			/* No fence accompanies an immediate release. */
			wl_vk_buffer->release_fence_fd = -1;
			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
					   _get_tbm_surface_bo_name(tbm_surface));
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							_get_tbm_surface_bo_name(tbm_surface));

			TPL_LOG_T("WL_VK",
					  "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
					  wl_vk_buffer, tbm_surface,
					  _get_tbm_surface_bo_name(tbm_surface));

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
												tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			/* Drop the reference taken when the buffer was acquired. */
			tbm_surface_internal_unref(tbm_surface);
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	} else {
		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
2459
/* Listener for the explicit-sync per-commit release object; the compositor
 * reports buffer release either with a fence or immediately.
 * (NOTE: "listner" spelling kept — renaming would touch other references.) */
static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
	__cb_buffer_fenced_release,
	__cb_buffer_immediate_release,
};
2464 #endif
2465
/* wl_buffer.release handler (non-explicit-sync path).
 *
 * Returns a COMMITTED buffer to the swapchain's tbm_queue and drops the
 * reference taken at acquire time. Registered through
 * wl_buffer_release_listener; the wl_buffer arrives as a generic proxy.
 *
 * @param data       wl_vk_buffer registered with the listener.
 * @param wl_buffer  Released wl_buffer (as wl_proxy; unused directly).
 */
static void
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
{
	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain   = NULL;
		tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);
			return;
		}

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		/* Only a COMMITTED buffer is returned to the queue; other states
		 * mean this release raced with cancel/teardown. */
		if (wl_vk_buffer->status == COMMITTED) {

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
												tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							wl_vk_buffer->bo_name);

			TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
					  wl_vk_buffer->wl_buffer, tbm_surface,
					  wl_vk_buffer->bo_name);

			/* Drop the reference taken when the buffer was acquired. */
			tbm_surface_internal_unref(tbm_surface);
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	} else {
		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
2516
2517 static void
2518 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2519                                            unsigned int sequence, unsigned int tv_sec,
2520                                            unsigned int tv_usec, void *user_data)
2521 {
2522         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
2523         tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;
2524
2525         TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2526         TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface);
2527
2528         if (error == TDM_ERROR_TIMEOUT)
2529                 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
2530                                  wl_vk_surface);
2531
2532         wl_vk_surface->vblank_done = TPL_TRUE;
2533
2534         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
2535         wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
2536                                                 wl_vk_surface->vblank_waiting_buffers,
2537                                                 NULL);
2538         if (wl_vk_buffer)
2539                 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2540         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
2541 }
2542
2543 static tpl_result_t
2544 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2545 {
2546         tdm_error tdm_err                     = TDM_ERROR_NONE;
2547         tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
2548
2549         if (wl_vk_surface->vblank == NULL) {
2550                 wl_vk_surface->vblank =
2551                         _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client);
2552                 if (!wl_vk_surface->vblank) {
2553                         TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2554                                          wl_vk_surface);
2555                         return TPL_ERROR_OUT_OF_MEMORY;
2556                 }
2557         }
2558
2559         tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2560                         wl_vk_surface->post_interval,
2561                         __cb_tdm_client_vblank,
2562                         (void *)wl_vk_surface);
2563
2564         if (tdm_err == TDM_ERROR_NONE) {
2565                 wl_vk_surface->vblank_done = TPL_FALSE;
2566                 TRACE_ASYNC_BEGIN((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2567         } else {
2568                 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2569                 return TPL_ERROR_INVALID_OPERATION;
2570         }
2571
2572         return TPL_ERROR_NONE;
2573 }
2574
/* Attach, damage, and commit a buffer to the surface's wl_surface.
 *
 * Runs on the backend thread. Lazily creates the wl_buffer if needed,
 * posts damage (per-rect when a region was supplied, else full surface),
 * wires explicit sync (acquire fence + release object) when available,
 * commits, and finally marks the buffer COMMITTED and signals any waiter.
 * With vblank pacing enabled, a new vblank wait is armed at the end.
 *
 * @param wl_vk_surface  Surface to commit on.
 * @param wl_vk_buffer   Buffer to present (asserted non-NULL).
 */
static void
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
						  tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
	struct wl_surface *wl_surface         = wl_vk_surface->wl_surface;
	uint32_t version;

	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
								   "wl_vk_buffer sould be not NULL");

	/* Fallback: create the wl_buffer here if acquire did not already. */
	if (wl_vk_buffer->wl_buffer == NULL) {
		wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
													wl_vk_display->wl_tbm_client,
													wl_vk_buffer->tbm_surface);
		/* The release listener is only needed when explicit sync will not
		 * deliver release events (no fence, or explicit sync disabled). */
		if (wl_vk_buffer->wl_buffer &&
			(wl_vk_buffer->acquire_fence_fd == -1 ||
			 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
				wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
									   &wl_buffer_release_listener, wl_vk_buffer);
		}
	}
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
								   "[FATAL] Failed to create wl_buffer");

	/* wl_surface_damage_buffer requires wl_surface version >= 4. */
	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);

	wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
					  wl_vk_buffer->dx, wl_vk_buffer->dy);

	if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
		/* No damage region supplied: damage the whole buffer. */
		if (version < 4) {
			wl_surface_damage(wl_surface,
							  wl_vk_buffer->dx, wl_vk_buffer->dy,
							  wl_vk_buffer->width, wl_vk_buffer->height);
		} else {
			wl_surface_damage_buffer(wl_surface,
									 0, 0,
									 wl_vk_buffer->width, wl_vk_buffer->height);
		}
	} else {
		int i;
		/* rects is stored as [x, y, w, h] quadruples; Vulkan's origin is
		 * bottom-left, so flip y into Wayland's top-left space. */
		for (i = 0; i < wl_vk_buffer->num_rects; i++) {
			int inverted_y =
				wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
						wl_vk_buffer->rects[i * 4 + 3]);
			if (version < 4) {
				wl_surface_damage(wl_surface,
								  wl_vk_buffer->rects[i * 4 + 0],
								  inverted_y,
								  wl_vk_buffer->rects[i * 4 + 2],
								  wl_vk_buffer->rects[i * 4 + 3]);
			} else {
				wl_surface_damage_buffer(wl_surface,
										 wl_vk_buffer->rects[i * 4 + 0],
										 inverted_y,
										 wl_vk_buffer->rects[i * 4 + 2],
										 wl_vk_buffer->rects[i * 4 + 3]);
			}
		}
	}

#if TIZEN_FEATURE_ENABLE
	/* Explicit sync: hand the acquire fence to the compositor and request
	 * a per-commit release object for the release fence path. */
	if (wl_vk_display->use_explicit_sync &&
		wl_vk_surface->surface_sync &&
		wl_vk_buffer->acquire_fence_fd != -1) {

		zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
															   wl_vk_buffer->acquire_fence_fd);
		TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
				  wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
		/* set_acquire_fence dup'd the fd; close our copy. */
		close(wl_vk_buffer->acquire_fence_fd);
		wl_vk_buffer->acquire_fence_fd = -1;

		wl_vk_buffer->buffer_release =
			zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
		if (!wl_vk_buffer->buffer_release) {
			TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
		} else {
			zwp_linux_buffer_release_v1_add_listener(
				wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
			TPL_DEBUG("add explicit_sync_release_listener.");
		}
	}
#endif

	wl_surface_commit(wl_surface);

	wl_display_flush(wl_vk_display->wl_display);

	TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
					  wl_vk_buffer->bo_name);

	tpl_gmutex_lock(&wl_vk_buffer->mutex);

	wl_vk_buffer->need_to_commit   = TPL_FALSE;
	wl_vk_buffer->status           = COMMITTED;

	/* Wake any thread blocked waiting for this buffer to be committed. */
	tpl_gcond_signal(&wl_vk_buffer->cond);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	TPL_LOG_T("WL_VK",
			  "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
			  wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
			  wl_vk_buffer->bo_name);

	if (wl_vk_display->use_wait_vblank &&
		_thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
		TPL_ERR("Failed to set wait vblank.");
}
2686
2687 tpl_bool_t
2688 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2689 {
2690         if (!native_dpy) return TPL_FALSE;
2691
2692         if (_check_native_handle_is_wl_display(native_dpy))
2693                 return TPL_TRUE;
2694
2695         return TPL_FALSE;
2696 }
2697
/* Populate the display backend vtable with this backend's entry points.
 *
 * @param backend  Display backend descriptor to fill (asserted non-NULL).
 */
void
__tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_vk_display_init;
	backend->fini = __tpl_wl_vk_display_fini;
	backend->query_config = __tpl_wl_vk_display_query_config;
	backend->filter_config = __tpl_wl_vk_display_filter_config;
	backend->query_window_supported_buffer_count =
		__tpl_wl_vk_display_query_window_supported_buffer_count;
	backend->query_window_supported_present_modes =
		__tpl_wl_vk_display_query_window_supported_present_modes;
}
2715
/* Populate the surface backend vtable with this backend's entry points.
 *
 * @param backend  Surface backend descriptor to fill (asserted non-NULL).
 */
void
__tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_vk_surface_init;
	backend->fini = __tpl_wl_vk_surface_fini;
	backend->validate = __tpl_wl_vk_surface_validate;
	backend->cancel_dequeued_buffer =
		__tpl_wl_vk_surface_cancel_buffer;
	backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
	backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
	backend->get_swapchain_buffers =
		__tpl_wl_vk_surface_get_swapchain_buffers;
	backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
	backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
	backend->set_post_interval =
		__tpl_wl_vk_surface_set_post_interval;
}
2738
2739 static int
2740 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2741 {
2742         return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
2743 }
2744
2745 static void
2746 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2747 {
2748         int idx = 0;
2749
2750         tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2751         TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2752                          wl_vk_surface, wl_vk_surface->buffer_cnt);
2753         for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2754                 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2755                 if (wl_vk_buffer) {
2756                         TPL_INFO("[INFO]",
2757                                          "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2758                                          idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2759                                          wl_vk_buffer->bo_name,
2760                                          status_to_string[wl_vk_buffer->status]);
2761                 }
2762         }
2763         tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2764 }