Fix: do not add a listener to wl_buffer when wl_buffer is NULL.
[platform/core/uifw/libtpl-egl.git] / src / tpl_wl_vk_thread.c
1 #define inline __inline__
2 #undef inline
3
4 #include "tpl_internal.h"
5
6 #include <string.h>
7 #include <fcntl.h>
8 #include <unistd.h>
9 #include <sys/eventfd.h>
10
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
15
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
19
20 #include <tdm_client.h>
21
22 #include <tizen-surface-client-protocol.h>
23 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
24
25 #include "tpl_utils_gthread.h"
26
27 #define BUFFER_ARRAY_SIZE 10
28 #define VK_CLIENT_QUEUE_SIZE 3
29
30 static int wl_vk_buffer_key;
31 #define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
32
33 typedef struct _tpl_wl_vk_display       tpl_wl_vk_display_t;
34 typedef struct _tpl_wl_vk_surface       tpl_wl_vk_surface_t;
35 typedef struct _tpl_wl_vk_swapchain     tpl_wl_vk_swapchain_t;
36 typedef struct _tpl_wl_vk_buffer        tpl_wl_vk_buffer_t;
37
38 struct _tpl_wl_vk_display {
39         tpl_gsource                  *disp_source;
40         tpl_gthread                  *thread;
41         tpl_gmutex                    wl_event_mutex;
42
43         struct wl_display            *wl_display;
44         struct wl_event_queue        *ev_queue;
45         struct wayland_tbm_client    *wl_tbm_client;
46         int                           last_error; /* errno of the last wl_display error*/
47
48         tpl_bool_t                    wl_initialized;
49         tpl_bool_t                    tdm_initialized;
50
51         tdm_client                   *tdm_client;
52         tpl_gsource                  *tdm_source;
53         int                           tdm_display_fd;
54
55         tpl_bool_t                    use_wait_vblank;
56         tpl_bool_t                    use_explicit_sync;
57         tpl_bool_t                    prepared;
58
59         /* device surface capabilities */
60         int                           min_buffer;
61         int                           max_buffer;
62         int                           present_modes;
63
64         struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
65 };
66
/* Vulkan swapchain state bound to one tpl_wl_vk_surface.
 * Wraps a tbm_surface_queue plus the client-requested properties;
 * refcounted via ref_cnt. */
struct _tpl_wl_vk_swapchain {
	tpl_wl_vk_surface_t          *wl_vk_surface; /* owning surface (back-pointer) */

	tbm_surface_queue_h           tbm_queue;     /* buffer queue backing this swapchain */

	/* properties requested at swapchain creation time */
	struct {
		int                       width;
		int                       height;
		tbm_format                format;
		int                       buffer_count;
		int                       present_mode;  /* VK present mode mapped to TPL modes */
	} properties;

	/* snapshot of the queue's buffers handed out to the VK client;
	 * presumably filled on get_swapchain_buffers — confirm in full file */
	tbm_surface_h                *swapchain_buffers;

	tpl_util_atomic_uint          ref_cnt;       /* atomic reference count */
};
84
/* Per-surface backend state. Owns the buffer-tracking array and the
 * synchronization primitives used between the client thread and the
 * wl_vk event thread. */
struct _tpl_wl_vk_surface {
	tpl_gsource                  *surf_source;   /* gsource for surface requests on the thread */

	tpl_wl_vk_swapchain_t        *swapchain;     /* current swapchain (NULL if none) */

	struct wl_surface            *wl_surface;    /* client's native wl_surface (not owned) */
	struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */

	tdm_client_vblank            *vblank;        /* per-surface vblank object for FIFO pacing */

	/* surface information */
	int                           render_done_cnt;

	tpl_wl_vk_display_t          *wl_vk_display; /* owning display (back-pointer) */
	tpl_surface_t                *tpl_surface;   /* frontend surface (back-pointer) */

	/* wl_vk_buffer array for buffer tracing */
	tpl_wl_vk_buffer_t           *buffers[BUFFER_ARRAY_SIZE];
	int                           buffer_cnt; /* the number of using wl_vk_buffers */
	tpl_gmutex                    buffers_mutex; /* guards buffers[] and buffer_cnt */

	tpl_list_t                   *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */

	tpl_gmutex                    surf_mutex;    /* guards surface request/response handshake */
	tpl_gcond                     surf_cond;

	/* for waiting draw done */
	tpl_bool_t                    is_activated;
	tpl_bool_t                    reset; /* TRUE if queue reseted by external  */
	tpl_bool_t                    vblank_done;   /* last requested vblank has fired */

	int                           post_interval; /* swap interval requested by the client */
};
118
/* Lifecycle states of a wl_vk_buffer, in the order a buffer normally
 * traverses them: RELEASED -> DEQUEUED -> ENQUEUED -> ACQUIRED ->
 * WAITING_SIGNALED -> WAITING_VBLANK -> COMMITTED -> RELEASED.
 * Keep numbering in sync with status_to_string[] below. */
typedef enum buffer_status {
	RELEASED = 0,             // 0
	DEQUEUED,                 // 1
	ENQUEUED,                 // 2
	ACQUIRED,                 // 3
	WAITING_SIGNALED,         // 4
	WAITING_VBLANK,           // 5
	COMMITTED,                // 6
} buffer_status_t;
128
/* Printable names for buffer_status_t, indexed by the enum value.
 * Must stay in one-to-one correspondence with enum buffer_status. */
static const char *status_to_string[7] = {
	"RELEASED",                 // 0
	"DEQUEUED",                 // 1
	"ENQUEUED",                 // 2
	"ACQUIRED",                 // 3
	"WAITING_SIGNALED",         // 4
	"WAITING_VBLANK",           // 5
	"COMMITTED",                // 6
};
138
/* Per-buffer tracking record attached to a tbm_surface via
 * KEY_WL_VK_BUFFER. Lives in wl_vk_surface->buffers[] at index idx. */
struct _tpl_wl_vk_buffer {
	tbm_surface_h                 tbm_surface;
	int                           bo_name;  /* tbm bo name, used for tracing/logging */

	struct wl_buffer             *wl_buffer;
	int                           dx, dy; /* position to attach to wl_surface */
	int                           width, height; /* size to attach to wl_surface */

	buffer_status_t               status; /* for tracing buffer status */
	int                           idx; /* position index in buffers array of wl_vk_surface */

	/* for damage region */
	int                           num_rects;
	int                          *rects;

	/* for checking need_to_commit (frontbuffer mode) */
	tpl_bool_t                    need_to_commit;

	/* to get release event via zwp_linux_buffer_release_v1 */
	struct zwp_linux_buffer_release_v1 *buffer_release;

	/* each buffers own its release_fence_fd, until it passes ownership
	 * to it to EGL */
	int32_t                       release_fence_fd;

	/* each buffers own its acquire_fence_fd.
	 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
	 * will be passed to display server
	 * Otherwise it will be used as a fence waiting for render done
	 * on tpl thread */
	int32_t                       acquire_fence_fd;

	/* guards status transitions and wakes waiters on this buffer */
	tpl_gmutex                    mutex;
	tpl_gcond                     cond;

	tpl_wl_vk_surface_t          *wl_vk_surface; /* owning surface (back-pointer) */
};
176
177 static void
178 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
179 static int
180 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
181 static void
182 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
183 static void
184 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
185 static tpl_result_t
186 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
187 static void
188 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
189 static tpl_result_t
190 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
191 static void
192 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
193                                                   tpl_wl_vk_buffer_t *wl_vk_buffer);
194
195 static tpl_bool_t
196 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
197 {
198         struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
199
200         if (!wl_vk_native_dpy) {
201                 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
202                 return TPL_FALSE;
203         }
204
205         /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
206            is a memory address pointing the structure of wl_display_interface. */
207         if (wl_vk_native_dpy == &wl_display_interface)
208                 return TPL_TRUE;
209
210         if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
211                                 strlen(wl_display_interface.name)) == 0) {
212                 return TPL_TRUE;
213         }
214
215         return TPL_FALSE;
216 }
217
/* Dispatch callback of tdm_source; runs on the wl_vk thread.
 * Drains pending tdm client events (vblank callbacks fire from here).
 * Returns TPL_FALSE to request removal of the gsource from the loop. */
static tpl_bool_t
__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
{
	tpl_wl_vk_display_t        *wl_vk_display = NULL;
	tdm_error                   tdm_err = TDM_ERROR_NONE;

	TPL_IGNORE(message);

	wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
	if (!wl_vk_display) {
		TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
		TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
		return TPL_FALSE;
	}

	tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client);

	/* If an error occurs in tdm_client_handle_events, it cannot be recovered.
	 * When tdm_source is no longer available due to an unexpected situation,
	 * wl_vk_thread must remove it from the thread and destroy it.
	 * In that case, tdm_vblank can no longer be used for surfaces and displays
	 * that used this tdm_source. */
	if (tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
				tdm_err);
		TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);

		/* TPL_FALSE: do not join the source's own thread from inside its
		 * dispatch; the loop removes it after we return TPL_FALSE. */
		tpl_gsource_destroy(gsource, TPL_FALSE);

		wl_vk_display->tdm_source = NULL;

		return TPL_FALSE;
	}

	return TPL_TRUE;
}
254
255 static void
256 __thread_func_tdm_finalize(tpl_gsource *gsource)
257 {
258         tpl_wl_vk_display_t *wl_vk_display = NULL;
259
260         wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
261
262         TPL_LOG_T("WL_VK",
263                           "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)",
264                           wl_vk_display, wl_vk_display->tdm_client, gsource);
265
266         if (wl_vk_display->tdm_client) {
267                 tdm_client_destroy(wl_vk_display->tdm_client);
268                 wl_vk_display->tdm_client = NULL;
269                 wl_vk_display->tdm_display_fd = -1;
270         }
271
272         wl_vk_display->tdm_initialized = TPL_FALSE;
273 }
274
/* gsource vtable for the tdm fd: plain fd polling, no prepare/check. */
static tpl_gsource_functions tdm_funcs = {
	.prepare  = NULL,
	.check    = NULL,
	.dispatch = __thread_func_tdm_dispatch,
	.finalize = __thread_func_tdm_finalize,
};
281
282 static tpl_result_t
283 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
284 {
285         tdm_client       *tdm_client = NULL;
286         int               tdm_display_fd = -1;
287         tdm_error         tdm_err = TDM_ERROR_NONE;
288
289         tdm_client = tdm_client_create(&tdm_err);
290         if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
291                 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
292                 return TPL_ERROR_INVALID_OPERATION;
293         }
294
295         tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
296         if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
297                 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
298                 tdm_client_destroy(tdm_client);
299                 return TPL_ERROR_INVALID_OPERATION;
300         }
301
302         wl_vk_display->tdm_display_fd  = tdm_display_fd;
303         wl_vk_display->tdm_client      = tdm_client;
304         wl_vk_display->tdm_source      = NULL;
305         wl_vk_display->tdm_initialized = TPL_TRUE;
306
307         TPL_INFO("[TDM_CLIENT_INIT]",
308                          "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
309                          wl_vk_display, tdm_client, tdm_display_fd);
310
311         return TPL_ERROR_NONE;
312 }
313
314 #define IMPL_TIZEN_SURFACE_SHM_VERSION 2
315
316 static void
317 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
318                                                           uint32_t name, const char *interface,
319                                                           uint32_t version)
320 {
321         tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
322
323         if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
324                 char *env = tpl_getenv("TPL_EFS");
325                 if (env && !atoi(env)) {
326                         wl_vk_display->use_explicit_sync = TPL_FALSE;
327                 } else {
328                         wl_vk_display->explicit_sync =
329                                         wl_registry_bind(wl_registry, name,
330                                                                          &zwp_linux_explicit_synchronization_v1_interface, 1);
331                         wl_vk_display->use_explicit_sync = TPL_TRUE;
332                         TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
333                 }
334         }
335 }
336
/* wl_registry global_remove handler: intentionally empty — this backend
 * takes no action when a global is removed. */
static void
__cb_wl_resistry_global_remove_callback(void *data,
										struct wl_registry *wl_registry,
										uint32_t name)
{
}
343
/* Listener for the temporary registry used during display init. */
static const struct wl_registry_listener registry_listener = {
	__cb_wl_resistry_global_callback,
	__cb_wl_resistry_global_remove_callback
};
348
349 static void
350 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
351                                           const char *func_name)
352 {
353         int dpy_err;
354         char buf[1024];
355         strerror_r(errno, buf, sizeof(buf));
356
357         if (wl_vk_display->last_error == errno)
358                 return;
359
360         TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
361
362         dpy_err = wl_display_get_error(wl_vk_display->wl_display);
363         if (dpy_err == EPROTO) {
364                 const struct wl_interface *err_interface;
365                 uint32_t err_proxy_id, err_code;
366                 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
367                                                                                                  &err_interface,
368                                                                                                  &err_proxy_id);
369                 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
370                                 err_interface->name, err_code, err_proxy_id);
371         }
372
373         wl_vk_display->last_error = errno;
374 }
375
/* Initializes wayland state on the wl_vk thread:
 *  - a temporary event queue + registry (destroyed at 'fini') used only
 *    for the binding roundtrip,
 *  - the persistent ev_queue for backend-owned proxies,
 *  - the wayland-tbm client (its wl_tbm proxy moved onto ev_queue),
 *  - the explicit-sync global, if advertised (bound via
 *    registry_listener during the roundtrip).
 *
 * On any failure, jumps to 'fini' which releases the temporaries; the
 * persistent objects created so far are cleaned up later by
 * _thread_wl_display_fini(). Returns TPL_ERROR_NONE on success. */
static tpl_result_t
_thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
{
	struct wl_registry *registry                = NULL;
	struct wl_event_queue *queue                = NULL;
	struct wl_display *display_wrapper          = NULL;
	struct wl_proxy *wl_tbm                     = NULL;
	struct wayland_tbm_client *wl_tbm_client    = NULL;
	int ret;
	tpl_result_t result = TPL_ERROR_NONE;

	/* Temporary queue: keeps registry events away from the client's
	 * default queue during the roundtrip below. */
	queue = wl_display_create_queue(wl_vk_display->wl_display);
	if (!queue) {
		TPL_ERR("Failed to create wl_queue wl_display(%p)",
				wl_vk_display->wl_display);
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
	if (!wl_vk_display->ev_queue) {
		TPL_ERR("Failed to create wl_queue wl_display(%p)",
				wl_vk_display->wl_display);
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	/* Wrapper proxy so the registry is created on the temporary queue
	 * without touching the client's wl_display proxy. */
	display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
	if (!display_wrapper) {
		TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
				wl_vk_display->wl_display);
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);

	registry = wl_display_get_registry(display_wrapper);
	if (!registry) {
		TPL_ERR("Failed to create wl_registry");
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	/* The wrapper is only needed to create the registry. */
	wl_proxy_wrapper_destroy(display_wrapper);
	display_wrapper = NULL;

	wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
	if (!wl_tbm_client) {
		TPL_ERR("Failed to initialize wl_tbm_client.");
		result = TPL_ERROR_INVALID_CONNECTION;
		goto fini;
	}

	wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
	if (!wl_tbm) {
		TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
		result = TPL_ERROR_INVALID_CONNECTION;
		goto fini;
	}

	/* Route wl_tbm events to the backend's queue so they are handled
	 * by the wl_vk thread, not the client's dispatch loop. */
	wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
	wl_vk_display->wl_tbm_client = wl_tbm_client;

	if (wl_registry_add_listener(registry, &registry_listener,
								 wl_vk_display)) {
		TPL_ERR("Failed to wl_registry_add_listener");
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	/* Roundtrip on the temporary queue: delivers the globals and runs
	 * __cb_wl_resistry_global_callback, which may bind explicit_sync. */
	ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
	if (ret == -1) {
		_wl_display_print_err(wl_vk_display, "roundtrip_queue");
		result = TPL_ERROR_INVALID_OPERATION;
		goto fini;
	}

	/* explicit_sync was bound on the temporary queue; move it onto the
	 * persistent ev_queue before that queue is destroyed below. */
	if (wl_vk_display->explicit_sync) {
		wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
						   wl_vk_display->ev_queue);
		TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
				  wl_vk_display->explicit_sync);
	}

	wl_vk_display->wl_initialized = TPL_TRUE;

	TPL_INFO("[WAYLAND_INIT]",
			 "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
			 wl_vk_display, wl_vk_display->wl_display,
			 wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
	TPL_INFO("[WAYLAND_INIT]",
			 "explicit_sync(%p)",
			 wl_vk_display->explicit_sync);

fini:
	/* Reached on both success and failure: releases only the
	 * temporaries (wrapper, registry, temporary queue). */
	if (display_wrapper)
		wl_proxy_wrapper_destroy(display_wrapper);
	if (registry)
		wl_registry_destroy(registry);
	if (queue)
		wl_event_queue_destroy(queue);

	return result;
}
481
/* Tears down the wayland state created by _thread_wl_display_init(),
 * on the wl_vk thread. Order matters: cancel any pending read first,
 * flush pending events, then destroy proxies before their queue. */
static void
_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
{
	/* If wl_vk_display is in prepared state, cancel it */
	if (wl_vk_display->prepared) {
		wl_display_cancel_read(wl_vk_display->wl_display);
		wl_vk_display->prepared = TPL_FALSE;
	}

	/* Drain events already read into ev_queue so their callbacks run
	 * once more before the proxies go away. */
	if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
										  wl_vk_display->ev_queue) == -1) {
		_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
	}

	if (wl_vk_display->explicit_sync) {
		TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
				 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
				 wl_vk_display, wl_vk_display->explicit_sync);
		zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
		wl_vk_display->explicit_sync = NULL;
	}

	if (wl_vk_display->wl_tbm_client) {
		struct wl_proxy *wl_tbm = NULL;

		/* Detach wl_tbm from ev_queue (back to the default queue)
		 * before the queue is destroyed below. */
		wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
										wl_vk_display->wl_tbm_client);
		if (wl_tbm) {
			wl_proxy_set_queue(wl_tbm, NULL);
		}

		TPL_INFO("[WL_TBM_DEINIT]",
				 "wl_vk_display(%p) wl_tbm_client(%p)",
				 wl_vk_display, wl_vk_display->wl_tbm_client);
		wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
		wl_vk_display->wl_tbm_client = NULL;
	}

	wl_event_queue_destroy(wl_vk_display->ev_queue);

	wl_vk_display->wl_initialized = TPL_FALSE;

	TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
			 wl_vk_display, wl_vk_display->wl_display);
}
527
528 static void*
529 _thread_init(void *data)
530 {
531         tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
532
533         if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
534                 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
535                                 wl_vk_display, wl_vk_display->wl_display);
536         }
537
538         if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
539                 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
540         }
541
542         return wl_vk_display;
543 }
544
/* prepare callback of disp_source: implements the first half of the
 * libwayland multi-thread read protocol (prepare_read -> poll ->
 * read/cancel). Returning TPL_TRUE tells the loop to skip polling and
 * dispatch immediately; TPL_FALSE means "go ahead and poll the fd". */
static tpl_bool_t
__thread_func_disp_prepare(tpl_gsource *gsource)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	/* If this wl_vk_display is already prepared,
	 * do nothing in this function. */
	if (wl_vk_display->prepared)
		return TPL_FALSE;

	/* If there is a last_error, there is no need to poll,
	 * so skip directly to dispatch.
	 * prepare -> dispatch */
	if (wl_vk_display->last_error)
		return TPL_TRUE;

	/* prepare_read_queue fails while events are already pending on
	 * ev_queue; dispatch them until the queue is empty and the read
	 * intent is registered. */
	while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
										 wl_vk_display->ev_queue) != 0) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
		}
	}

	/* From here until disp_check runs, a read/cancel is owed. */
	wl_vk_display->prepared = TPL_TRUE;

	/* Push out any requests queued by this thread before blocking. */
	wl_display_flush(wl_vk_display->wl_display);

	return TPL_FALSE;
}
576
/* check callback of disp_source: second half of the read protocol.
 * Every prepare (prepared == TRUE) must be balanced by exactly one
 * wl_display_read_events() or wl_display_cancel_read() here.
 * Returns TPL_TRUE when dispatch should run. */
static tpl_bool_t
__thread_func_disp_check(tpl_gsource *gsource)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
	tpl_bool_t ret = TPL_FALSE;

	/* Nothing to balance if prepare did not register a read intent. */
	if (!wl_vk_display->prepared)
		return ret;

	/* If prepared, but last_error is set,
	 * cancel_read is executed and FALSE is returned.
	 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
	 * and skipping disp_check from prepare to disp_dispatch.
	 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
	if (wl_vk_display->prepared && wl_vk_display->last_error) {
		wl_display_cancel_read(wl_vk_display->wl_display);
		return ret;
	}

	if (tpl_gsource_check_io_condition(gsource)) {
		/* fd became readable: pull events into their queues. */
		if (wl_display_read_events(wl_vk_display->wl_display) == -1)
			_wl_display_print_err(wl_vk_display, "read_event");
		ret = TPL_TRUE;
	} else {
		/* Nothing readable: release the read intent. */
		wl_display_cancel_read(wl_vk_display->wl_display);
		ret = TPL_FALSE;
	}

	wl_vk_display->prepared = TPL_FALSE;

	return ret;
}
610
/* dispatch callback of disp_source: delivers events read into ev_queue
 * to their listeners, under wl_event_mutex so other threads touching
 * backend proxies are serialized against callback execution.
 * Returning TPL_FALSE removes the gsource (fatal display error). */
static tpl_bool_t
__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
{
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	TPL_IGNORE(message);

	/* If there is last_error, SOURCE_REMOVE should be returned
	 * to remove the gsource from the main loop.
	 * This is because wl_vk_display is not valid since last_error was set.*/
	if (wl_vk_display->last_error) {
		return TPL_FALSE;
	}

	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
		}
	}

	/* Flush any requests the dispatched callbacks queued. */
	wl_display_flush(wl_vk_display->wl_display);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return TPL_TRUE;
}
639
640 static void
641 __thread_func_disp_finalize(tpl_gsource *gsource)
642 {
643         tpl_wl_vk_display_t *wl_vk_display =
644                 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
645
646         if (wl_vk_display->wl_initialized)
647                 _thread_wl_display_fini(wl_vk_display);
648
649         TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)",
650                           wl_vk_display, gsource);
651
652         return;
653 }
654
655
656 static tpl_gsource_functions disp_funcs = {
657         .prepare  = __thread_func_disp_prepare,
658         .check    = __thread_func_disp_check,
659         .dispatch = __thread_func_disp_dispatch,
660         .finalize = __thread_func_disp_finalize,
661 };
662
663 static tpl_result_t
664 __tpl_wl_vk_display_init(tpl_display_t *display)
665 {
666         TPL_ASSERT(display);
667
668         tpl_wl_vk_display_t *wl_vk_display = NULL;
669
670         /* Do not allow default display in wayland */
671         if (!display->native_handle) {
672                 TPL_ERR("Invalid native handle for display.");
673                 return TPL_ERROR_INVALID_PARAMETER;
674         }
675
676         if (!_check_native_handle_is_wl_display(display->native_handle)) {
677                 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
678                 return TPL_ERROR_INVALID_PARAMETER;
679         }
680
681         wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
682                                                         sizeof(tpl_wl_vk_display_t));
683         if (!wl_vk_display) {
684                 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
685                 return TPL_ERROR_OUT_OF_MEMORY;
686         }
687
688         display->backend.data             = wl_vk_display;
689         display->bufmgr_fd                = -1;
690
691         wl_vk_display->tdm_initialized    = TPL_FALSE;
692         wl_vk_display->wl_initialized     = TPL_FALSE;
693
694         wl_vk_display->ev_queue           = NULL;
695         wl_vk_display->wl_display         = (struct wl_display *)display->native_handle;
696         wl_vk_display->last_error         = 0;
697         wl_vk_display->use_explicit_sync  = TPL_FALSE;   // default disabled
698         wl_vk_display->prepared           = TPL_FALSE;
699
700         /* Wayland Interfaces */
701         wl_vk_display->explicit_sync      = NULL;
702         wl_vk_display->wl_tbm_client      = NULL;
703
704         /* Vulkan specific surface capabilities */
705         wl_vk_display->min_buffer         = 2;
706         wl_vk_display->max_buffer         = VK_CLIENT_QUEUE_SIZE;
707         wl_vk_display->present_modes      = TPL_DISPLAY_PRESENT_MODE_FIFO;
708
709         wl_vk_display->use_wait_vblank    = TPL_TRUE;   // default enabled
710         {
711                 char *env = tpl_getenv("TPL_WAIT_VBLANK");
712                 if (env && !atoi(env)) {
713                         wl_vk_display->use_wait_vblank = TPL_FALSE;
714                 }
715         }
716
717         tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
718
719         /* Create gthread */
720         wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
721                                                                                            (tpl_gthread_func)_thread_init,
722                                                                                            (void *)wl_vk_display);
723         if (!wl_vk_display->thread) {
724                 TPL_ERR("Failed to create wl_vk_thread");
725                 goto free_display;
726         }
727
728         wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
729                                                                                                         (void *)wl_vk_display,
730                                                                                                         wl_display_get_fd(wl_vk_display->wl_display),
731                                                                                                         &disp_funcs, SOURCE_TYPE_NORMAL);
732         if (!wl_vk_display->disp_source) {
733                 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
734                                 display->native_handle,
735                                 wl_vk_display->thread);
736                 goto free_display;
737         }
738
739         wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread,
740                                                                                                    (void *)wl_vk_display,
741                                                                                                    wl_vk_display->tdm_display_fd,
742                                                                                                    &tdm_funcs, SOURCE_TYPE_NORMAL);
743         if (!wl_vk_display->tdm_source) {
744                 TPL_ERR("Failed to create tdm_gsource\n");
745                 goto free_display;
746         }
747
748         TPL_INFO("[DISPLAY_INIT]",
749                          "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
750                          wl_vk_display,
751                          wl_vk_display->thread,
752                          wl_vk_display->wl_display);
753
754         TPL_INFO("[DISPLAY_INIT]",
755                          "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
756                          wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
757                          wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
758
759         return TPL_ERROR_NONE;
760
761 free_display:
762         if (wl_vk_display->thread) {
763                 if (wl_vk_display->tdm_source)
764                         tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
765                 if (wl_vk_display->disp_source)
766                         tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
767
768                 tpl_gthread_destroy(wl_vk_display->thread);
769         }
770
771         wl_vk_display->thread = NULL;
772         free(wl_vk_display);
773
774         display->backend.data = NULL;
775         return TPL_ERROR_INVALID_OPERATION;
776 }
777
778 static void
779 __tpl_wl_vk_display_fini(tpl_display_t *display)
780 {
781         tpl_wl_vk_display_t *wl_vk_display;
782
783         TPL_ASSERT(display);
784
785         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
786         if (wl_vk_display) {
787                 TPL_INFO("[DISPLAY_FINI]",
788                                  "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
789                                  wl_vk_display,
790                                  wl_vk_display->thread,
791                                  wl_vk_display->wl_display);
792
793                 if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) {
794                         tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
795                         wl_vk_display->tdm_source = NULL;
796                 }
797
798                 if (wl_vk_display->disp_source) {
799                         tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
800                         wl_vk_display->disp_source = NULL;
801                 }
802
803                 if (wl_vk_display->thread) {
804                         tpl_gthread_destroy(wl_vk_display->thread);
805                         wl_vk_display->thread = NULL;
806                 }
807
808                 tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);
809
810                 free(wl_vk_display);
811         }
812
813         display->backend.data = NULL;
814 }
815
816 static tpl_result_t
817 __tpl_wl_vk_display_query_config(tpl_display_t *display,
818                 tpl_surface_type_t surface_type,
819                 int red_size, int green_size,
820                 int blue_size, int alpha_size,
821                 int color_depth, int *native_visual_id,
822                 tpl_bool_t *is_slow)
823 {
824         TPL_ASSERT(display);
825
826         if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
827                         green_size == 8 && blue_size == 8 &&
828                         (color_depth == 32 || color_depth == 24)) {
829
830                 if (alpha_size == 8) {
831                         if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
832                         if (is_slow) *is_slow = TPL_FALSE;
833                         return TPL_ERROR_NONE;
834                 }
835                 if (alpha_size == 0) {
836                         if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
837                         if (is_slow) *is_slow = TPL_FALSE;
838                         return TPL_ERROR_NONE;
839                 }
840         }
841
842         return TPL_ERROR_INVALID_PARAMETER;
843 }
844
845 static tpl_result_t
846 __tpl_wl_vk_display_filter_config(tpl_display_t *display,
847                                                                           int *visual_id,
848                                                                           int alpha_size)
849 {
850         TPL_IGNORE(display);
851         TPL_IGNORE(visual_id);
852         TPL_IGNORE(alpha_size);
853         return TPL_ERROR_NONE;
854 }
855
856 static tpl_result_t
857 __tpl_wl_vk_display_query_window_supported_buffer_count(
858         tpl_display_t *display,
859         tpl_handle_t window, int *min, int *max)
860 {
861         tpl_wl_vk_display_t *wl_vk_display = NULL;
862
863         TPL_ASSERT(display);
864         TPL_ASSERT(window);
865
866         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
867         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
868
869         if (min) *min = wl_vk_display->min_buffer;
870         if (max) *max = wl_vk_display->max_buffer;
871
872         return TPL_ERROR_NONE;
873 }
874
875 static tpl_result_t
876 __tpl_wl_vk_display_query_window_supported_present_modes(
877         tpl_display_t *display,
878         tpl_handle_t window, int *present_modes)
879 {
880         tpl_wl_vk_display_t *wl_vk_display = NULL;
881
882         TPL_ASSERT(display);
883         TPL_ASSERT(window);
884
885         wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
886         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
887
888         if (present_modes) {
889                 *present_modes = wl_vk_display->present_modes;
890         }
891
892         return TPL_ERROR_NONE;
893 }
894
/* Drain every live buffer tracked in wl_vk_surface->buffers[].
 *
 * For each occupied slot: detach it from the array, wait (bounded, 16ms)
 * for in-flight buffers to reach a reclaimable state, then either release
 * the tbm_surface back to the swapchain queue (status ACQUIRED..COMMITTED)
 * or cancel its dequeue (status DEQUEUED), and finally mark it RELEASED.
 *
 * Locking: buffers_mutex guards the array, wl_event_mutex serializes
 * against the wayland event processing, and each buffer carries its own
 * mutex/cond for status transitions. wl_event_mutex is dropped around the
 * timed wait so the thread advancing the buffer status can make progress.
 */
static void
_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_queue_error_e tsq_err       = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display      = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain        = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer        = NULL;
	tpl_bool_t need_to_release              = TPL_FALSE;
	tpl_bool_t need_to_cancel               = TPL_FALSE;
	buffer_status_t status                  = RELEASED;
	int idx                                 = 0;

	while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
		tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		wl_vk_buffer = wl_vk_surface->buffers[idx];

		/* Detach the buffer from the array first so no other path can
		 * observe it while it is being cleared. */
		if (wl_vk_buffer) {
			wl_vk_surface->buffers[idx] = NULL;
			wl_vk_surface->buffer_cnt--;
		} else {
			tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
			tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
			idx++;
			continue;
		}

		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		status = wl_vk_buffer->status;

		TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
				  idx, wl_vk_buffer,
				  wl_vk_buffer->tbm_surface,
				  status_to_string[status]);

		if (status >= ENQUEUED) {
			tpl_bool_t need_to_wait  = TPL_FALSE;
			tpl_result_t wait_result = TPL_ERROR_NONE;

			/* Without explicit sync a buffer must reach WAITING_VBLANK;
			 * with explicit sync it must reach COMMITTED before it is
			 * safe to reclaim. */
			if (!wl_vk_display->use_explicit_sync &&
				status < WAITING_VBLANK)
				need_to_wait = TPL_TRUE;

			if (wl_vk_display->use_explicit_sync &&
				status < COMMITTED)
				need_to_wait = TPL_TRUE;

			if (need_to_wait) {
				/* Release wl_event_mutex during the wait so the thread
				 * that signals wl_vk_buffer->cond can run. */
				tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
				wait_result = tpl_cond_timed_wait(&wl_vk_buffer->cond,
												  &wl_vk_buffer->mutex,
												  16); /* 16ms */
				tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

				/* Re-read: the status may have advanced during the wait. */
				status = wl_vk_buffer->status;

				if (wait_result == TPL_ERROR_TIME_OUT)
					TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
							 wl_vk_buffer);
			}
		}

		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
		/* It has been acquired but has not yet been released, so this
		 * buffer must be released. */
		need_to_release = (status >= ACQUIRED && status <= COMMITTED);

		/* After dequeue, it has not been enqueued yet
		 * so cancel_dequeue must be performed. */
		need_to_cancel = (status == DEQUEUED);

		if (swapchain && swapchain->tbm_queue) {
			if (need_to_release) {
				tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
													wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}

			if (need_to_cancel) {
				tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
														   wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);
			}
		}

		wl_vk_buffer->status = RELEASED;

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		/* Drop the reference that was held while the buffer was in use. */
		if (need_to_release || need_to_cancel)
			tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);

		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

		idx++;
	}
}
999
1000 static tdm_client_vblank*
1001 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1002 {
1003         tdm_client_vblank *vblank = NULL;
1004         tdm_client_output *tdm_output = NULL;
1005         tdm_error tdm_err = TDM_ERROR_NONE;
1006
1007         if (!tdm_client) {
1008                 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1009                 return NULL;
1010         }
1011
1012         tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1013         if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1014                 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1015                 return NULL;
1016         }
1017
1018         vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1019         if (!vblank || tdm_err != TDM_ERROR_NONE) {
1020                 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1021                 return NULL;
1022         }
1023
1024         tdm_client_vblank_set_enable_fake(vblank, 1);
1025         tdm_client_vblank_set_sync(vblank, 0);
1026
1027         return vblank;
1028 }
1029
1030 static void
1031 _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
1032 {
1033         tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1034
1035         /* tbm_surface_queue will be created at swapchain_create */
1036
1037         wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
1038                                                                 wl_vk_display->tdm_client);
1039         if (wl_vk_surface->vblank) {
1040                 TPL_INFO("[VBLANK_INIT]",
1041                                  "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
1042                                  wl_vk_surface, wl_vk_display->tdm_client,
1043                                  wl_vk_surface->vblank);
1044         }
1045
1046         if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
1047                 wl_vk_surface->surface_sync =
1048                         zwp_linux_explicit_synchronization_v1_get_synchronization(
1049                                         wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
1050                 if (wl_vk_surface->surface_sync) {
1051                         TPL_INFO("[EXPLICIT_SYNC_INIT]",
1052                                          "wl_vk_surface(%p) surface_sync(%p)",
1053                                          wl_vk_surface, wl_vk_surface->surface_sync);
1054                 } else {
1055                         TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
1056                                          wl_vk_surface);
1057                         wl_vk_display->use_explicit_sync = TPL_FALSE;
1058                 }
1059         }
1060
1061         wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
1062 }
1063
/* Thread-side teardown of a wl_vk_surface (counterpart of
 * _thread_wl_vk_surface_init, invoked from the gsource finalize hook).
 *
 * Frees the vblank-waiting list, destroys the explicit-sync surface
 * synchronization object and the tdm vblank object. The whole sequence
 * runs under surf_mutex to serialize against the dispatch handler.
 */
static void
_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);

	TPL_INFO("[SURFACE_FINI]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->vblank_waiting_buffers) {
		/* NULL free_func: the list does not own the buffers it references. */
		__tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
		wl_vk_surface->vblank_waiting_buffers = NULL;
	}

	if (wl_vk_surface->surface_sync) {
		TPL_INFO("[SURFACE_SYNC_DESTROY]",
				 "wl_vk_surface(%p) surface_sync(%p)",
				  wl_vk_surface, wl_vk_surface->surface_sync);
		zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
		wl_vk_surface->surface_sync = NULL;
	}

	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_DESTROY]",
				 "wl_vk_surface(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_surface->vblank);
		tdm_client_vblank_destroy(wl_vk_surface->vblank);
		wl_vk_surface->vblank = NULL;
	}

	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
1096
1097 static tpl_bool_t
1098 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1099 {
1100         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1101
1102         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1103
1104         if (message == 1) { /* Initialize surface */
1105                 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1106                 TPL_DEBUG("wl_vk_surface(%p) initialize message received!",
1107                                   wl_vk_surface);
1108                 _thread_wl_vk_surface_init(wl_vk_surface);
1109                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1110                 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1111         } else if (message == 2) { /* Create tbm_surface_queue */
1112                 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1113                 TPL_DEBUG("wl_vk_surface(%p) queue creation message received!",
1114                                   wl_vk_surface);
1115                 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1116                         != TPL_ERROR_NONE) {
1117                         TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1118                                         wl_vk_surface);
1119                 }
1120                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1121                 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1122         } else if (message == 3) { /* Acquirable message */
1123                 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1124                 TPL_DEBUG("wl_vk_surface(%p) acquirable message received!",
1125                                   wl_vk_surface);
1126                 if (_thread_surface_queue_acquire(wl_vk_surface)
1127                         != TPL_ERROR_NONE) {
1128                         TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1129                                         wl_vk_surface);
1130                 }
1131                 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1132         } else if (message == 4) { /* swapchain destroy */
1133                 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1134                 TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!",
1135                                   wl_vk_surface);
1136                 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1137                 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1138                 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1139         }
1140
1141         return TPL_TRUE;
1142 }
1143
1144 static void
1145 __thread_func_surf_finalize(tpl_gsource *gsource)
1146 {
1147         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1148
1149         wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1150         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1151
1152         _thread_wl_vk_surface_fini(wl_vk_surface);
1153
1154         TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)",
1155                           wl_vk_surface, gsource);
1156 }
1157
/* gsource vtable for per-surface sources: message-driven only, so no
 * prepare/check callbacks are needed. */
static tpl_gsource_functions surf_funcs = {
	.prepare = NULL,
	.check = NULL,
	.dispatch = __thread_func_surf_dispatch,
	.finalize = __thread_func_surf_finalize,
};
1164
1165
1166 static tpl_result_t
1167 __tpl_wl_vk_surface_init(tpl_surface_t *surface)
1168 {
1169         tpl_wl_vk_surface_t *wl_vk_surface      = NULL;
1170         tpl_wl_vk_display_t *wl_vk_display      = NULL;
1171         tpl_gsource *surf_source                = NULL;
1172
1173         TPL_ASSERT(surface);
1174         TPL_ASSERT(surface->display);
1175         TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1176         TPL_ASSERT(surface->native_handle);
1177
1178         wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
1179         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1180
1181         wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
1182                                                          sizeof(tpl_wl_vk_surface_t));
1183         if (!wl_vk_surface) {
1184                 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
1185                 return TPL_ERROR_OUT_OF_MEMORY;
1186         }
1187
1188         surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
1189                                                                          -1, &surf_funcs, SOURCE_TYPE_NORMAL);
1190         if (!surf_source) {
1191                 TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
1192                                 wl_vk_surface);
1193                 free(wl_vk_surface);
1194                 surface->backend.data = NULL;
1195                 return TPL_ERROR_INVALID_OPERATION;
1196         }
1197
1198         surface->backend.data                  = (void *)wl_vk_surface;
1199         surface->width                                 = -1;
1200         surface->height                        = -1;
1201
1202         wl_vk_surface->surf_source             = surf_source;
1203         wl_vk_surface->swapchain               = NULL;
1204
1205         wl_vk_surface->wl_vk_display           = wl_vk_display;
1206         wl_vk_surface->wl_surface              = (struct wl_surface *)surface->native_handle;
1207
1208         wl_vk_surface->reset                   = TPL_FALSE;
1209         wl_vk_surface->is_activated            = TPL_FALSE;
1210         wl_vk_surface->vblank_done             = TPL_TRUE;
1211
1212         wl_vk_surface->render_done_cnt         = 0;
1213
1214         wl_vk_surface->vblank                  = NULL;
1215         wl_vk_surface->surface_sync            = NULL;
1216
1217         wl_vk_surface->post_interval           = surface->post_interval;
1218
1219         {
1220                 int i = 0;
1221                 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1222                         wl_vk_surface->buffers[i]     = NULL;
1223                 wl_vk_surface->buffer_cnt         = 0;
1224         }
1225
1226         tpl_gmutex_init(&wl_vk_surface->surf_mutex);
1227         tpl_gcond_init(&wl_vk_surface->surf_cond);
1228
1229         tpl_gmutex_init(&wl_vk_surface->buffers_mutex);
1230
1231         /* Initialize in thread */
1232         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1233         tpl_gsource_send_message(wl_vk_surface->surf_source, 1);
1234         tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1235         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1236
1237         TPL_INFO("[SURFACE_INIT]",
1238                           "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
1239                           surface, wl_vk_surface, wl_vk_surface->surf_source);
1240
1241         return TPL_ERROR_NONE;
1242 }
1243
/* Tear down the wl_vk backend data of a tpl_surface.
 *
 * Destroys the per-surface gsource (which triggers the thread-side fini),
 * clears the back-pointers, releases the mutex/cond primitives and frees
 * the backend struct. backend.data is reset to NULL on return.
 */
static void
__tpl_wl_vk_surface_fini(tpl_surface_t *surface)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);

	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	wl_vk_display = (tpl_wl_vk_display_t *)
							 surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

	TPL_INFO("[SURFACE_FINI][BEGIN]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	/* NOTE(review): this branch is intentionally empty here — swapchain
	 * finalization appears to be handled elsewhere; confirm against the
	 * swapchain destroy path before relying on it. */
	if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
		/* finalize swapchain */

	}

	wl_vk_surface->swapchain        = NULL;

	/* Destroying the gsource runs __thread_func_surf_finalize, which
	 * performs the thread-side cleanup (vblank, surface_sync, list). */
	if (wl_vk_surface->surf_source)
		tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
	wl_vk_surface->surf_source      = NULL;

	_print_buffer_lists(wl_vk_surface);

	wl_vk_surface->wl_surface       = NULL;
	wl_vk_surface->wl_vk_display    = NULL;
	wl_vk_surface->tpl_surface      = NULL;

	/* Lock/unlock pair drains any holder of surf_mutex before clearing. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
	tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
	tpl_gcond_clear(&wl_vk_surface->surf_cond);

	TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);

	free(wl_vk_surface);
	surface->backend.data = NULL;
}
1291
1292 static tpl_result_t
1293 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1294                                                                                   int post_interval)
1295 {
1296         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1297
1298         TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1299
1300         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1301
1302         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1303
1304         TPL_INFO("[SET_POST_INTERVAL]",
1305                          "wl_vk_surface(%p) post_interval(%d -> %d)",
1306                          wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1307
1308         wl_vk_surface->post_interval = post_interval;
1309
1310         return TPL_ERROR_NONE;
1311 }
1312
1313 static tpl_bool_t
1314 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1315 {
1316         TPL_ASSERT(surface);
1317         TPL_ASSERT(surface->backend.data);
1318
1319         tpl_wl_vk_surface_t *wl_vk_surface =
1320                 (tpl_wl_vk_surface_t *)surface->backend.data;
1321
1322         return !(wl_vk_surface->reset);
1323 }
1324
/* tbm_surface_queue reset callback.
 *
 * Fires when the queue is reset (resize or activation change). Logs the
 * size / activation transition, raises wl_vk_surface->reset so the next
 * frame picks up the new state, and forwards the event to the frontend
 * via surface->reset_cb when one is registered.
 */
static void
__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
							  void *data)
{
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;
	tpl_wl_vk_swapchain_t *swapchain   = NULL;
	tpl_surface_t *surface             = NULL;
	tpl_bool_t is_activated            = TPL_FALSE;
	int width, height;

	wl_vk_surface = (tpl_wl_vk_surface_t *)data;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	wl_vk_display = wl_vk_surface->wl_vk_display;
	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

	/* NOTE(review): tpl_surface must have been stored on wl_vk_surface
	 * beforehand; when it is NULL this callback returns without raising
	 * the reset flag or notifying the frontend. */
	surface = wl_vk_surface->tpl_surface;
	TPL_CHECK_ON_NULL_RETURN(surface);

	swapchain = wl_vk_surface->swapchain;
	TPL_CHECK_ON_NULL_RETURN(swapchain);

	/* When the queue is resized, change the reset flag to TPL_TRUE to reflect
	 * the changed window size at the next frame. */
	width = tbm_surface_queue_get_width(tbm_queue);
	height = tbm_surface_queue_get_height(tbm_queue);
	if (surface->width != width || surface->height != height) {
		TPL_INFO("[QUEUE_RESIZE]",
				 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
				 wl_vk_surface, tbm_queue,
				 surface->width, surface->height, width, height);
	}

	/* When queue_reset_callback is called, if is_activated is different from
	 * its previous state change the reset flag to TPL_TRUE to get a new buffer
	 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
	is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
														   swapchain->tbm_queue);
	if (wl_vk_surface->is_activated != is_activated) {
		if (is_activated) {
			TPL_INFO("[ACTIVATED]",
					  "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
					  wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
		} else {
			TPL_LOG_T("[DEACTIVATED]",
					  " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
					  wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
		}
	}

	wl_vk_surface->reset = TPL_TRUE;

	/* Propagate the reset to the frontend so callers can recreate state. */
	if (surface->reset_cb)
		surface->reset_cb(surface->reset_data);
}
1381
1382 static void
1383 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1384                                                                    void *data)
1385 {
1386         TPL_IGNORE(tbm_queue);
1387
1388         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1389         TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1390
1391         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1392
1393         tpl_gsource_send_message(wl_vk_surface->surf_source, 3);
1394
1395         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1396 }
1397
1398 static tpl_result_t
1399 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1400 {
1401         TPL_ASSERT (wl_vk_surface);
1402
1403         tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1404         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1405         tbm_surface_queue_h tbm_queue      = NULL;
1406         tbm_bufmgr bufmgr = NULL;
1407         unsigned int capability;
1408
1409         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1410         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1411
1412         if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
1413                 TPL_ERR("buffer count(%d) must be higher than (%d)",
1414                                 swapchain->properties.buffer_count,
1415                                 wl_vk_display->min_buffer);
1416                 return TPL_ERROR_INVALID_PARAMETER;
1417         }
1418
1419         if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
1420                 TPL_ERR("buffer count(%d) must be lower than (%d)",
1421                                 swapchain->properties.buffer_count,
1422                                 wl_vk_display->max_buffer);
1423                 return TPL_ERROR_INVALID_PARAMETER;
1424         }
1425
1426         if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
1427                 TPL_ERR("Unsupported present_mode(%d)",
1428                                 swapchain->properties.present_mode);
1429                 return TPL_ERROR_INVALID_PARAMETER;
1430         }
1431
1432         if (swapchain->tbm_queue) {
1433                 int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
1434                 int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
1435
1436                 if (swapchain->swapchain_buffers) {
1437                         int i;
1438                         for (i = 0; i < swapchain->properties.buffer_count; i++) {
1439                                 if (swapchain->swapchain_buffers[i]) {
1440                                         TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]);
1441                                         tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1442                                         swapchain->swapchain_buffers[i] = NULL;
1443                                 }
1444                         }
1445
1446                         free(swapchain->swapchain_buffers);
1447                         swapchain->swapchain_buffers = NULL;
1448                 }
1449
1450                 if (old_width != swapchain->properties.width ||
1451                         old_height != swapchain->properties.height) {
1452                         tbm_surface_queue_reset(swapchain->tbm_queue,
1453                                                                         swapchain->properties.width,
1454                                                                         swapchain->properties.height,
1455                                                                         swapchain->properties.format);
1456                         TPL_INFO("[RESIZE]",
1457                                          "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
1458                                          wl_vk_surface, swapchain, swapchain->tbm_queue,
1459                                          old_width, old_height,
1460                                          swapchain->properties.width,
1461                                          swapchain->properties.height);
1462                 }
1463
1464                 swapchain->properties.buffer_count =
1465                         tbm_surface_queue_get_size(swapchain->tbm_queue);
1466
1467                 wl_vk_surface->reset = TPL_FALSE;
1468
1469                 __tpl_util_atomic_inc(&swapchain->ref_cnt);
1470
1471                 TPL_INFO("[SWAPCHAIN_REUSE]",
1472                                  "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
1473                                  wl_vk_surface, swapchain, swapchain->tbm_queue,
1474                                  swapchain->properties.buffer_count);
1475
1476                 return TPL_ERROR_NONE;
1477         }
1478
1479         bufmgr = tbm_bufmgr_init(-1);
1480         capability = tbm_bufmgr_get_capability(bufmgr);
1481         tbm_bufmgr_deinit(bufmgr);
1482
1483         if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1484                 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1485                                                                         wl_vk_display->wl_tbm_client,
1486                                                                         wl_vk_surface->wl_surface,
1487                                                                         swapchain->properties.buffer_count,
1488                                                                         swapchain->properties.width,
1489                                                                         swapchain->properties.height,
1490                                                                         TBM_FORMAT_ARGB8888);
1491         } else {
1492                 tbm_queue = wayland_tbm_client_create_surface_queue(
1493                                                                         wl_vk_display->wl_tbm_client,
1494                                                                         wl_vk_surface->wl_surface,
1495                                                                         swapchain->properties.buffer_count,
1496                                                                         swapchain->properties.width,
1497                                                                         swapchain->properties.height,
1498                                                                         TBM_FORMAT_ARGB8888);
1499         }
1500
1501         if (!tbm_queue) {
1502                 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1503                                 wl_vk_surface);
1504                 return TPL_ERROR_OUT_OF_MEMORY;
1505         }
1506
1507         if (tbm_surface_queue_set_modes(
1508                         tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1509                         TBM_SURFACE_QUEUE_ERROR_NONE) {
1510                 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1511                                 tbm_queue);
1512                 tbm_surface_queue_destroy(tbm_queue);
1513                 return TPL_ERROR_INVALID_OPERATION;
1514         }
1515
1516         if (tbm_surface_queue_add_reset_cb(
1517                         tbm_queue,
1518                         __cb_tbm_queue_reset_callback,
1519                         (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1520                 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1521                                 tbm_queue);
1522                 tbm_surface_queue_destroy(tbm_queue);
1523                 return TPL_ERROR_INVALID_OPERATION;
1524         }
1525
1526         if (tbm_surface_queue_add_acquirable_cb(
1527                         tbm_queue,
1528                         __cb_tbm_queue_acquirable_callback,
1529                         (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1530                 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1531                                 tbm_queue);
1532                 tbm_surface_queue_destroy(tbm_queue);
1533                 return TPL_ERROR_INVALID_OPERATION;
1534         }
1535
1536         swapchain->tbm_queue = tbm_queue;
1537
1538         TPL_INFO("[TBM_QUEUE_CREATED]",
1539                          "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)",
1540                          wl_vk_surface, swapchain, tbm_queue);
1541
1542         return TPL_ERROR_NONE;
1543 }
1544
1545 static tpl_result_t
1546 __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
1547                 tbm_format format, int width,
1548                 int height, int buffer_count, int present_mode)
1549 {
1550         tpl_wl_vk_surface_t *wl_vk_surface              = NULL;
1551         tpl_wl_vk_display_t *wl_vk_display      = NULL;
1552         tpl_wl_vk_swapchain_t *swapchain  = NULL;
1553
1554         TPL_ASSERT(surface);
1555         TPL_ASSERT(surface->display);
1556
1557         wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1558         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1559
1560         wl_vk_display = (tpl_wl_vk_display_t *)
1561                                                          surface->display->backend.data;
1562         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1563
1564         swapchain = wl_vk_surface->swapchain;
1565
1566         if (swapchain == NULL) {
1567                 swapchain =
1568                         (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
1569                         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
1570                 swapchain->tbm_queue           = NULL;
1571         }
1572
1573         swapchain->properties.buffer_count = buffer_count;
1574         swapchain->properties.width        = width;
1575         swapchain->properties.height       = height;
1576         swapchain->properties.present_mode = present_mode;
1577         swapchain->wl_vk_surface           = wl_vk_surface;
1578
1579         wl_vk_surface->swapchain           = swapchain;
1580
1581         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1582         /* send swapchain create tbm_queue message */
1583         tpl_gsource_send_message(wl_vk_surface->surf_source, 2);
1584         tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1585         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1586
1587         TPL_CHECK_ON_FALSE_ASSERT_FAIL(
1588                 swapchain->tbm_queue != NULL,
1589                 "[CRITICAL FAIL] Failed to create tbm_surface_queue");
1590
1591         wl_vk_surface->reset = TPL_FALSE;
1592
1593         __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
1594
1595         return TPL_ERROR_NONE;
1596 }
1597
1598 static void
1599 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1600 {
1601         TPL_ASSERT(wl_vk_surface);
1602
1603         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1604
1605         TPL_CHECK_ON_NULL_RETURN(swapchain);
1606
1607         if (swapchain->tbm_queue) {
1608                 TPL_INFO("[TBM_QUEUE_DESTROY]",
1609                                  "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1610                                  wl_vk_surface, swapchain, swapchain->tbm_queue);
1611                 tbm_surface_queue_destroy(swapchain->tbm_queue);
1612                 swapchain->tbm_queue = NULL;
1613         }
1614 }
1615
1616 static tpl_result_t
1617 __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
1618 {
1619         tpl_wl_vk_swapchain_t *swapchain   = NULL;
1620         tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1621         tpl_wl_vk_display_t *wl_vk_display = NULL;
1622
1623         TPL_ASSERT(surface);
1624         TPL_ASSERT(surface->display);
1625
1626         wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
1627         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1628
1629         wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
1630         TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1631
1632         swapchain = wl_vk_surface->swapchain;
1633         if (!swapchain) {
1634                 TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
1635                                 wl_vk_surface);
1636                 return TPL_ERROR_INVALID_OPERATION;
1637         }
1638
1639         if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
1640                 TPL_INFO("[DESTROY_SWAPCHAIN]",
1641                                  "wl_vk_surface(%p) swapchain(%p) still valid.",
1642                                  wl_vk_surface, swapchain);
1643                 return TPL_ERROR_NONE;
1644         }
1645
1646         TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
1647                          "wl_vk_surface(%p) swapchain(%p)",
1648                          wl_vk_surface, wl_vk_surface->swapchain);
1649
1650         if (swapchain->swapchain_buffers) {
1651                 for (int i = 0; i < swapchain->properties.buffer_count; i++) {
1652                         if (swapchain->swapchain_buffers[i]) {
1653                                 TPL_DEBUG("Stop tracking tbm_surface(%p)",
1654                                                   swapchain->swapchain_buffers[i]);
1655                                 tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1656                                 swapchain->swapchain_buffers[i] = NULL;
1657                         }
1658                 }
1659
1660                 free(swapchain->swapchain_buffers);
1661                 swapchain->swapchain_buffers = NULL;
1662         }
1663
1664         _tpl_wl_vk_surface_buffer_clear(wl_vk_surface);
1665
1666         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1667         tpl_gsource_send_message(wl_vk_surface->surf_source, 4);
1668         tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1669         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1670
1671         _print_buffer_lists(wl_vk_surface);
1672
1673         free(swapchain);
1674         wl_vk_surface->swapchain = NULL;
1675
1676         return TPL_ERROR_NONE;
1677 }
1678
1679 static tpl_result_t
1680 __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
1681                                                                                           tbm_surface_h **buffers,
1682                                                                                           int *buffer_count)
1683 {
1684         TPL_ASSERT(surface);
1685         TPL_ASSERT(surface->backend.data);
1686         TPL_ASSERT(surface->display);
1687         TPL_ASSERT(surface->display->backend.data);
1688
1689         tpl_wl_vk_surface_t *wl_vk_surface =
1690                 (tpl_wl_vk_surface_t *)surface->backend.data;
1691         tpl_wl_vk_display_t *wl_vk_display =
1692                 (tpl_wl_vk_display_t *)surface->display->backend.data;
1693         tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
1694         tpl_result_t ret                   = TPL_ERROR_NONE;
1695         int i;
1696
1697         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1698         TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
1699
1700         tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1701
1702         if (!buffers) {
1703                 *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
1704                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1705                 return TPL_ERROR_NONE;
1706         }
1707
1708         swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
1709                                                                                 *buffer_count,
1710                                                                                 sizeof(tbm_surface_h));
1711         if (!swapchain->swapchain_buffers) {
1712                 TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
1713                                 *buffer_count);
1714                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1715                 return TPL_ERROR_OUT_OF_MEMORY;
1716         }
1717
1718         ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
1719                                                                                                 swapchain->tbm_queue,
1720                                                                                                 swapchain->swapchain_buffers,
1721                                                                                                 buffer_count);
1722         if (!ret) {
1723                 TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
1724                                 wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
1725                 free(swapchain->swapchain_buffers);
1726                 swapchain->swapchain_buffers = NULL;
1727                 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1728                 return TPL_ERROR_INVALID_OPERATION;
1729         }
1730
1731         for (i = 0; i < *buffer_count; i++) {
1732                 if (swapchain->swapchain_buffers[i]) {
1733                         TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
1734                                           i, swapchain->swapchain_buffers[i],
1735                                           _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1736                         tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
1737                 }
1738         }
1739
1740         *buffers = swapchain->swapchain_buffers;
1741
1742         tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1743
1744         return TPL_ERROR_NONE;
1745 }
1746
/* tbm user-data free callback (registered in _wl_vk_buffer_create):
 * runs when the backing tbm_surface is destroyed. Untracks the buffer
 * from its surface's buffers[] array and releases every resource the
 * wl_vk_buffer owns, then frees the struct itself. */
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
			 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);

	/* Remove this buffer from the surface's tracking array. */
	tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
	if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
		wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
		wl_vk_surface->buffer_cnt--;

		wl_vk_buffer->idx = -1;
	}
	tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

	/* Flush pending protocol requests before the wl_buffer goes away. */
	wl_display_flush(wl_vk_display->wl_display);

	if (wl_vk_buffer->wl_buffer) {
		wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
										  wl_vk_buffer->wl_buffer);
		wl_vk_buffer->wl_buffer = NULL;
	}

	/* Explicit-sync release object, if one was attached. */
	if (wl_vk_buffer->buffer_release) {
		zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
		wl_vk_buffer->buffer_release = NULL;
	}

	if (wl_vk_buffer->release_fence_fd != -1) {
		close(wl_vk_buffer->release_fence_fd);
		wl_vk_buffer->release_fence_fd = -1;
	}

	if (wl_vk_buffer->rects) {
		free(wl_vk_buffer->rects);
		wl_vk_buffer->rects = NULL;
		wl_vk_buffer->num_rects = 0;
	}

	/* NOTE(review): ->mutex and ->cond are initialized in
	 * _wl_vk_buffer_create() but not cleared here, and a still-open
	 * acquire_fence_fd is not closed — confirm whether the tpl_gmutex/
	 * tpl_gcond wrappers need an explicit clear and who owns that fd. */
	wl_vk_buffer->tbm_surface = NULL;
	wl_vk_buffer->bo_name = -1;

	free(wl_vk_buffer);
}
1794
1795 static tpl_wl_vk_buffer_t *
1796 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1797 {
1798         tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1799         tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1800                                                                            (void **)&wl_vk_buffer);
1801         return wl_vk_buffer;
1802 }
1803
/* Returns the wl_vk_buffer bound to tbm_surface, creating and
 * registering a new one on first sight. Each tbm_surface carries at
 * most one wl_vk_buffer as tbm user-data; __cb_wl_vk_buffer_free is
 * installed as its destructor. Per-frame fields (need_to_commit,
 * buffer_release) are reset on every call. Returns NULL only if the
 * allocation of a new wl_vk_buffer fails. */
static tpl_wl_vk_buffer_t *
_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
					  tbm_surface_h tbm_surface)
{
	tpl_wl_vk_buffer_t  *wl_vk_buffer  = NULL;

	/* Reuse the existing wrapper if this tbm_surface was seen before. */
	wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);

	if (!wl_vk_buffer) {
		wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
		TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);

		/* The free callback fires when the tbm_surface is destroyed,
		 * making the tbm_surface the owner of this wl_vk_buffer. */
		tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   (tbm_data_free)__cb_wl_vk_buffer_free);
		tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
										   wl_vk_buffer);

		wl_vk_buffer->wl_buffer                = NULL;
		wl_vk_buffer->tbm_surface              = tbm_surface;
		wl_vk_buffer->bo_name                  = _get_tbm_surface_bo_name(tbm_surface);
		wl_vk_buffer->wl_vk_surface            = wl_vk_surface;

		wl_vk_buffer->status                   = RELEASED;

		wl_vk_buffer->acquire_fence_fd         = -1;
		wl_vk_buffer->release_fence_fd         = -1;

		wl_vk_buffer->dx                       = 0;
		wl_vk_buffer->dy                       = 0;
		wl_vk_buffer->width                    = tbm_surface_get_width(tbm_surface);
		wl_vk_buffer->height                   = tbm_surface_get_height(tbm_surface);

		wl_vk_buffer->rects                    = NULL;
		wl_vk_buffer->num_rects                = 0;

		tpl_gmutex_init(&wl_vk_buffer->mutex);
		tpl_gcond_init(&wl_vk_buffer->cond);

		/* Register the new buffer in the first free slot of the
		 * surface's fixed-size tracking array. */
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		{
			int i;
			for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
				if (wl_vk_surface->buffers[i] == NULL) break;

			/* If this exception is reached,
			 * it may be a critical memory leak problem. */
			if (i == BUFFER_ARRAY_SIZE) {
				tpl_wl_vk_buffer_t *evicted_buffer = NULL;
				int evicted_idx = 0; /* evict the frontmost buffer */

				evicted_buffer = wl_vk_surface->buffers[evicted_idx];

				TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
						 wl_vk_surface);
				TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
						 evicted_buffer, evicted_buffer->tbm_surface,
						 status_to_string[evicted_buffer->status]);

				/* [TODO] need to think about whether there will be
				 * better modifications */
				/* NOTE(review): the evicted entry is only dropped from the
				 * array, not freed here — presumably it is reclaimed by
				 * __cb_wl_vk_buffer_free when its tbm_surface dies; its
				 * idx now points at a slot it no longer occupies. Confirm. */
				wl_vk_surface->buffer_cnt--;
				wl_vk_surface->buffers[evicted_idx]      = NULL;

				i = evicted_idx;
			}

			wl_vk_surface->buffer_cnt++;
			wl_vk_surface->buffers[i]          = wl_vk_buffer;
			wl_vk_buffer->idx                  = i;
		}
		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		TPL_INFO("[WL_VK_BUFFER_CREATE]",
				 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				 wl_vk_surface, wl_vk_buffer, tbm_surface,
				 wl_vk_buffer->bo_name);
	}

	/* Per-frame state is reset whether the buffer is new or reused. */
	wl_vk_buffer->need_to_commit = TPL_FALSE;
	wl_vk_buffer->buffer_release = NULL;

	return wl_vk_buffer;
}
1887
/* Dequeues one tbm_surface from the swapchain's tbm_queue for the
 * client to render into.
 *
 * timeout_ns    : UINT64_MAX waits indefinitely; any other value is
 *                 converted to microseconds for the timed wait.
 * release_fence : out (may be NULL); receives the buffer's release
 *                 fence fd when surface_sync (explicit sync) is in
 *                 use, else -1. Ownership of the fd passes to the
 *                 caller.
 *
 * Returns the dequeued tbm_surface (with an extra internal ref, dropped
 * at enqueue/cancel) or NULL on timeout, queue error, or pending reset. */
static tbm_surface_h
__tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
								   uint64_t timeout_ns,
								   int32_t *release_fence)
{
	TPL_ASSERT(surface);
	TPL_ASSERT(surface->backend.data);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->display->backend.data);
	TPL_OBJECT_CHECK_RETURN(surface, NULL);

	tpl_wl_vk_surface_t *wl_vk_surface =
		(tpl_wl_vk_surface_t *)surface->backend.data;
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)surface->display->backend.data;
	tpl_wl_vk_swapchain_t *swapchain   = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;

	tbm_surface_h tbm_surface          = NULL;
	tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_NONE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);

	/* Release the surface object lock while blocking so other surface
	 * operations can proceed during the wait. */
	TPL_OBJECT_UNLOCK(surface);
	TRACE_BEGIN("WAIT_DEQUEUEABLE");
	if (timeout_ns != UINT64_MAX) {
		/* timeout_ns is nanoseconds; the tbm wait takes microseconds. */
		tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
						swapchain->tbm_queue, timeout_ns/1000);
	} else {
		tsq_err = tbm_surface_queue_can_dequeue(
						swapchain->tbm_queue, 1);
	}
	TRACE_END();
	TPL_OBJECT_LOCK(surface);

	if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
		TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
				timeout_ns);
		return NULL;
	} else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
				wl_vk_surface, swapchain->tbm_queue, tsq_err);
		return NULL;
	}

	/* Serialize against the wayland event thread for the actual dequeue. */
	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

	/* A queue reset (e.g. resize) invalidates the swapchain; the caller
	 * must recreate it before dequeuing again. */
	if (wl_vk_surface->reset) {
		TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
				  swapchain, swapchain->tbm_queue);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
										&tbm_surface);
	if (!tbm_surface) {
		TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
				swapchain->tbm_queue, wl_vk_surface, tsq_err);
		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
		return NULL;
	}

	/* Extra ref held while the client owns the buffer; dropped at
	 * enqueue/cancel. */
	tbm_surface_internal_ref(tbm_surface);

	wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");

	tpl_gmutex_lock(&wl_vk_buffer->mutex);
	wl_vk_buffer->status = DEQUEUED;

	if (release_fence) {
		if (wl_vk_surface->surface_sync) {
			/* Hand the release fence fd to the caller and forget it. */
			*release_fence = wl_vk_buffer->release_fence_fd;
			TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
					  wl_vk_surface, wl_vk_buffer, *release_fence);
			wl_vk_buffer->release_fence_fd = -1;
		} else {
			*release_fence = -1;
		}
	}

	wl_vk_surface->reset = TPL_FALSE;

	TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
			  wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
			  release_fence ? *release_fence : -1);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

	return tbm_surface;
}
1982
1983 static tpl_result_t
1984 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
1985                                                                           tbm_surface_h tbm_surface)
1986 {
1987         TPL_ASSERT(surface);
1988         TPL_ASSERT(surface->backend.data);
1989
1990         tpl_wl_vk_surface_t *wl_vk_surface  =
1991                 (tpl_wl_vk_surface_t *)surface->backend.data;
1992         tpl_wl_vk_swapchain_t *swapchain    = NULL;
1993         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
1994         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
1995
1996         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
1997                                                                   TPL_ERROR_INVALID_PARAMETER);
1998
1999         swapchain = wl_vk_surface->swapchain;
2000         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2001         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2002                                                                  TPL_ERROR_INVALID_PARAMETER);
2003
2004         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2005         if (wl_vk_buffer) {
2006                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2007                 wl_vk_buffer->status = RELEASED;
2008                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2009         }
2010
2011         tbm_surface_internal_unref(tbm_surface);
2012
2013         TPL_INFO("[CANCEL BUFFER]",
2014                          "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2015                           wl_vk_surface, swapchain, tbm_surface,
2016                           _get_tbm_surface_bo_name(tbm_surface));
2017
2018         tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2019                                                                                            tbm_surface);
2020         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2021                 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2022                 return TPL_ERROR_INVALID_OPERATION;
2023         }
2024
2025         return TPL_ERROR_NONE;
2026 }
2027
2028 static tpl_result_t
2029 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2030                                                                            tbm_surface_h tbm_surface,
2031                                                                            int num_rects, const int *rects,
2032                                                                            int32_t acquire_fence)
2033 {
2034         TPL_ASSERT(surface);
2035         TPL_ASSERT(surface->display);
2036         TPL_ASSERT(surface->backend.data);
2037         TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2038
2039         tpl_wl_vk_surface_t *wl_vk_surface  =
2040                 (tpl_wl_vk_surface_t *) surface->backend.data;
2041         tpl_wl_vk_swapchain_t *swapchain    = wl_vk_surface->swapchain;
2042         tpl_wl_vk_buffer_t *wl_vk_buffer    = NULL;
2043         tbm_surface_queue_error_e tsq_err   = TBM_SURFACE_QUEUE_ERROR_NONE;
2044         int bo_name                         = -1;
2045
2046         TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2047         TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2048         TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2049                                                                   TPL_ERROR_INVALID_PARAMETER);
2050
2051         wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2052         bo_name = wl_vk_buffer->bo_name;
2053
2054         tpl_gmutex_lock(&wl_vk_buffer->mutex);
2055
2056         /* If there are received region information, save it to wl_vk_buffer */
2057         if (num_rects && rects) {
2058                 if (wl_vk_buffer->rects != NULL) {
2059                         free(wl_vk_buffer->rects);
2060                         wl_vk_buffer->rects = NULL;
2061                         wl_vk_buffer->num_rects = 0;
2062                 }
2063
2064                 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2065                 wl_vk_buffer->num_rects = num_rects;
2066
2067                 if (wl_vk_buffer->rects) {
2068                         memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2069                                    sizeof(int) * 4 * num_rects);
2070                 } else {
2071                         TPL_ERR("Failed to allocate memory for rects info.");
2072                 }
2073         }
2074
2075         if (wl_vk_buffer->acquire_fence_fd != -1)
2076                 close(wl_vk_buffer->acquire_fence_fd);
2077
2078         wl_vk_buffer->acquire_fence_fd = acquire_fence;
2079
2080         wl_vk_buffer->status = ENQUEUED;
2081         TPL_LOG_T("WL_VK",
2082                           "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2083                           wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2084
2085         tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2086
2087         tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2088                                                                                 tbm_surface);
2089         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2090                 tbm_surface_internal_unref(tbm_surface);
2091                 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2092                                 tbm_surface, wl_vk_surface, tsq_err);
2093                 return TPL_ERROR_INVALID_OPERATION;
2094         }
2095
2096         tbm_surface_internal_unref(tbm_surface);
2097
2098         return TPL_ERROR_NONE;
2099 }
2100
/* Listener attached to wl_buffers created for acquired swapchain
 * buffers; the compositor's release event is delivered to
 * __cb_wl_buffer_release().
 * NOTE(review): the (void *) cast suppresses type checking of the
 * callback — confirm __cb_wl_buffer_release matches the
 * wl_buffer_listener.release signature. */
static const struct wl_buffer_listener wl_buffer_release_listener = {
	(void *)__cb_wl_buffer_release,
};
2104
/* Drain the swapchain's tbm_queue on the worker thread.
 *
 * For every buffer that can be acquired: mark it ACQUIRED, lazily create its
 * wl_buffer, then either commit it to the wl_surface right away or queue it
 * until the next vblank, depending on the display's vblank policy.
 *
 * @param wl_vk_surface  surface whose swapchain queue is drained.
 * @return TPL_ERROR_NONE on success,
 *         TPL_ERROR_INVALID_PARAMETER when the swapchain is gone,
 *         TPL_ERROR_INVALID_OPERATION when tbm_surface_queue_acquire fails.
 */
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
{
	tbm_surface_h tbm_surface            = NULL;
	tbm_surface_queue_error_e tsq_err    = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display   = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain     = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer     = NULL;
	tpl_bool_t ready_to_commit           = TPL_TRUE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

	while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
		tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
											&tbm_surface);
		if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
			TPL_ERR("Failed to acquire from tbm_queue(%p)",
					swapchain->tbm_queue);
			return TPL_ERROR_INVALID_OPERATION;
		}

		/* Hold a reference until the compositor releases the buffer
		 * (dropped in the release callbacks). */
		tbm_surface_internal_ref(tbm_surface);

		wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
									   "wl_vk_buffer sould be not NULL");

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		wl_vk_buffer->status = ACQUIRED;

		TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				  wl_vk_buffer, tbm_surface,
				  _get_tbm_surface_bo_name(tbm_surface));

		/* Lazily create the wl_buffer on first acquire. */
		if (wl_vk_buffer->wl_buffer == NULL) {
			wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
						wl_vk_display->wl_tbm_client, tbm_surface);

			/* Creation may fail; never pass a NULL wl_buffer to
			 * wl_buffer_add_listener. The listener is also skipped when
			 * explicit sync delivers releases via zwp_release_listner. */
			if (!wl_vk_buffer->wl_buffer) {
				TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
						 wl_vk_display->wl_tbm_client, tbm_surface);
			} else {
				if (wl_vk_buffer->acquire_fence_fd == -1 ||
					wl_vk_display->use_explicit_sync == TPL_FALSE) {
					wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
										   &wl_buffer_release_listener, wl_vk_buffer);
				}

				TPL_LOG_T("WL_VK",
						  "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
						  wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
			}
		}

		/* Commit now, or defer until __cb_tdm_client_vblank fires. */
		if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done)
			ready_to_commit = TPL_TRUE;
		else {
			wl_vk_buffer->status = WAITING_VBLANK;
			__tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
			ready_to_commit = TPL_FALSE;
		}

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		if (ready_to_commit)
			_thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
	}

	return TPL_ERROR_NONE;
}
2176
2177 static void
2178 __cb_buffer_fenced_release(void *data,
2179                                                    struct zwp_linux_buffer_release_v1 *release,
2180                                                    int32_t fence)
2181 {
2182         tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
2183         tbm_surface_h tbm_surface         = NULL;
2184
2185         TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2186
2187         tbm_surface = wl_vk_buffer->tbm_surface;
2188
2189         if (tbm_surface_internal_is_valid(tbm_surface)) {
2190                 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2191                 tpl_wl_vk_swapchain_t *swapchain   = NULL;
2192
2193                 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2194                         TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2195                         tbm_surface_internal_unref(tbm_surface);
2196                         return;
2197                 }
2198
2199                 swapchain = wl_vk_surface->swapchain;
2200
2201                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2202                 if (wl_vk_buffer->status == COMMITTED) {
2203                         tbm_surface_queue_error_e tsq_err;
2204
2205                         zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2206                         wl_vk_buffer->buffer_release = NULL;
2207
2208                         wl_vk_buffer->release_fence_fd = fence;
2209                         wl_vk_buffer->status = RELEASED;
2210
2211                         TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2212                                            wl_vk_buffer->bo_name,
2213                                            fence);
2214                         TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2215                                                         wl_vk_buffer->bo_name);
2216
2217                         TPL_LOG_T("WL_VK",
2218                                           "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2219                                           wl_vk_buffer, tbm_surface,
2220                                           wl_vk_buffer->bo_name,
2221                                           fence);
2222
2223                         tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2224                                                                                                 tbm_surface);
2225                         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2226                                 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2227
2228                         tbm_surface_internal_unref(tbm_surface);
2229                 }
2230
2231                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2232
2233         } else {
2234                 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2235         }
2236 }
2237
2238 static void
2239 __cb_buffer_immediate_release(void *data,
2240                                                           struct zwp_linux_buffer_release_v1 *release)
2241 {
2242         tpl_wl_vk_buffer_t *wl_vk_buffer  = (tpl_wl_vk_buffer_t *)data;
2243         tbm_surface_h tbm_surface           = NULL;
2244
2245         TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2246
2247         tbm_surface = wl_vk_buffer->tbm_surface;
2248
2249         if (tbm_surface_internal_is_valid(tbm_surface)) {
2250                 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2251                 tpl_wl_vk_swapchain_t *swapchain   = NULL;
2252
2253                 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2254                         TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2255                         tbm_surface_internal_unref(tbm_surface);
2256                         return;
2257                 }
2258
2259                 swapchain = wl_vk_surface->swapchain;
2260
2261                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2262                 if (wl_vk_buffer->status == COMMITTED) {
2263                         tbm_surface_queue_error_e tsq_err;
2264
2265                         zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2266                         wl_vk_buffer->buffer_release = NULL;
2267
2268                         wl_vk_buffer->release_fence_fd = -1;
2269                         wl_vk_buffer->status = RELEASED;
2270
2271                         TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
2272                                            _get_tbm_surface_bo_name(tbm_surface));
2273                         TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2274                                                         _get_tbm_surface_bo_name(tbm_surface));
2275
2276                         TPL_LOG_T("WL_VK",
2277                                           "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2278                                           wl_vk_buffer, tbm_surface,
2279                                           _get_tbm_surface_bo_name(tbm_surface));
2280
2281                         tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2282                                                                                                 tbm_surface);
2283                         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2284                                 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2285
2286                         tbm_surface_internal_unref(tbm_surface);
2287                 }
2288
2289                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2290
2291         } else {
2292                 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2293         }
2294 }
2295
/* Listener for per-commit zwp_linux_buffer_release_v1 objects used with
 * explicit sync: fenced_release carries a release fence fd,
 * immediate_release means no fence is needed. Both handlers return the
 * buffer to the tbm_queue.
 * NOTE(review): "listner" is a typo in the identifier; kept as-is because
 * it is referenced by name elsewhere in this file. */
static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
	__cb_buffer_fenced_release,
	__cb_buffer_immediate_release,
};
2300
2301 static void
2302 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
2303 {
2304         tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2305         tbm_surface_h tbm_surface = NULL;
2306
2307         TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)
2308
2309         tbm_surface = wl_vk_buffer->tbm_surface;
2310
2311         if (tbm_surface_internal_is_valid(tbm_surface)) {
2312                 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2313                 tpl_wl_vk_swapchain_t *swapchain   = NULL;
2314                 tbm_surface_queue_error_e tsq_err  = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
2315
2316                 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2317                         TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2318                         tbm_surface_internal_unref(tbm_surface);
2319                         return;
2320                 }
2321
2322                 swapchain = wl_vk_surface->swapchain;
2323
2324                 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2325
2326                 if (wl_vk_buffer->status == COMMITTED) {
2327
2328                         tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2329                                                                                                 tbm_surface);
2330                         if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2331                                 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2332
2333                         wl_vk_buffer->status = RELEASED;
2334
2335                         TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
2336                         TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2337                                                         wl_vk_buffer->bo_name);
2338
2339                         TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2340                                           wl_vk_buffer->wl_buffer, tbm_surface,
2341                                           wl_vk_buffer->bo_name);
2342
2343                         tbm_surface_internal_unref(tbm_surface);
2344                 }
2345
2346                 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2347         } else {
2348                 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2349         }
2350 }
2351
2352 static void
2353 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2354                                            unsigned int sequence, unsigned int tv_sec,
2355                                            unsigned int tv_usec, void *user_data)
2356 {
2357         tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
2358         tpl_wl_vk_buffer_t *wl_vk_buffer   = NULL;
2359
2360         TRACE_ASYNC_END((int)wl_vk_surface, "WAIT_VBLANK");
2361         TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface);
2362
2363         if (error == TDM_ERROR_TIMEOUT)
2364                 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
2365                                  wl_vk_surface);
2366
2367         wl_vk_surface->vblank_done = TPL_TRUE;
2368
2369         tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
2370         wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
2371                                                 wl_vk_surface->vblank_waiting_buffers,
2372                                                 NULL);
2373         if (wl_vk_buffer)
2374                 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2375         tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
2376 }
2377
2378 static tpl_result_t
2379 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2380 {
2381         tdm_error tdm_err                     = TDM_ERROR_NONE;
2382         tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
2383
2384         if (wl_vk_surface->vblank == NULL) {
2385                 wl_vk_surface->vblank =
2386                         _thread_create_tdm_client_vblank(wl_vk_display->tdm_client);
2387                 if (!wl_vk_surface->vblank) {
2388                         TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2389                                          wl_vk_surface);
2390                         return TPL_ERROR_OUT_OF_MEMORY;
2391                 }
2392         }
2393
2394         tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2395                         wl_vk_surface->post_interval,
2396                         __cb_tdm_client_vblank,
2397                         (void *)wl_vk_surface);
2398
2399         if (tdm_err == TDM_ERROR_NONE) {
2400                 wl_vk_surface->vblank_done = TPL_FALSE;
2401                 TRACE_ASYNC_BEGIN((int)wl_vk_surface, "WAIT_VBLANK");
2402         } else {
2403                 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2404                 return TPL_ERROR_INVALID_OPERATION;
2405         }
2406
2407         return TPL_ERROR_NONE;
2408 }
2409
/* Attach wl_vk_buffer to the wl_surface, post damage, wire up explicit-sync
 * fences when enabled, then commit and flush. Afterwards marks the buffer
 * COMMITTED, wakes any thread blocked on the buffer's condition variable,
 * and (when vblank pacing is on) arms the next vblank wait.
 * Runs on the wl-vk thread; the caller must NOT hold wl_vk_buffer->mutex. */
static void
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
						  tpl_wl_vk_buffer_t *wl_vk_buffer)
{
	tpl_wl_vk_display_t *wl_vk_display    = wl_vk_surface->wl_vk_display;
	struct wl_surface *wl_surface         = wl_vk_surface->wl_surface;
	uint32_t version;

	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
								   "wl_vk_buffer sould be not NULL");

	/* Lazily create the wl_buffer if the acquire path did not. The listener
	 * is only registered when the wl_buffer was actually created (never pass
	 * NULL to wl_buffer_add_listener) AND explicit sync will not deliver the
	 * release via zwp_release_listner instead. */
	if (wl_vk_buffer->wl_buffer == NULL) {
		wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
											wl_vk_display->wl_tbm_client,
											wl_vk_buffer->tbm_surface);
		if (wl_vk_buffer->wl_buffer &&
			(wl_vk_buffer->acquire_fence_fd == -1 ||
			 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
				wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
									   &wl_buffer_release_listener, wl_vk_buffer);
		}
	}
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
								   "[FATAL] Failed to create wl_buffer");

	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);

	wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
					  wl_vk_buffer->dx, wl_vk_buffer->dy);

	/* No damage rects supplied: damage the whole buffer.
	 * wl_surface_damage_buffer requires wl_surface version >= 4. */
	if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
		if (version < 4) {
			wl_surface_damage(wl_surface,
							  wl_vk_buffer->dx, wl_vk_buffer->dy,
							  wl_vk_buffer->width, wl_vk_buffer->height);
		} else {
			wl_surface_damage_buffer(wl_surface,
									 0, 0,
									 wl_vk_buffer->width, wl_vk_buffer->height);
		}
	} else {
		int i;
		/* rects are x,y,w,h quadruples with a bottom-left origin
		 * (Vulkan convention); flip y to Wayland's top-left origin. */
		for (i = 0; i < wl_vk_buffer->num_rects; i++) {
			int inverted_y =
				wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
						wl_vk_buffer->rects[i * 4 + 3]);
			if (version < 4) {
				wl_surface_damage(wl_surface,
								  wl_vk_buffer->rects[i * 4 + 0],
								  inverted_y,
								  wl_vk_buffer->rects[i * 4 + 2],
								  wl_vk_buffer->rects[i * 4 + 3]);
			} else {
				wl_surface_damage_buffer(wl_surface,
										 wl_vk_buffer->rects[i * 4 + 0],
										 inverted_y,
										 wl_vk_buffer->rects[i * 4 + 2],
										 wl_vk_buffer->rects[i * 4 + 3]);
			}
		}
	}

	/* Explicit sync: hand the acquire fence to the compositor and request a
	 * one-shot release object whose events land in zwp_release_listner. */
	if (wl_vk_display->use_explicit_sync &&
		wl_vk_surface->surface_sync &&
		wl_vk_buffer->acquire_fence_fd != -1) {

		zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
																   wl_vk_buffer->acquire_fence_fd);
		TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
				  wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
		/* The protocol dups the fd; close our copy. */
		close(wl_vk_buffer->acquire_fence_fd);
		wl_vk_buffer->acquire_fence_fd = -1;

		wl_vk_buffer->buffer_release =
			zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
		if (!wl_vk_buffer->buffer_release) {
			TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
		} else {
			zwp_linux_buffer_release_v1_add_listener(
				wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
			TPL_DEBUG("add explicit_sync_release_listener.");
		}
	}

	wl_surface_commit(wl_surface);

	wl_display_flush(wl_vk_display->wl_display);

	TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
					  wl_vk_buffer->bo_name);

	tpl_gmutex_lock(&wl_vk_buffer->mutex);

	wl_vk_buffer->need_to_commit   = TPL_FALSE;
	wl_vk_buffer->status           = COMMITTED;

	/* Wake threads waiting on this buffer's state change. */
	tpl_gcond_signal(&wl_vk_buffer->cond);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	TPL_LOG_T("WL_VK",
			  "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
			  wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
			  wl_vk_buffer->bo_name);

	if (wl_vk_display->use_wait_vblank &&
		_thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
		TPL_ERR("Failed to set wait vblank.");
}
2519
2520 tpl_bool_t
2521 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2522 {
2523         if (!native_dpy) return TPL_FALSE;
2524
2525         if (_check_native_handle_is_wl_display(native_dpy))
2526                 return TPL_TRUE;
2527
2528         return TPL_FALSE;
2529 }
2530
/* Populate the display backend vtable with the threaded Wayland-Vulkan
 * implementations. Called once when this backend is selected. */
void
__tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	/* Display lifecycle. */
	backend->init = __tpl_wl_vk_display_init;
	backend->fini = __tpl_wl_vk_display_fini;
	/* Config queries. */
	backend->query_config = __tpl_wl_vk_display_query_config;
	backend->filter_config = __tpl_wl_vk_display_filter_config;
	/* Window capability queries used by the Vulkan WSI layer. */
	backend->query_window_supported_buffer_count =
		__tpl_wl_vk_display_query_window_supported_buffer_count;
	backend->query_window_supported_present_modes =
		__tpl_wl_vk_display_query_window_supported_present_modes;
}
2548
/* Populate the surface backend vtable with the threaded Wayland-Vulkan
 * implementations. Called once per surface backend setup. */
void
__tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	/* Surface lifecycle. */
	backend->init = __tpl_wl_vk_surface_init;
	backend->fini = __tpl_wl_vk_surface_fini;
	backend->validate = __tpl_wl_vk_surface_validate;
	/* Buffer dequeue/enqueue path. */
	backend->cancel_dequeued_buffer =
		__tpl_wl_vk_surface_cancel_buffer;
	backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
	backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
	/* Swapchain management. */
	backend->get_swapchain_buffers =
		__tpl_wl_vk_surface_get_swapchain_buffers;
	backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
	backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
	backend->set_post_interval =
		__tpl_wl_vk_surface_set_post_interval;
}
2571
2572 static int
2573 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2574 {
2575         return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
2576 }
2577
2578 static void
2579 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2580 {
2581         int idx = 0;
2582
2583         tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2584         TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2585                          wl_vk_surface, wl_vk_surface->buffer_cnt);
2586         for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2587                 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2588                 if (wl_vk_buffer) {
2589                         TPL_INFO("[INFO]",
2590                                          "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2591                                          idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2592                                          wl_vk_buffer->bo_name,
2593                                          status_to_string[wl_vk_buffer->status]);
2594                 }
2595         }
2596         tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2597 }