1 #define inline __inline__
4 #include "tpl_internal.h"
9 #include <sys/eventfd.h>
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
20 #include <tdm_client.h>
22 #include <tizen-surface-client-protocol.h>
23 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
25 #include "tpl_utils_gthread.h"
27 #define BUFFER_ARRAY_SIZE 10
28 #define VK_CLIENT_QUEUE_SIZE 3
30 static int wl_vk_buffer_key;
31 #define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
/* Forward typedefs for the four backend object types defined below. */
typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t;
typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t;
typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t;
typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t;
/* Per-display backend state, shared with the wl_vk worker thread.
 * NOTE(review): additional members referenced later in this file
 * (thread, prepared, tdm_display_fd, min_buffer, max_buffer,
 * present_modes) are declared on elided lines of this struct. */
struct _tpl_wl_vk_display {
	tpl_gsource *disp_source;          /* gsource polling the wl_display fd */
	tpl_gmutex wl_event_mutex;         /* serializes wayland event dispatch */
	struct wl_display *wl_display;     /* application-provided wl_display */
	struct wl_event_queue *ev_queue;   /* private queue for backend proxies */
	struct wayland_tbm_client *wl_tbm_client;
	int last_error; /* errno of the last wl_display error*/
	tpl_bool_t wl_initialized;         /* set by _thread_wl_display_init */
	tpl_bool_t tdm_initialized;        /* set by _thread_tdm_init */
	tdm_client *tdm_client;
	tpl_gsource *tdm_source;           /* gsource polling the tdm client fd */
	tpl_bool_t use_wait_vblank;        /* TPL_WAIT_VBLANK env toggle */
	tpl_bool_t use_explicit_sync;      /* TPL_EFS env + protocol availability */
	/* device surface capabilities */
	struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
/* Swapchain state wrapping the tbm_surface_queue that backs a VkSwapchainKHR. */
struct _tpl_wl_vk_swapchain {
	tpl_wl_vk_surface_t *wl_vk_surface;  /* owning surface (back-pointer) */
	tbm_surface_queue_h tbm_queue;       /* buffer queue for this swapchain */
	tbm_surface_h *swapchain_buffers;    /* buffers exported to the client */
	tpl_util_atomic_uint ref_cnt;        /* swapchain reference count */
/* Per-surface backend state. Most members are touched from the worker
 * thread; buffers[]/buffer_cnt are guarded by buffers_mutex, the rest of
 * the thread-shared state by surf_mutex.
 * NOTE(review): members surf_cond, render_done_cnt and post_interval,
 * referenced later in this file, are declared on elided lines. */
struct _tpl_wl_vk_surface {
	tpl_gsource *surf_source;            /* gsource handling surface messages */
	tpl_wl_vk_swapchain_t *swapchain;    /* current swapchain, NULL if none */
	struct wl_surface *wl_surface;       /* application-provided wl_surface */
	struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
	tdm_client_vblank *vblank;           /* per-surface vblank object */
	/* surface information */
	tpl_wl_vk_display_t *wl_vk_display;  /* owning display (back-pointer) */
	tpl_surface_t *tpl_surface;          /* owning tpl_surface (back-pointer) */
	/* wl_vk_buffer array for buffer tracing */
	tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE];
	int buffer_cnt; /* the number of using wl_vk_buffers */
	tpl_gmutex buffers_mutex;            /* guards buffers[] and buffer_cnt */
	tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
	tpl_gmutex surf_mutex;               /* guards thread-message handshakes */
	/* for waiting draw done */
	tpl_bool_t is_activated;
	tpl_bool_t reset; /* TRUE if queue reseted by external */
	tpl_bool_t vblank_done;              /* TRUE once the awaited vblank fired */
/* Lifecycle states of a wl_vk_buffer. Only two enumerators are visible
 * here; the remaining states (e.g. RELEASED, DEQUEUED, ENQUEUED, ACQUIRED,
 * COMMITTED) used elsewhere in this file are declared on elided lines. */
typedef enum buffer_status {
	WAITING_SIGNALED, // 4
/* Human-readable names indexed by buffer_status_t, used for debug logs. */
static const char *status_to_string[7] = {
	"WAITING_SIGNALED", // 4
	"WAITING_VBLANK", // 5
/* Per-buffer tracking record, paired 1:1 with a tbm_surface. */
struct _tpl_wl_vk_buffer {
	tbm_surface_h tbm_surface;           /* the wrapped tbm buffer */
	struct wl_buffer *wl_buffer;         /* wayland proxy for this buffer */
	int dx, dy; /* position to attach to wl_surface */
	int width, height; /* size to attach to wl_surface */
	buffer_status_t status; /* for tracing buffer status */
	int idx; /* position index in buffers array of wl_vk_surface */
	/* for damage region */
	/* for checking need_to_commit (frontbuffer mode) */
	tpl_bool_t need_to_commit;
	/* to get release event via zwp_linux_buffer_release_v1 */
	struct zwp_linux_buffer_release_v1 *buffer_release;
	/* each buffer owns its release_fence_fd until it passes ownership on */
	int32_t release_fence_fd;
	/* each buffer owns its acquire_fence_fd.
	 * If it uses zwp_linux_buffer_release_v1 the ownership of this fd
	 * will be passed to the display server.
	 * Otherwise it will be used as a fence waiting for render done. */
	int32_t acquire_fence_fd;
	tpl_wl_vk_surface_t *wl_vk_surface;  /* owning surface (back-pointer) */
/* Forward declarations of worker-thread helpers defined later in this
 * file (return types are on elided lines). */
_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
						  tpl_wl_vk_buffer_t *wl_vk_buffer);
/* Checks whether the opaque native display handle is really a wl_display.
 * The first pointer-sized value of a wl_display is its wl_interface, so a
 * pointer identity check (or, as a fallback, an interface-name comparison)
 * distinguishes it from other native handle types.
 * NOTE(review): return statements are on elided lines — presumably
 * TPL_TRUE on a match, TPL_FALSE otherwise; verify against full source. */
_check_native_handle_is_wl_display(tpl_handle_t native_dpy)
	struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;

	if (!wl_vk_native_dpy) {
		TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);

	/* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
	   is a memory address pointing the structure of wl_display_interface. */
	if (wl_vk_native_dpy == &wl_display_interface)

	/* Fallback: the handle may be a wrapper proxy, so also accept a
	 * matching interface name. */
	if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
				strlen(wl_display_interface.name)) == 0) {
219 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
221 tpl_wl_vk_display_t *wl_vk_display = NULL;
222 tdm_error tdm_err = TDM_ERROR_NONE;
226 wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
227 if (!wl_vk_display) {
228 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
229 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
233 tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client);
235 /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
236 * When tdm_source is no longer available due to an unexpected situation,
237 * wl_vk_thread must remove it from the thread and destroy it.
238 * In that case, tdm_vblank can no longer be used for surfaces and displays
239 * that used this tdm_source. */
240 if (tdm_err != TDM_ERROR_NONE) {
241 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
243 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
245 tpl_gsource_destroy(gsource, TPL_FALSE);
247 wl_vk_display->tdm_source = NULL;
/* gsource finalizer for the tdm source: destroys the tdm_client and
 * clears the display's tdm state. Runs on the worker thread. */
__thread_func_tdm_finalize(tpl_gsource *gsource)
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

		"tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)",
		wl_vk_display, wl_vk_display->tdm_client, gsource);

	if (wl_vk_display->tdm_client) {
		tdm_client_destroy(wl_vk_display->tdm_client);
		wl_vk_display->tdm_client = NULL;
		/* fd is owned by the tdm_client; just invalidate our copy */
		wl_vk_display->tdm_display_fd = -1;

	wl_vk_display->tdm_initialized = TPL_FALSE;
/* gsource vtable for the tdm client fd (prepare/check slots elided). */
static tpl_gsource_functions tdm_funcs = {
	.dispatch = __thread_func_tdm_dispatch,
	.finalize = __thread_func_tdm_finalize,
/* Creates the tdm_client, queries its fd, and stores both on the display.
 * The tdm gsource itself is created later (in __tpl_wl_vk_display_init).
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_OPERATION on
 * any tdm failure. */
_thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
	tdm_client *tdm_client = NULL;
	int tdm_display_fd = -1;
	tdm_error tdm_err = TDM_ERROR_NONE;

	tdm_client = tdm_client_create(&tdm_err);
	if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
		return TPL_ERROR_INVALID_OPERATION;

	tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
	if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
		/* fd unusable: roll back the client before failing */
		tdm_client_destroy(tdm_client);
		return TPL_ERROR_INVALID_OPERATION;

	wl_vk_display->tdm_display_fd = tdm_display_fd;
	wl_vk_display->tdm_client = tdm_client;
	wl_vk_display->tdm_source = NULL;
	wl_vk_display->tdm_initialized = TPL_TRUE;

	TPL_INFO("[TDM_CLIENT_INIT]",
			 "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
			 wl_vk_display, tdm_client, tdm_display_fd);

	return TPL_ERROR_NONE;
#define IMPL_TIZEN_SURFACE_SHM_VERSION 2

/* wl_registry global listener: binds the explicit-sync protocol when
 * advertised, unless disabled via the TPL_EFS environment variable.
 * NOTE(review): "resistry" in the name is a typo for "registry"; renaming
 * requires touching registry_listener below as well. */
__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
								 uint32_t name, const char *interface,
	tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;

	if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
		/* TPL_EFS=0 opts out of explicit fence sync entirely. */
		char *env = tpl_getenv("TPL_EFS");
		if (env && !atoi(env)) {
			wl_vk_display->use_explicit_sync = TPL_FALSE;
		wl_vk_display->explicit_sync =
			wl_registry_bind(wl_registry, name,
							 &zwp_linux_explicit_synchronization_v1_interface, 1);
		wl_vk_display->use_explicit_sync = TPL_TRUE;
		TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
/* wl_registry global_remove listener: intentionally a no-op (body elided). */
__cb_wl_resistry_global_remove_callback(void *data,
										struct wl_registry *wl_registry,
/* Listener vtable passed to wl_registry_add_listener during display init. */
static const struct wl_registry_listener registry_listener = {
	__cb_wl_resistry_global_callback,
	__cb_wl_resistry_global_remove_callback
350 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
351 const char *func_name)
355 strerror_r(errno, buf, sizeof(buf));
357 if (wl_vk_display->last_error == errno)
360 TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
362 dpy_err = wl_display_get_error(wl_vk_display->wl_display);
363 if (dpy_err == EPROTO) {
364 const struct wl_interface *err_interface;
365 uint32_t err_proxy_id, err_code;
366 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
369 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
370 err_interface->name, err_code, err_proxy_id);
373 wl_vk_display->last_error = errno;
377 _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
379 struct wl_registry *registry = NULL;
380 struct wl_event_queue *queue = NULL;
381 struct wl_display *display_wrapper = NULL;
382 struct wl_proxy *wl_tbm = NULL;
383 struct wayland_tbm_client *wl_tbm_client = NULL;
385 tpl_result_t result = TPL_ERROR_NONE;
387 queue = wl_display_create_queue(wl_vk_display->wl_display);
389 TPL_ERR("Failed to create wl_queue wl_display(%p)",
390 wl_vk_display->wl_display);
391 result = TPL_ERROR_INVALID_OPERATION;
395 wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
396 if (!wl_vk_display->ev_queue) {
397 TPL_ERR("Failed to create wl_queue wl_display(%p)",
398 wl_vk_display->wl_display);
399 result = TPL_ERROR_INVALID_OPERATION;
403 display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
404 if (!display_wrapper) {
405 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
406 wl_vk_display->wl_display);
407 result = TPL_ERROR_INVALID_OPERATION;
411 wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
413 registry = wl_display_get_registry(display_wrapper);
415 TPL_ERR("Failed to create wl_registry");
416 result = TPL_ERROR_INVALID_OPERATION;
420 wl_proxy_wrapper_destroy(display_wrapper);
421 display_wrapper = NULL;
423 wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
424 if (!wl_tbm_client) {
425 TPL_ERR("Failed to initialize wl_tbm_client.");
426 result = TPL_ERROR_INVALID_CONNECTION;
430 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
432 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
433 result = TPL_ERROR_INVALID_CONNECTION;
437 wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
438 wl_vk_display->wl_tbm_client = wl_tbm_client;
440 if (wl_registry_add_listener(registry, ®istry_listener,
442 TPL_ERR("Failed to wl_registry_add_listener");
443 result = TPL_ERROR_INVALID_OPERATION;
447 ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
449 _wl_display_print_err(wl_vk_display, "roundtrip_queue");
450 result = TPL_ERROR_INVALID_OPERATION;
454 if (wl_vk_display->explicit_sync) {
455 wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
456 wl_vk_display->ev_queue);
457 TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
458 wl_vk_display->explicit_sync);
461 wl_vk_display->wl_initialized = TPL_TRUE;
463 TPL_INFO("[WAYLAND_INIT]",
464 "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
465 wl_vk_display, wl_vk_display->wl_display,
466 wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
467 TPL_INFO("[WAYLAND_INIT]",
469 wl_vk_display->explicit_sync);
473 wl_proxy_wrapper_destroy(display_wrapper);
475 wl_registry_destroy(registry);
477 wl_event_queue_destroy(queue);
/* Worker-thread wayland teardown: cancels any pending read, flushes
 * remaining events, destroys the explicit-sync global, deinitializes
 * the wayland-tbm client and destroys the backend event queue. */
_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
	/* If wl_vk_display is in prepared state, cancel it */
	if (wl_vk_display->prepared) {
		wl_display_cancel_read(wl_vk_display->wl_display);
		wl_vk_display->prepared = TPL_FALSE;

	/* Drain events already read but not yet dispatched. */
	if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
										  wl_vk_display->ev_queue) == -1) {
		_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");

	if (wl_vk_display->explicit_sync) {
		TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
				 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
				 wl_vk_display, wl_vk_display->explicit_sync);
		zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
		wl_vk_display->explicit_sync = NULL;

	if (wl_vk_display->wl_tbm_client) {
		struct wl_proxy *wl_tbm = NULL;

		/* Detach the wl_tbm proxy from ev_queue before deinit. */
		wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
				wl_vk_display->wl_tbm_client);
			wl_proxy_set_queue(wl_tbm, NULL);

		TPL_INFO("[WL_TBM_DEINIT]",
				 "wl_vk_display(%p) wl_tbm_client(%p)",
				 wl_vk_display, wl_vk_display->wl_tbm_client);
		wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
		wl_vk_display->wl_tbm_client = NULL;

	wl_event_queue_destroy(wl_vk_display->ev_queue);

	wl_vk_display->wl_initialized = TPL_FALSE;

	TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
			 wl_vk_display, wl_vk_display->wl_display);
/* Worker thread entry: initializes wayland state (fatal on failure, but
 * failure branch body is elided) and tdm state (non-fatal — vblank waits
 * are simply disabled). Returns the display as the thread result. */
_thread_init(void *data)
	tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;

	if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
		TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
				wl_vk_display, wl_vk_display->wl_display);

	if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
		TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");

	return wl_vk_display;
/* gsource prepare for the wl_display fd: enters wayland's prepare-read
 * protocol, dispatching any already-queued events first, then flushes
 * outgoing requests before the poll. */
__thread_func_disp_prepare(tpl_gsource *gsource)
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	/* If this wl_vk_display is already prepared,
	 * do nothing in this function. */
	if (wl_vk_display->prepared)

	/* If there is a last_error, there is no need to poll,
	 * so skip directly to dispatch.
	 * prepare -> dispatch */
	if (wl_vk_display->last_error)

	/* prepare_read fails while events are pending; dispatch until the
	 * queue is empty, as required by the wayland read protocol. */
	while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
										 wl_vk_display->ev_queue) != 0) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");

	wl_vk_display->prepared = TPL_TRUE;

	/* Push out pending requests before blocking in poll. */
	wl_display_flush(wl_vk_display->wl_display);
/* gsource check for the wl_display fd: completes the prepare-read
 * handshake — read_events when the fd is readable, cancel_read
 * otherwise or on a recorded error. */
__thread_func_disp_check(tpl_gsource *gsource)
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
	tpl_bool_t ret = TPL_FALSE;

	if (!wl_vk_display->prepared)

	/* If prepared, but last_error is set,
	 * cancel_read is executed and FALSE is returned.
	 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
	 * and skipping disp_check from prepare to disp_dispatch.
	 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
	if (wl_vk_display->prepared && wl_vk_display->last_error) {
		wl_display_cancel_read(wl_vk_display->wl_display);

	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_read_events(wl_vk_display->wl_display) == -1)
			_wl_display_print_err(wl_vk_display, "read_event");
		/* fd not readable: must cancel to balance prepare_read */
		wl_display_cancel_read(wl_vk_display->wl_display);

	wl_vk_display->prepared = TPL_FALSE;
/* gsource dispatch for the wl_display fd: dispatches pending events on
 * the backend queue under wl_event_mutex, then flushes. Returns removal
 * on a recorded last_error (return statements elided). */
__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	/* If there is last_error, SOURCE_REMOVE should be returned
	 * to remove the gsource from the main loop.
	 * This is because wl_vk_display is not valid since last_error was set.*/
	if (wl_vk_display->last_error) {

	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
											  wl_vk_display->ev_queue) == -1) {
			_wl_display_print_err(wl_vk_display, "dispatch_queue_pending");

	wl_display_flush(wl_vk_display->wl_display);
	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* gsource finalizer for the wl_display source: tears down wayland state
 * if it was initialized. Runs on the worker thread. */
__thread_func_disp_finalize(tpl_gsource *gsource)
	tpl_wl_vk_display_t *wl_vk_display =
		(tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

	if (wl_vk_display->wl_initialized)
		_thread_wl_display_fini(wl_vk_display);

	TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)",
			  wl_vk_display, gsource);
/* gsource vtable for the wl_display fd. */
static tpl_gsource_functions disp_funcs = {
	.prepare = __thread_func_disp_prepare,
	.check = __thread_func_disp_check,
	.dispatch = __thread_func_disp_dispatch,
	.finalize = __thread_func_disp_finalize,
/* Backend entry point: validates the native wl_display handle, allocates
 * the backend display state, spawns the worker thread (which runs
 * _thread_init), and attaches the wl_display and tdm fds as gsources.
 * On any failure after thread creation, the error tail destroys the
 * partially-created sources and the thread. */
__tpl_wl_vk_display_init(tpl_display_t *display)
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	/* Do not allow default display in wayland */
	if (!display->native_handle) {
		TPL_ERR("Invalid native handle for display.");
		return TPL_ERROR_INVALID_PARAMETER;

	if (!_check_native_handle_is_wl_display(display->native_handle)) {
		TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
		return TPL_ERROR_INVALID_PARAMETER;

	wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
				sizeof(tpl_wl_vk_display_t));
	if (!wl_vk_display) {
		TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
		return TPL_ERROR_OUT_OF_MEMORY;

	display->backend.data = wl_vk_display;
	display->bufmgr_fd = -1;

	wl_vk_display->tdm_initialized = TPL_FALSE;
	wl_vk_display->wl_initialized = TPL_FALSE;

	wl_vk_display->ev_queue = NULL;
	wl_vk_display->wl_display = (struct wl_display *)display->native_handle;
	wl_vk_display->last_error = 0;
	wl_vk_display->use_explicit_sync = TPL_FALSE; // default disabled
	wl_vk_display->prepared = TPL_FALSE;

	/* Wayland Interfaces */
	wl_vk_display->explicit_sync = NULL;
	wl_vk_display->wl_tbm_client = NULL;

	/* Vulkan specific surface capabilities */
	wl_vk_display->min_buffer = 2;
	wl_vk_display->max_buffer = VK_CLIENT_QUEUE_SIZE;
	wl_vk_display->present_modes = TPL_DISPLAY_PRESENT_MODE_FIFO;

	wl_vk_display->use_wait_vblank = TPL_TRUE; // default enabled
	/* TPL_WAIT_VBLANK=0 disables vblank-synchronized commits. */
	char *env = tpl_getenv("TPL_WAIT_VBLANK");
	if (env && !atoi(env)) {
		wl_vk_display->use_wait_vblank = TPL_FALSE;

	tpl_gmutex_init(&wl_vk_display->wl_event_mutex);

	/* Worker thread runs _thread_init for wayland/tdm setup. */
	wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
						   (tpl_gthread_func)_thread_init,
						   (void *)wl_vk_display);
	if (!wl_vk_display->thread) {
		TPL_ERR("Failed to create wl_vk_thread");

	wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
								(void *)wl_vk_display,
								wl_display_get_fd(wl_vk_display->wl_display),
								&disp_funcs, SOURCE_TYPE_NORMAL);
	if (!wl_vk_display->disp_source) {
		TPL_ERR("Failed to add native_display(%p) to thread(%p)",
				display->native_handle,
				wl_vk_display->thread);

	wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread,
							   (void *)wl_vk_display,
							   wl_vk_display->tdm_display_fd,
							   &tdm_funcs, SOURCE_TYPE_NORMAL);
	if (!wl_vk_display->tdm_source) {
		TPL_ERR("Failed to create tdm_gsource\n");

	TPL_INFO("[DISPLAY_INIT]",
			 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
			 wl_vk_display->thread,
			 wl_vk_display->wl_display);

	TPL_INFO("[DISPLAY_INIT]",
			 "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
			 wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
			 wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");

	return TPL_ERROR_NONE;

	/* Error tail (label elided): unwind sources, thread, allocation. */
	if (wl_vk_display->thread) {
		if (wl_vk_display->tdm_source)
			tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
		if (wl_vk_display->disp_source)
			tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);

		tpl_gthread_destroy(wl_vk_display->thread);

	wl_vk_display->thread = NULL;

	display->backend.data = NULL;
	return TPL_ERROR_INVALID_OPERATION;
/* Backend display teardown: destroys the tdm and display gsources
 * (waiting for their finalizers), stops the worker thread, and frees
 * backend state. */
__tpl_wl_vk_display_fini(tpl_display_t *display)
	tpl_wl_vk_display_t *wl_vk_display;

	wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;

	TPL_INFO("[DISPLAY_FINI]",
			 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
			 wl_vk_display->thread,
			 wl_vk_display->wl_display);

	/* tdm source first: its finalize needs a live thread loop. */
	if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) {
		tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
		wl_vk_display->tdm_source = NULL;

	if (wl_vk_display->disp_source) {
		tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
		wl_vk_display->disp_source = NULL;

	if (wl_vk_display->thread) {
		tpl_gthread_destroy(wl_vk_display->thread);
		wl_vk_display->thread = NULL;

	tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);

	display->backend.data = NULL;
/* Reports the native visual for an 8:8:8 window config: ARGB8888 when
 * alpha_size is 8, XRGB8888 when 0. Any other combination is rejected.
 * is_slow, when provided, is always FALSE. */
__tpl_wl_vk_display_query_config(tpl_display_t *display,
								 tpl_surface_type_t surface_type,
								 int red_size, int green_size,
								 int blue_size, int alpha_size,
								 int color_depth, int *native_visual_id,
	if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
			green_size == 8 && blue_size == 8 &&
			(color_depth == 32 || color_depth == 24)) {

		if (alpha_size == 8) {
			if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
			if (is_slow) *is_slow = TPL_FALSE;
			return TPL_ERROR_NONE;
		if (alpha_size == 0) {
			if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
			if (is_slow) *is_slow = TPL_FALSE;
			return TPL_ERROR_NONE;

	return TPL_ERROR_INVALID_PARAMETER;
/* No-op filter hook: accepts every config unchanged. */
__tpl_wl_vk_display_filter_config(tpl_display_t *display,
	TPL_IGNORE(visual_id);
	TPL_IGNORE(alpha_size);
	return TPL_ERROR_NONE;
/* Reports the swapchain buffer-count range (min_buffer/max_buffer set
 * during display init). Either out-parameter may be NULL. */
__tpl_wl_vk_display_query_window_supported_buffer_count(
	tpl_display_t *display,
	tpl_handle_t window, int *min, int *max)
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	if (min) *min = wl_vk_display->min_buffer;
	if (max) *max = wl_vk_display->max_buffer;

	return TPL_ERROR_NONE;
/* Reports the supported VkPresentModeKHR bitmask (FIFO by default,
 * set during display init). */
__tpl_wl_vk_display_query_window_supported_present_modes(
	tpl_display_t *display,
	tpl_handle_t window, int *present_modes)
	tpl_wl_vk_display_t *wl_vk_display = NULL;

	wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

		*present_modes = wl_vk_display->present_modes;

	return TPL_ERROR_NONE;
/* Forcibly drains every tracked wl_vk_buffer of a surface:
 * for each slot, waits (bounded) for in-flight buffers to be signaled,
 * then releases acquired buffers back to the tbm_queue and cancels
 * dequeued ones, finally marking the buffer RELEASED and dropping the
 * extra tbm_surface reference.
 * Lock order per iteration: wl_event_mutex -> buffers_mutex ->
 * (buffer) mutex; wl_event_mutex is dropped around the timed wait so
 * the event thread can make progress and signal the buffer. */
_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
	tpl_bool_t need_to_release = TPL_FALSE;
	tpl_bool_t need_to_cancel = TPL_FALSE;
	buffer_status_t status = RELEASED;

	while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
		tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
		wl_vk_buffer = wl_vk_surface->buffers[idx];

		/* Detach the buffer from the tracking array first. */
			wl_vk_surface->buffers[idx] = NULL;
			wl_vk_surface->buffer_cnt--;
			/* empty slot: skip to the next index */
			tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
			tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		status = wl_vk_buffer->status;

		TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
				  wl_vk_buffer->tbm_surface,
				  status_to_string[status]);

		if (status >= ENQUEUED) {
			tpl_bool_t need_to_wait = TPL_FALSE;
			tpl_result_t wait_result = TPL_ERROR_NONE;

			/* Without explicit sync we must wait until the commit has
			 * at least reached the vblank-wait stage. */
			if (!wl_vk_display->use_explicit_sync &&
					status < WAITING_VBLANK)
				need_to_wait = TPL_TRUE;

			if (wl_vk_display->use_explicit_sync &&
				need_to_wait = TPL_TRUE;

				/* Drop the display lock while waiting so the event
				 * thread can deliver the signal. */
				tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
				wait_result = tpl_cond_timed_wait(&wl_vk_buffer->cond,
												  &wl_vk_buffer->mutex,
				tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

				status = wl_vk_buffer->status;

				if (wait_result == TPL_ERROR_TIME_OUT)
					TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",

		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
		/* It has been acquired but has not yet been released, so this
		 * buffer must be released. */
		need_to_release = (status >= ACQUIRED && status <= COMMITTED);

		/* After dequeue, it has not been enqueued yet
		 * so cancel_dequeue must be performed. */
		need_to_cancel = (status == DEQUEUED);

		if (swapchain && swapchain->tbm_queue) {
			if (need_to_release) {
				tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
													wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);

			if (need_to_cancel) {
				tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
														   wl_vk_buffer->tbm_surface);
				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
					TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
							wl_vk_buffer->tbm_surface, tsq_err);

		wl_vk_buffer->status = RELEASED;

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		/* Drop the reference taken when the buffer entered the queue. */
		if (need_to_release || need_to_cancel)
			tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);

		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Creates a vblank object on the "primary" tdm output.
 * fake-vblank is enabled (events still delivered when the output is off)
 * and sync mode disabled (callbacks fire on the client's event loop).
 * Returns NULL on any failure (return statement elided). */
static tdm_client_vblank*
_thread_create_tdm_client_vblank(tdm_client *tdm_client)
	tdm_client_vblank *vblank = NULL;
	tdm_client_output *tdm_output = NULL;
	tdm_error tdm_err = TDM_ERROR_NONE;

		TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);

	tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
	if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);

	vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
	if (!vblank || tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);

	tdm_client_vblank_set_enable_fake(vblank, 1);
	tdm_client_vblank_set_sync(vblank, 0);
/* Worker-thread surface setup: creates the per-surface vblank object,
 * the explicit-sync surface_sync (when the global is available and
 * enabled), and the FIFO vblank-waiting buffer list. A surface_sync
 * failure downgrades the whole display to non-explicit-sync mode. */
_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	/* tbm_surface_queue will be created at swapchain_create */

	wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
				wl_vk_display->tdm_client);
	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_INIT]",
				 "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_display->tdm_client,
				 wl_vk_surface->vblank);

	if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
		wl_vk_surface->surface_sync =
			zwp_linux_explicit_synchronization_v1_get_synchronization(
					wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
		if (wl_vk_surface->surface_sync) {
			TPL_INFO("[EXPLICIT_SYNC_INIT]",
					 "wl_vk_surface(%p) surface_sync(%p)",
					 wl_vk_surface, wl_vk_surface->surface_sync);
			TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
			/* fall back to implicit sync for the whole display */
			wl_vk_display->use_explicit_sync = TPL_FALSE;

	wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
/* Worker-thread surface teardown (under surf_mutex): frees the vblank
 * waiting list, destroys surface_sync and the vblank object. */
_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);

	TPL_INFO("[SURFACE_FINI]",
			 "wl_vk_surface(%p) wl_surface(%p)",
			 wl_vk_surface, wl_vk_surface->wl_surface);

	if (wl_vk_surface->vblank_waiting_buffers) {
		/* entries are not owned by the list — no destructor */
		__tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
		wl_vk_surface->vblank_waiting_buffers = NULL;

	if (wl_vk_surface->surface_sync) {
		TPL_INFO("[SURFACE_SYNC_DESTROY]",
				 "wl_vk_surface(%p) surface_sync(%p)",
				 wl_vk_surface, wl_vk_surface->surface_sync);
		zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
		wl_vk_surface->surface_sync = NULL;

	if (wl_vk_surface->vblank) {
		TPL_INFO("[VBLANK_DESTROY]",
				 "wl_vk_surface(%p) vblank(%p)",
				 wl_vk_surface, wl_vk_surface->vblank);
		tdm_client_vblank_destroy(wl_vk_surface->vblank);
		wl_vk_surface->vblank = NULL;

	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* Message dispatcher for the surface gsource. Message codes:
 *   1 = initialize surface      (signals surf_cond when done)
 *   2 = create tbm_surface_queue (signals surf_cond when done)
 *   3 = buffer acquirable        (no signal — fire-and-forget)
 *   4 = destroy swapchain        (signals surf_cond when done)
 * Each handler runs under surf_mutex to pair with the sender's
 * tpl_gcond_wait handshake. */
__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;

	wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);

	if (message == 1) { /* Initialize surface */
		tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
		TPL_DEBUG("wl_vk_surface(%p) initialize message received!",
		_thread_wl_vk_surface_init(wl_vk_surface);
		tpl_gcond_signal(&wl_vk_surface->surf_cond);
		tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
	} else if (message == 2) { /* Create tbm_surface_queue */
		tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
		TPL_DEBUG("wl_vk_surface(%p) queue creation message received!",
		if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
				!= TPL_ERROR_NONE) {
			TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
		tpl_gcond_signal(&wl_vk_surface->surf_cond);
		tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
	} else if (message == 3) { /* Acquirable message */
		tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
		TPL_DEBUG("wl_vk_surface(%p) acquirable message received!",
		if (_thread_surface_queue_acquire(wl_vk_surface)
				!= TPL_ERROR_NONE) {
			TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
		tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
	} else if (message == 4) { /* swapchain destroy */
		tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
		TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!",
		_thread_swapchain_destroy_tbm_queue(wl_vk_surface);
		tpl_gcond_signal(&wl_vk_surface->surf_cond);
		tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* gsource finalizer for the surface source: runs the worker-thread
 * surface teardown. */
__thread_func_surf_finalize(tpl_gsource *gsource)
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;

	wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

	_thread_wl_vk_surface_fini(wl_vk_surface);

	TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)",
			  wl_vk_surface, gsource);
/* gsource vtable for the message-driven surface source (no fd polling). */
static tpl_gsource_functions surf_funcs = {
	.dispatch = __thread_func_surf_dispatch,
	.finalize = __thread_func_surf_finalize,
/* Backend surface creation: allocates wl_vk_surface, attaches a
 * message-driven gsource to the worker thread, initializes state and
 * sync primitives, then sends message 1 and blocks on surf_cond until
 * the worker finishes _thread_wl_vk_surface_init. */
__tpl_wl_vk_surface_init(tpl_surface_t *surface)
	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
	tpl_wl_vk_display_t *wl_vk_display = NULL;
	tpl_gsource *surf_source = NULL;

	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
	TPL_ASSERT(surface->native_handle);

	wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

	wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
				sizeof(tpl_wl_vk_surface_t));
	if (!wl_vk_surface) {
		TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
		return TPL_ERROR_OUT_OF_MEMORY;

	/* fd -1: message-driven source, not fd-polling */
	surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
									 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
		TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
		free(wl_vk_surface);
		surface->backend.data = NULL;
		return TPL_ERROR_INVALID_OPERATION;

	surface->backend.data = (void *)wl_vk_surface;
	surface->width = -1;
	surface->height = -1;

	wl_vk_surface->surf_source = surf_source;
	wl_vk_surface->swapchain = NULL;

	wl_vk_surface->wl_vk_display = wl_vk_display;
	wl_vk_surface->wl_surface = (struct wl_surface *)surface->native_handle;

	wl_vk_surface->reset = TPL_FALSE;
	wl_vk_surface->is_activated = TPL_FALSE;
	wl_vk_surface->vblank_done = TPL_TRUE;

	wl_vk_surface->render_done_cnt = 0;

	wl_vk_surface->vblank = NULL;
	wl_vk_surface->surface_sync = NULL;

	wl_vk_surface->post_interval = surface->post_interval;

	for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
		wl_vk_surface->buffers[i] = NULL;
	wl_vk_surface->buffer_cnt = 0;

	tpl_gmutex_init(&wl_vk_surface->surf_mutex);
	tpl_gcond_init(&wl_vk_surface->surf_cond);

	tpl_gmutex_init(&wl_vk_surface->buffers_mutex);

	/* Initialize in thread */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	tpl_gsource_send_message(wl_vk_surface->surf_source, 1);
	/* blocks until the worker signals after _thread_wl_vk_surface_init */
	tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

	TPL_INFO("[SURFACE_INIT]",
			 "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
			 surface, wl_vk_surface, wl_vk_surface->surf_source);

	return TPL_ERROR_NONE;
/* Backend entry point: tear down the wl_vk backend state for a surface.
 *
 * Destroys the thread-side gsource (which runs __thread_func_surf_finalize
 * on the worker thread), clears back-pointers, destroys the surface mutex
 * and condvar, and frees the wl_vk_surface. The swapchain itself is expected
 * to have been destroyed already; only a leftover reference is cleared here.
 *
 * @param surface  tpl_surface previously initialized by
 *                 __tpl_wl_vk_surface_init. No return value.
 */
1245 __tpl_wl_vk_surface_fini(tpl_surface_t *surface)
1247 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1248 tpl_wl_vk_display_t *wl_vk_display = NULL;
1250 TPL_ASSERT(surface);
1251 TPL_ASSERT(surface->display);
1253 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1254 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1256 wl_vk_display = (tpl_wl_vk_display_t *)
1257 surface->display->backend.data;
1258 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1260 TPL_INFO("[SURFACE_FINI][BEGIN]",
1261 "wl_vk_surface(%p) wl_surface(%p)",
1262 wl_vk_surface, wl_vk_surface->wl_surface);
1264 if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
1265 /* finalize swapchain */
1269 wl_vk_surface->swapchain = NULL;
/* TPL_TRUE: destroy the gsource synchronously (waits for the thread-side
 * finalize to complete before returning). */
1271 if (wl_vk_surface->surf_source)
1272 tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
1273 wl_vk_surface->surf_source = NULL;
1275 _print_buffer_lists(wl_vk_surface);
1277 wl_vk_surface->wl_surface = NULL;
1278 wl_vk_surface->wl_vk_display = NULL;
1279 wl_vk_surface->tpl_surface = NULL;
/* Lock/unlock pair drains any in-flight holder of surf_mutex before the
 * mutex is cleared and the struct freed. */
1281 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1282 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1283 tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
1284 tpl_gcond_clear(&wl_vk_surface->surf_cond);
1286 TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);
1288 free(wl_vk_surface);
1289 surface->backend.data = NULL;
/* Backend entry point: update the surface's post (swap) interval.
 *
 * Simply records the new interval on the backend surface; it takes effect
 * on subsequent commits.
 *
 * @param surface        surface whose interval is updated.
 * @param post_interval  new interval value (parameter declared on an
 *                       elided continuation line).
 * @return TPL_ERROR_NONE, or TPL_ERROR_INVALID_PARAMETER on NULL input.
 */
1293 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1296 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1298 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1300 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1302 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1304 TPL_INFO("[SET_POST_INTERVAL]",
1305 "wl_vk_surface(%p) post_interval(%d -> %d)",
1306 wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1308 wl_vk_surface->post_interval = post_interval;
1310 return TPL_ERROR_NONE;
/* Backend entry point: report whether the surface is still valid.
 *
 * The surface is considered invalid while the 'reset' flag is set (queue
 * was resized or activation state changed), signalling the caller that the
 * swapchain must be re-created.
 *
 * @return true-ish when not reset, false-ish when a reset is pending.
 */
1314 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1316 TPL_ASSERT(surface);
1317 TPL_ASSERT(surface->backend.data);
1319 tpl_wl_vk_surface_t *wl_vk_surface =
1320 (tpl_wl_vk_surface_t *)surface->backend.data;
1322 return !(wl_vk_surface->reset);
/* tbm_surface_queue reset callback.
 *
 * Invoked by libtbm when the queue is reset (resize or server-side state
 * change). Marks the surface 'reset' so the next frame picks up the new
 * size / activation state, and forwards the event to the app via
 * surface->reset_cb.
 *
 * @param tbm_queue  the queue that was reset.
 * @param data       the owning tpl_wl_vk_surface_t (registered at queue
 *                   creation time).
 */
1326 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1329 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1330 tpl_wl_vk_display_t *wl_vk_display = NULL;
1331 tpl_wl_vk_swapchain_t *swapchain = NULL;
1332 tpl_surface_t *surface = NULL;
1333 tpl_bool_t is_activated = TPL_FALSE;
1336 wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1337 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1339 wl_vk_display = wl_vk_surface->wl_vk_display;
1340 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1342 surface = wl_vk_surface->tpl_surface;
1343 TPL_CHECK_ON_NULL_RETURN(surface);
1345 swapchain = wl_vk_surface->swapchain;
1346 TPL_CHECK_ON_NULL_RETURN(swapchain);
1348 /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1349 * the changed window size at the next frame. */
1350 width = tbm_surface_queue_get_width(tbm_queue);
1351 height = tbm_surface_queue_get_height(tbm_queue);
1352 if (surface->width != width || surface->height != height) {
1353 TPL_INFO("[QUEUE_RESIZE]",
1354 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1355 wl_vk_surface, tbm_queue,
1356 surface->width, surface->height, width, height);
1359 /* When queue_reset_callback is called, if is_activated is different from
1360 * its previous state change the reset flag to TPL_TRUE to get a new buffer
1361 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1362 is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
1363 swapchain->tbm_queue);
1364 if (wl_vk_surface->is_activated != is_activated) {
1366 TPL_INFO("[ACTIVATED]",
1367 "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1368 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1370 TPL_LOG_T("[DEACTIVATED]",
1371 " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1372 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
/* Any of the above transitions invalidates the current swapchain. */
1376 wl_vk_surface->reset = TPL_TRUE;
/* Notify the upper layer so it can trigger swapchain re-creation. */
1378 if (surface->reset_cb)
1379 surface->reset_cb(surface->reset_data);
/* tbm_surface_queue acquirable callback.
 *
 * Invoked by libtbm when an enqueued buffer becomes acquirable. Posts
 * message 3 to the surface's worker-thread gsource, which runs the acquire
 * path (_thread_surface_queue_acquire) on the display thread.
 */
1383 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1386 TPL_IGNORE(tbm_queue);
1388 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1389 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1391 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1393 tpl_gsource_send_message(wl_vk_surface->surf_source, 3);
1395 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* Worker-thread handler: create (or reuse) the tbm_surface_queue backing a
 * swapchain.
 *
 * Validates the requested buffer count against the display's min/max and
 * the requested present mode against the supported modes. If a queue
 * already exists it is reused: cached swapchain buffers are dropped, the
 * queue is reset when the size changed, and the swapchain refcount is
 * bumped. Otherwise a new wayland-tbm client queue is created (tiled
 * variant when the bufmgr reports tiled-memory capability) and the reset /
 * acquirable callbacks are registered on it.
 *
 * Runs on the display worker thread (dispatched via surf_source message 2).
 *
 * @return TPL_ERROR_NONE on success; TPL_ERROR_INVALID_PARAMETER,
 *         TPL_ERROR_OUT_OF_MEMORY or TPL_ERROR_INVALID_OPERATION on failure.
 */
1399 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1401 TPL_ASSERT (wl_vk_surface);
1403 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1404 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1405 tbm_surface_queue_h tbm_queue = NULL;
1406 tbm_bufmgr bufmgr = NULL;
1407 unsigned int capability;
1409 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1410 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
/* Requested buffer count must lie within the device's reported limits. */
1412 if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
1413 TPL_ERR("buffer count(%d) must be higher than (%d)",
1414 swapchain->properties.buffer_count,
1415 wl_vk_display->min_buffer);
1416 return TPL_ERROR_INVALID_PARAMETER;
1419 if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
1420 TPL_ERR("buffer count(%d) must be lower than (%d)",
1421 swapchain->properties.buffer_count,
1422 wl_vk_display->max_buffer);
1423 return TPL_ERROR_INVALID_PARAMETER;
1426 if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
1427 TPL_ERR("Unsupported present_mode(%d)",
1428 swapchain->properties.present_mode);
1429 return TPL_ERROR_INVALID_PARAMETER;
/* Reuse path: a queue already exists from a previous swapchain. */
1432 if (swapchain->tbm_queue) {
1433 int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
1434 int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
/* Drop the cached per-swapchain buffer references; they will be fetched
 * again by get_swapchain_buffers after the reset. */
1436 if (swapchain->swapchain_buffers) {
1438 for (i = 0; i < swapchain->properties.buffer_count; i++) {
1439 if (swapchain->swapchain_buffers[i]) {
1440 TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]);
1441 tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1442 swapchain->swapchain_buffers[i] = NULL;
1446 free(swapchain->swapchain_buffers);
1447 swapchain->swapchain_buffers = NULL;
1450 if (old_width != swapchain->properties.width ||
1451 old_height != swapchain->properties.height) {
1452 tbm_surface_queue_reset(swapchain->tbm_queue,
1453 swapchain->properties.width,
1454 swapchain->properties.height,
1455 swapchain->properties.format);
1456 TPL_INFO("[RESIZE]",
1457 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
1458 wl_vk_surface, swapchain, swapchain->tbm_queue,
1459 old_width, old_height,
1460 swapchain->properties.width,
1461 swapchain->properties.height);
/* Reflect the queue's actual size back into the swapchain properties. */
1464 swapchain->properties.buffer_count =
1465 tbm_surface_queue_get_size(swapchain->tbm_queue);
1467 wl_vk_surface->reset = TPL_FALSE;
1469 __tpl_util_atomic_inc(&swapchain->ref_cnt);
1471 TPL_INFO("[SWAPCHAIN_REUSE]",
1472 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
1473 wl_vk_surface, swapchain, swapchain->tbm_queue,
1474 swapchain->properties.buffer_count);
1476 return TPL_ERROR_NONE;
/* Fresh-create path: probe bufmgr capabilities to pick the queue flavor. */
1479 bufmgr = tbm_bufmgr_init(-1);
1480 capability = tbm_bufmgr_get_capability(bufmgr);
1481 tbm_bufmgr_deinit(bufmgr);
1483 if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1484 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1485 wl_vk_display->wl_tbm_client,
1486 wl_vk_surface->wl_surface,
1487 swapchain->properties.buffer_count,
1488 swapchain->properties.width,
1489 swapchain->properties.height,
1490 TBM_FORMAT_ARGB8888);
1492 tbm_queue = wayland_tbm_client_create_surface_queue(
1493 wl_vk_display->wl_tbm_client,
1494 wl_vk_surface->wl_surface,
1495 swapchain->properties.buffer_count,
1496 swapchain->properties.width,
1497 swapchain->properties.height,
1498 TBM_FORMAT_ARGB8888);
1502 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1504 return TPL_ERROR_OUT_OF_MEMORY;
/* GUARANTEE_CYCLE keeps dequeue order deterministic, matching Vulkan
 * swapchain image-index semantics. */
1507 if (tbm_surface_queue_set_modes(
1508 tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1509 TBM_SURFACE_QUEUE_ERROR_NONE) {
1510 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1512 tbm_surface_queue_destroy(tbm_queue);
1513 return TPL_ERROR_INVALID_OPERATION;
1516 if (tbm_surface_queue_add_reset_cb(
1518 __cb_tbm_queue_reset_callback,
1519 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1520 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1522 tbm_surface_queue_destroy(tbm_queue);
1523 return TPL_ERROR_INVALID_OPERATION;
1526 if (tbm_surface_queue_add_acquirable_cb(
1528 __cb_tbm_queue_acquirable_callback,
1529 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1530 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1532 tbm_surface_queue_destroy(tbm_queue);
1533 return TPL_ERROR_INVALID_OPERATION;
1536 swapchain->tbm_queue = tbm_queue;
1538 TPL_INFO("[TBM_QUEUE_CREATED]",
1539 "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)",
1540 wl_vk_surface, swapchain, tbm_queue);
1542 return TPL_ERROR_NONE;
/* Backend entry point: create a swapchain for the surface.
 *
 * Allocates the swapchain bookkeeping struct (or reuses the existing one),
 * records the requested properties, then posts message 2 to the worker
 * thread and blocks until _thread_swapchain_create_tbm_queue has produced
 * the tbm_surface_queue. Asserts hard if the thread failed to create it.
 *
 * @param surface       target window surface.
 * @param format        requested tbm format.
 * @param width,height  requested swapchain extent.
 * @param buffer_count  requested image count.
 * @param present_mode  requested Vulkan present mode bit.
 * @return TPL_ERROR_NONE, TPL_ERROR_INVALID_PARAMETER or
 *         TPL_ERROR_OUT_OF_MEMORY.
 */
1546 __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
1547 tbm_format format, int width,
1548 int height, int buffer_count, int present_mode)
1550 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1551 tpl_wl_vk_display_t *wl_vk_display = NULL;
1552 tpl_wl_vk_swapchain_t *swapchain = NULL;
1554 TPL_ASSERT(surface);
1555 TPL_ASSERT(surface->display);
1557 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1558 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1560 wl_vk_display = (tpl_wl_vk_display_t *)
1561 surface->display->backend.data;
1562 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1564 swapchain = wl_vk_surface->swapchain;
1566 if (swapchain == NULL) {
1568 (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
1569 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
1570 swapchain->tbm_queue = NULL;
1573 swapchain->properties.buffer_count = buffer_count;
1574 swapchain->properties.width = width;
1575 swapchain->properties.height = height;
1576 swapchain->properties.present_mode = present_mode;
1577 swapchain->wl_vk_surface = wl_vk_surface;
1579 wl_vk_surface->swapchain = swapchain;
1581 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1582 /* send swapchain create tbm_queue message */
1583 tpl_gsource_send_message(wl_vk_surface->surf_source, 2);
1584 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1585 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1587 TPL_CHECK_ON_FALSE_ASSERT_FAIL(
1588 swapchain->tbm_queue != NULL,
1589 "[CRITICAL FAIL] Failed to create tbm_surface_queue");
1591 wl_vk_surface->reset = TPL_FALSE;
/* First (or re-)creation owns one reference; reuse path increments it on
 * the worker thread instead. */
1593 __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
1595 return TPL_ERROR_NONE;
/* Worker-thread handler: destroy the swapchain's tbm_surface_queue.
 *
 * Runs on the display worker thread (dispatched via surf_source message 4)
 * so that queue destruction is serialized with the acquire/commit path.
 */
1599 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1601 TPL_ASSERT(wl_vk_surface);
1603 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1605 TPL_CHECK_ON_NULL_RETURN(swapchain);
1607 if (swapchain->tbm_queue) {
1608 TPL_INFO("[TBM_QUEUE_DESTROY]",
1609 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1610 wl_vk_surface, swapchain, swapchain->tbm_queue);
1611 tbm_surface_queue_destroy(swapchain->tbm_queue);
1612 swapchain->tbm_queue = NULL;
/* Backend entry point: destroy the surface's swapchain.
 *
 * Drops one swapchain reference; when references remain the swapchain is
 * kept alive (reuse case) and TPL_ERROR_NONE is returned early. On the last
 * reference: releases cached swapchain buffer refs, clears tracked buffers,
 * then posts message 4 to the worker thread and waits for the tbm_queue to
 * be destroyed there.
 *
 * @return TPL_ERROR_NONE; TPL_ERROR_INVALID_PARAMETER on bad input;
 *         TPL_ERROR_INVALID_OPERATION when already destroyed.
 */
1617 __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
1619 tpl_wl_vk_swapchain_t *swapchain = NULL;
1620 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1621 tpl_wl_vk_display_t *wl_vk_display = NULL;
1623 TPL_ASSERT(surface);
1624 TPL_ASSERT(surface->display);
1626 wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
1627 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1629 wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
1630 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1632 swapchain = wl_vk_surface->swapchain;
1634 TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
1636 return TPL_ERROR_INVALID_OPERATION;
/* Not the last reference: swapchain stays alive for the reusing owner. */
1639 if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
1640 TPL_INFO("[DESTROY_SWAPCHAIN]",
1641 "wl_vk_surface(%p) swapchain(%p) still valid.",
1642 wl_vk_surface, swapchain);
1643 return TPL_ERROR_NONE;
1646 TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
1647 "wl_vk_surface(%p) swapchain(%p)",
1648 wl_vk_surface, wl_vk_surface->swapchain);
/* Release the references taken in get_swapchain_buffers. */
1650 if (swapchain->swapchain_buffers) {
1651 for (int i = 0; i < swapchain->properties.buffer_count; i++) {
1652 if (swapchain->swapchain_buffers[i]) {
1653 TPL_DEBUG("Stop tracking tbm_surface(%p)",
1654 swapchain->swapchain_buffers[i]);
1655 tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1656 swapchain->swapchain_buffers[i] = NULL;
1660 free(swapchain->swapchain_buffers);
1661 swapchain->swapchain_buffers = NULL;
1664 _tpl_wl_vk_surface_buffer_clear(wl_vk_surface);
/* Message 4: destroy the tbm_queue on the worker thread; block until done. */
1666 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1667 tpl_gsource_send_message(wl_vk_surface->surf_source, 4);
1668 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1669 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1671 _print_buffer_lists(wl_vk_surface);
1674 wl_vk_surface->swapchain = NULL;
1676 return TPL_ERROR_NONE;
/* Backend entry point: expose the swapchain's backing tbm_surfaces.
 *
 * On the first call, allocates the swapchain_buffers cache, fills it via
 * wayland_tbm_client_queue_get_surfaces() and takes an internal reference
 * on each surface (released again in destroy_swapchain / queue reuse).
 * A fast path (visible only partially here) returns just the count.
 * All work is done under the display's wl_event_mutex.
 *
 * @param buffers       out: array of tbm_surface_h owned by the swapchain —
 *                      callers must not free it.
 * @param buffer_count  out: number of buffers (declared on an elided line).
 * @return TPL_ERROR_NONE, TPL_ERROR_INVALID_PARAMETER,
 *         TPL_ERROR_OUT_OF_MEMORY or TPL_ERROR_INVALID_OPERATION.
 */
1680 __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
1681 tbm_surface_h **buffers,
1684 TPL_ASSERT(surface);
1685 TPL_ASSERT(surface->backend.data);
1686 TPL_ASSERT(surface->display);
1687 TPL_ASSERT(surface->display->backend.data);
1689 tpl_wl_vk_surface_t *wl_vk_surface =
1690 (tpl_wl_vk_surface_t *)surface->backend.data;
1691 tpl_wl_vk_display_t *wl_vk_display =
1692 (tpl_wl_vk_display_t *)surface->display->backend.data;
1693 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1694 tpl_result_t ret = TPL_ERROR_NONE;
1697 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1698 TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
1700 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
/* Count-only query path: report queue size without filling the array. */
1703 *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
1704 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1705 return TPL_ERROR_NONE;
1708 swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
1710 sizeof(tbm_surface_h));
1711 if (!swapchain->swapchain_buffers) {
1712 TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
1714 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1715 return TPL_ERROR_OUT_OF_MEMORY;
1718 ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
1719 swapchain->tbm_queue,
1720 swapchain->swapchain_buffers,
1723 TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
1724 wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
1725 free(swapchain->swapchain_buffers);
1726 swapchain->swapchain_buffers = NULL;
1727 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1728 return TPL_ERROR_INVALID_OPERATION;
/* Keep each buffer alive for the lifetime of the swapchain cache. */
1731 for (i = 0; i < *buffer_count; i++) {
1732 if (swapchain->swapchain_buffers[i]) {
1733 TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
1734 i, swapchain->swapchain_buffers[i],
1735 _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1736 tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
1740 *buffers = swapchain->swapchain_buffers;
1742 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1744 return TPL_ERROR_NONE;
/* tbm user-data destructor for a tpl_wl_vk_buffer_t.
 *
 * Registered via tbm_surface_internal_add_user_data(); runs when the
 * tbm_surface's user data is destroyed. Removes the buffer from the
 * surface's tracking array, destroys the associated wl_buffer proxy and
 * explicit-sync buffer_release object, closes any pending release fence fd
 * and frees the damage-rect array. (The struct itself is freed by an elided
 * trailing free — the tbm user-data free callback owns this memory.)
 */
1748 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
1750 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
1751 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1753 TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
1754 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);
/* Detach from the surface's fixed-size tracking array. */
1756 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
1757 if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
1758 wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
1759 wl_vk_surface->buffer_cnt--;
1761 wl_vk_buffer->idx = -1;
1763 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
/* Flush so any queued destroy requests reach the compositor promptly. */
1765 wl_display_flush(wl_vk_display->wl_display);
1767 if (wl_vk_buffer->wl_buffer) {
1768 wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
1769 wl_vk_buffer->wl_buffer);
1770 wl_vk_buffer->wl_buffer = NULL;
1773 if (wl_vk_buffer->buffer_release) {
1774 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
1775 wl_vk_buffer->buffer_release = NULL;
1778 if (wl_vk_buffer->release_fence_fd != -1) {
1779 close(wl_vk_buffer->release_fence_fd);
1780 wl_vk_buffer->release_fence_fd = -1;
1783 if (wl_vk_buffer->rects) {
1784 free(wl_vk_buffer->rects);
1785 wl_vk_buffer->rects = NULL;
1786 wl_vk_buffer->num_rects = 0;
1789 wl_vk_buffer->tbm_surface = NULL;
1790 wl_vk_buffer->bo_name = -1;
/* Look up the tpl_wl_vk_buffer_t attached to a tbm_surface via the
 * KEY_WL_VK_BUFFER user-data slot. Returns NULL when none is attached. */
1795 static tpl_wl_vk_buffer_t *
1796 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1798 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1799 tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1800 (void **)&wl_vk_buffer);
1801 return wl_vk_buffer;
/* Get-or-create the tpl_wl_vk_buffer_t wrapper for a dequeued tbm_surface.
 *
 * If the tbm_surface already carries a wrapper (attached as user data),
 * reuse it; otherwise allocate one, register __cb_wl_vk_buffer_free as its
 * user-data destructor, initialize its fields, and insert it into the
 * surface's fixed BUFFER_ARRAY_SIZE tracking array (evicting slot 0 with a
 * warning if the array is full — treated as a leak indicator).
 *
 * @return the wrapper, or NULL on allocation failure.
 */
1804 static tpl_wl_vk_buffer_t *
1805 _wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
1806 tbm_surface_h tbm_surface)
1808 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1810 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
1812 if (!wl_vk_buffer) {
1813 wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
1814 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);
/* Attach the wrapper to the tbm_surface so later lookups and final
 * cleanup (via the free callback) find it. */
1816 tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1817 (tbm_data_free)__cb_wl_vk_buffer_free);
1818 tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1821 wl_vk_buffer->wl_buffer = NULL;
1822 wl_vk_buffer->tbm_surface = tbm_surface;
1823 wl_vk_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
1824 wl_vk_buffer->wl_vk_surface = wl_vk_surface;
1826 wl_vk_buffer->status = RELEASED;
1828 wl_vk_buffer->acquire_fence_fd = -1;
1829 wl_vk_buffer->release_fence_fd = -1;
1831 wl_vk_buffer->dx = 0;
1832 wl_vk_buffer->dy = 0;
1833 wl_vk_buffer->width = tbm_surface_get_width(tbm_surface);
1834 wl_vk_buffer->height = tbm_surface_get_height(tbm_surface);
1836 wl_vk_buffer->rects = NULL;
1837 wl_vk_buffer->num_rects = 0;
1839 tpl_gmutex_init(&wl_vk_buffer->mutex);
1840 tpl_gcond_init(&wl_vk_buffer->cond);
1842 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
/* Find the first free slot in the tracking array. */
1845 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1846 if (wl_vk_surface->buffers[i] == NULL) break;
1848 /* If this exception is reached,
1849 * it may be a critical memory leak problem. */
1850 if (i == BUFFER_ARRAY_SIZE) {
1851 tpl_wl_vk_buffer_t *evicted_buffer = NULL;
1852 int evicted_idx = 0; /* evict the frontmost buffer */
1854 evicted_buffer = wl_vk_surface->buffers[evicted_idx];
1856 TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
1858 TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
1859 evicted_buffer, evicted_buffer->tbm_surface,
1860 status_to_string[evicted_buffer->status]);
1862 /* [TODO] need to think about whether there will be
1863 * better modifications */
1864 wl_vk_surface->buffer_cnt--;
1865 wl_vk_surface->buffers[evicted_idx] = NULL;
1870 wl_vk_surface->buffer_cnt++;
1871 wl_vk_surface->buffers[i] = wl_vk_buffer;
1872 wl_vk_buffer->idx = i;
1874 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1876 TPL_INFO("[WL_VK_BUFFER_CREATE]",
1877 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
1878 wl_vk_surface, wl_vk_buffer, tbm_surface,
1879 wl_vk_buffer->bo_name);
/* Per-dequeue state reset (applies to both new and reused wrappers). */
1882 wl_vk_buffer->need_to_commit = TPL_FALSE;
1883 wl_vk_buffer->buffer_release = NULL;
1885 return wl_vk_buffer;
/* Backend entry point: dequeue the next renderable buffer.
 *
 * Waits (optionally with a timeout in nanoseconds; UINT64_MAX = wait
 * forever) until the tbm_queue has a dequeueable buffer, dequeues it,
 * wraps it in a tpl_wl_vk_buffer_t and marks it DEQUEUED. When explicit
 * sync is in use, hands the buffer's stored release fence fd to the caller
 * through *release_fence (ownership transfers; the field is reset to -1).
 *
 * The surface object lock is dropped while blocking so other entry points
 * can run; the display's wl_event_mutex guards the queue manipulation.
 *
 * @param timeout_ns     wait budget in ns, UINT64_MAX for infinite.
 * @param release_fence  optional out fence fd (-1 when none).
 * @return the dequeued tbm_surface (internally ref'd), or NULL on
 *         timeout/error/pending reset (early returns on elided lines).
 */
1888 static tbm_surface_h
1889 __tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
1890 uint64_t timeout_ns,
1891 int32_t *release_fence)
1893 TPL_ASSERT(surface);
1894 TPL_ASSERT(surface->backend.data);
1895 TPL_ASSERT(surface->display);
1896 TPL_ASSERT(surface->display->backend.data);
1897 TPL_OBJECT_CHECK_RETURN(surface, NULL);
1899 tpl_wl_vk_surface_t *wl_vk_surface =
1900 (tpl_wl_vk_surface_t *)surface->backend.data;
1901 tpl_wl_vk_display_t *wl_vk_display =
1902 (tpl_wl_vk_display_t *)surface->display->backend.data;
1903 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1904 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1906 tbm_surface_h tbm_surface = NULL;
1907 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1909 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
1910 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);
/* Unlock the TPL object while blocking on dequeue availability. */
1912 TPL_OBJECT_UNLOCK(surface);
1913 TRACE_BEGIN("WAIT_DEQUEUEABLE");
1914 if (timeout_ns != UINT64_MAX) {
/* tbm API takes microseconds; convert from ns. */
1915 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
1916 swapchain->tbm_queue, timeout_ns/1000);
1918 tsq_err = tbm_surface_queue_can_dequeue(
1919 swapchain->tbm_queue, 1);
1922 TPL_OBJECT_LOCK(surface);
1924 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
1925 TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
1928 } else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1929 TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
1930 wl_vk_surface, swapchain->tbm_queue, tsq_err);
1934 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
/* A pending reset means this queue's buffers are stale; caller must
 * re-create the swapchain first. */
1936 if (wl_vk_surface->reset) {
1937 TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
1938 swapchain, swapchain->tbm_queue);
1939 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1943 tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
1946 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
1947 swapchain->tbm_queue, wl_vk_surface, tsq_err);
1948 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Hold a ref while the client renders; dropped on enqueue/cancel. */
1952 tbm_surface_internal_ref(tbm_surface);
1954 wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
1955 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");
1957 tpl_gmutex_lock(&wl_vk_buffer->mutex);
1958 wl_vk_buffer->status = DEQUEUED;
1960 if (release_fence) {
1961 if (wl_vk_surface->surface_sync) {
/* Explicit sync: transfer fence fd ownership to the caller. */
1962 *release_fence = wl_vk_buffer->release_fence_fd;
1963 TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
1964 wl_vk_surface, wl_vk_buffer, *release_fence);
1965 wl_vk_buffer->release_fence_fd = -1;
1967 *release_fence = -1;
1971 wl_vk_surface->reset = TPL_FALSE;
1973 TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
1974 wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
1975 release_fence ? *release_fence : -1);
1977 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
1978 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Backend entry point: return a dequeued buffer without presenting it.
 *
 * Marks the wrapper RELEASED, drops the render-time reference taken at
 * dequeue, and cancels the dequeue on the tbm_queue so the slot becomes
 * available again.
 *
 * NOTE(review): _get_wl_vk_buffer() can return NULL and the result is used
 * without a NULL check before locking its mutex — confirm callers only
 * cancel surfaces that went through dequeue (which attaches the wrapper).
 *
 * @return TPL_ERROR_NONE, TPL_ERROR_INVALID_PARAMETER or
 *         TPL_ERROR_INVALID_OPERATION.
 */
1984 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
1985 tbm_surface_h tbm_surface)
1987 TPL_ASSERT(surface);
1988 TPL_ASSERT(surface->backend.data);
1990 tpl_wl_vk_surface_t *wl_vk_surface =
1991 (tpl_wl_vk_surface_t *)surface->backend.data;
1992 tpl_wl_vk_swapchain_t *swapchain = NULL;
1993 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1994 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1996 TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
1997 TPL_ERROR_INVALID_PARAMETER);
1999 swapchain = wl_vk_surface->swapchain;
2000 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2001 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2002 TPL_ERROR_INVALID_PARAMETER);
2004 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2006 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2007 wl_vk_buffer->status = RELEASED;
2008 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
/* Drop the reference taken in dequeue_buffer. */
2011 tbm_surface_internal_unref(tbm_surface);
2013 TPL_INFO("[CANCEL BUFFER]",
2014 "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2015 wl_vk_surface, swapchain, tbm_surface,
2016 _get_tbm_surface_bo_name(tbm_surface));
2018 tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2020 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2021 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2022 return TPL_ERROR_INVALID_OPERATION;
2025 return TPL_ERROR_NONE;
/* Backend entry point: submit a rendered buffer for presentation.
 *
 * Stores the damage rects (copied; 4 ints per rect: presumably x,y,w,h —
 * format defined by the caller contract) and the acquire fence fd on the
 * wrapper, marks it ENQUEUED, then enqueues it on the tbm_queue. The
 * acquirable callback subsequently triggers the worker-thread acquire/
 * commit path. The dequeue-time tbm reference is dropped here on both
 * success and failure paths.
 *
 * @param num_rects/rects  optional damage rectangles (may be 0/NULL).
 * @param acquire_fence    fd signalled when rendering completes; ownership
 *                         transfers to the wrapper (-1 for none).
 * @return TPL_ERROR_NONE, TPL_ERROR_INVALID_PARAMETER or
 *         TPL_ERROR_INVALID_OPERATION.
 */
2029 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2030 tbm_surface_h tbm_surface,
2031 int num_rects, const int *rects,
2032 int32_t acquire_fence)
2034 TPL_ASSERT(surface);
2035 TPL_ASSERT(surface->display);
2036 TPL_ASSERT(surface->backend.data);
2037 TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2039 tpl_wl_vk_surface_t *wl_vk_surface =
2040 (tpl_wl_vk_surface_t *) surface->backend.data;
2041 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2042 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2043 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2046 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2047 TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2048 TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2049 TPL_ERROR_INVALID_PARAMETER);
2051 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2052 bo_name = wl_vk_buffer->bo_name;
2054 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2056 /* If there are received region information, save it to wl_vk_buffer */
2057 if (num_rects && rects) {
/* Replace any rects left over from a previous frame. */
2058 if (wl_vk_buffer->rects != NULL) {
2059 free(wl_vk_buffer->rects);
2060 wl_vk_buffer->rects = NULL;
2061 wl_vk_buffer->num_rects = 0;
2064 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2065 wl_vk_buffer->num_rects = num_rects;
2067 if (wl_vk_buffer->rects) {
2068 memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2069 sizeof(int) * 4 * num_rects);
2071 TPL_ERR("Failed to allocate memory for rects info.");
/* Take ownership of the acquire fence, closing any stale one first. */
2075 if (wl_vk_buffer->acquire_fence_fd != -1)
2076 close(wl_vk_buffer->acquire_fence_fd);
2078 wl_vk_buffer->acquire_fence_fd = acquire_fence;
2080 wl_vk_buffer->status = ENQUEUED;
2082 "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2083 wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2085 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2087 tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2089 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2090 tbm_surface_internal_unref(tbm_surface);
2091 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2092 tbm_surface, wl_vk_surface, tsq_err);
2093 return TPL_ERROR_INVALID_OPERATION;
/* Drop the dequeue-time reference now that the queue owns the buffer. */
2096 tbm_surface_internal_unref(tbm_surface);
2098 return TPL_ERROR_NONE;
/* wl_buffer listener: used only when explicit sync is unavailable (or no
 * acquire fence is provided); routes compositor release events to
 * __cb_wl_buffer_release. The cast matches the wl_proxy-typed callback. */
2101 static const struct wl_buffer_listener wl_buffer_release_listener = {
2102 (void *)__cb_wl_buffer_release,
/* Worker-thread handler: drain acquirable buffers and commit them.
 *
 * For each acquirable buffer on the tbm_queue: take an internal ref, look
 * up its wrapper, lazily create the wl_buffer proxy (registering the
 * release listener only when explicit sync is not in use or no acquire
 * fence exists), then either commit immediately or — when vblank pacing is
 * active and a vblank is pending — park the buffer on the
 * vblank_waiting_buffers list for the vblank handler to commit later.
 *
 * Runs on the display worker thread (dispatched via surf_source message 3).
 *
 * @return TPL_ERROR_NONE, or TPL_ERROR_INVALID_PARAMETER /
 *         TPL_ERROR_INVALID_OPERATION on failure.
 */
2106 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
2108 tbm_surface_h tbm_surface = NULL;
2109 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2110 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2111 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2112 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2113 tpl_bool_t ready_to_commit = TPL_TRUE;
2115 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2117 while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
2118 tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
2120 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2121 TPL_ERR("Failed to acquire from tbm_queue(%p)",
2122 swapchain->tbm_queue);
2123 return TPL_ERROR_INVALID_OPERATION;
/* Hold a ref until the compositor releases the buffer. */
2126 tbm_surface_internal_ref(tbm_surface);
2128 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2129 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2130 "wl_vk_buffer sould be not NULL");
2132 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2134 wl_vk_buffer->status = ACQUIRED;
2136 TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2137 wl_vk_buffer, tbm_surface,
2138 _get_tbm_surface_bo_name(tbm_surface));
/* Lazily create the wl_buffer proxy on first commit of this buffer. */
2140 if (wl_vk_buffer->wl_buffer == NULL) {
2141 wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2142 wl_vk_display->wl_tbm_client, tbm_surface);
2144 if (!wl_vk_buffer->wl_buffer) {
2145 TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
2146 wl_vk_display->wl_tbm_client, tbm_surface);
/* Without explicit sync (or without an acquire fence) releases come
 * via the plain wl_buffer.release event. */
2148 if (wl_vk_buffer->acquire_fence_fd == -1 ||
2149 wl_vk_display->use_explicit_sync == TPL_FALSE) {
2150 wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2151 &wl_buffer_release_listener, wl_vk_buffer);
2155 "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
2156 wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
/* Commit now unless we must wait for the next vblank. */
2160 if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done)
2161 ready_to_commit = TPL_TRUE;
2163 wl_vk_buffer->status = WAITING_VBLANK;
2164 __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
2165 ready_to_commit = TPL_FALSE;
2168 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2170 if (ready_to_commit)
2171 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2174 return TPL_ERROR_NONE;
/* zwp_linux_buffer_release_v1 'fenced_release' handler (explicit sync).
 *
 * The compositor is done with the buffer but GPU work may still be pending;
 * 'fence' (elided parameter) must be waited on before reuse. Stores the
 * fence fd on the wrapper, marks it RELEASED, releases the buffer back to
 * the tbm_queue and drops the commit-time reference.
 */
2178 __cb_buffer_fenced_release(void *data,
2179 struct zwp_linux_buffer_release_v1 *release,
2182 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2183 tbm_surface_h tbm_surface = NULL;
2185 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2187 tbm_surface = wl_vk_buffer->tbm_surface;
2189 if (tbm_surface_internal_is_valid(tbm_surface)) {
2190 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2191 tpl_wl_vk_swapchain_t *swapchain = NULL;
2193 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2194 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2195 tbm_surface_internal_unref(tbm_surface);
2199 swapchain = wl_vk_surface->swapchain;
2201 tpl_gmutex_lock(&wl_vk_buffer->mutex);
/* Only act on buffers we actually committed; stale events are ignored. */
2202 if (wl_vk_buffer->status == COMMITTED) {
2203 tbm_surface_queue_error_e tsq_err;
/* The release object is single-shot; destroy it after the event. */
2205 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2206 wl_vk_buffer->buffer_release = NULL;
/* Fence fd ownership passes to the wrapper; handed out at next dequeue. */
2208 wl_vk_buffer->release_fence_fd = fence;
2209 wl_vk_buffer->status = RELEASED;
2211 TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2212 wl_vk_buffer->bo_name,
2214 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2215 wl_vk_buffer->bo_name);
2218 "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2219 wl_vk_buffer, tbm_surface,
2220 wl_vk_buffer->bo_name,
2223 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2225 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2226 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
/* Drop the reference taken when the buffer was acquired/committed. */
2228 tbm_surface_internal_unref(tbm_surface);
2231 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2234 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* zwp_linux_buffer_release_v1 'immediate_release' handler (explicit sync).
 *
 * Same flow as the fenced variant, but the compositor guarantees the buffer
 * is fully idle — no fence is delivered, so release_fence_fd is set to -1.
 */
2239 __cb_buffer_immediate_release(void *data,
2240 struct zwp_linux_buffer_release_v1 *release)
2242 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2243 tbm_surface_h tbm_surface = NULL;
2245 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2247 tbm_surface = wl_vk_buffer->tbm_surface;
2249 if (tbm_surface_internal_is_valid(tbm_surface)) {
2250 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2251 tpl_wl_vk_swapchain_t *swapchain = NULL;
2253 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2254 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2255 tbm_surface_internal_unref(tbm_surface);
2259 swapchain = wl_vk_surface->swapchain;
2261 tpl_gmutex_lock(&wl_vk_buffer->mutex);
/* Only act on buffers we actually committed; stale events are ignored. */
2262 if (wl_vk_buffer->status == COMMITTED) {
2263 tbm_surface_queue_error_e tsq_err;
/* The release object is single-shot; destroy it after the event. */
2265 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2266 wl_vk_buffer->buffer_release = NULL;
/* No fence for immediate release: the buffer is already idle. */
2268 wl_vk_buffer->release_fence_fd = -1;
2269 wl_vk_buffer->status = RELEASED;
2271 TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
2272 _get_tbm_surface_bo_name(tbm_surface));
2273 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2274 _get_tbm_surface_bo_name(tbm_surface));
2277 "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2278 wl_vk_buffer, tbm_surface,
2279 _get_tbm_surface_bo_name(tbm_surface));
2281 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2283 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2284 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
/* Drop the reference taken when the buffer was acquired/committed. */
2286 tbm_surface_internal_unref(tbm_surface);
2289 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2292 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* Listener for zwp_linux_buffer_release_v1 (explicit-sync path).
 * Entry order follows the protocol: fenced_release first, then
 * immediate_release.
 * NOTE(review): "listner" is a typo in the identifier; kept as-is
 * because its use site must match. */
2296 static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
2297 __cb_buffer_fenced_release,
2298 __cb_buffer_immediate_release,
/* wl_buffer "release" event handler (non-explicit-sync path).
 * Used when no release fence is expected: the compositor is done with
 * the buffer, so release it back to the swapchain's tbm_queue, mark it
 * RELEASED and drop the commit-time tbm_surface reference. */
2302 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
2304 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2305 tbm_surface_h tbm_surface = NULL;
2307 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)
2309 tbm_surface = wl_vk_buffer->tbm_surface;
2311 if (tbm_surface_internal_is_valid(tbm_surface)) {
2312 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2313 tpl_wl_vk_swapchain_t *swapchain = NULL;
2314 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
/* Surface or swapchain may already be gone; drop our ref and exit. */
2316 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2317 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2318 tbm_surface_internal_unref(tbm_surface);
2322 swapchain = wl_vk_surface->swapchain;
2324 tpl_gmutex_lock(&wl_vk_buffer->mutex);
/* Only act on buffers still waiting in COMMITTED state. */
2326 if (wl_vk_buffer->status == COMMITTED) {
/* Make the buffer dequeueable again. */
2328 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2330 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2331 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2333 wl_vk_buffer->status = RELEASED;
2335 TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
2336 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2337 wl_vk_buffer->bo_name);
2339 TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2340 wl_vk_buffer->wl_buffer, tbm_surface,
2341 wl_vk_buffer->bo_name);
/* Drop the reference taken when the buffer was committed. */
2343 tbm_surface_internal_unref(tbm_surface);
2346 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2348 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* TDM vblank callback, invoked on the event thread when the requested
 * vblank arrives (or times out). Marks the surface's vblank as done and,
 * if a buffer was queued waiting for vblank, pops the oldest one and
 * commits it. */
2353 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2354 unsigned int sequence, unsigned int tv_sec,
2355 unsigned int tv_usec, void *user_data)
2357 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
2358 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2360 TRACE_ASYNC_END((int)wl_vk_surface, "WAIT_VBLANK");
2361 TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface);
/* Timeout is treated as non-fatal: commit proceeds anyway. */
2363 if (error == TDM_ERROR_TIMEOUT)
2364 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
2367 wl_vk_surface->vblank_done = TPL_TRUE;
/* Commit the oldest buffer that was parked waiting for this vblank. */
2369 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
2370 wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
2371 wl_vk_surface->vblank_waiting_buffers,
2374 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2375 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* Arms a TDM vblank wait for the surface, creating the per-surface
 * tdm_client_vblank object lazily on first use.
 * On success clears vblank_done (the __cb_tdm_client_vblank callback
 * will set it again) and returns TPL_ERROR_NONE; returns
 * TPL_ERROR_OUT_OF_MEMORY if the vblank object cannot be created and
 * TPL_ERROR_INVALID_OPERATION if the wait request fails. */
2379 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2381 tdm_error tdm_err = TDM_ERROR_NONE;
2382 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
/* Lazy creation: the vblank handle lives for the surface's lifetime. */
2384 if (wl_vk_surface->vblank == NULL) {
2385 wl_vk_surface->vblank =
2386 _thread_create_tdm_client_vblank(wl_vk_display->tdm_client);
2387 if (!wl_vk_surface->vblank) {
2388 TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2390 return TPL_ERROR_OUT_OF_MEMORY;
/* post_interval controls how many vblanks to wait before the callback. */
2394 tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2395 wl_vk_surface->post_interval,
2396 __cb_tdm_client_vblank,
2397 (void *)wl_vk_surface);
2399 if (tdm_err == TDM_ERROR_NONE) {
2400 wl_vk_surface->vblank_done = TPL_FALSE;
2401 TRACE_ASYNC_BEGIN((int)wl_vk_surface, "WAIT_VBLANK");
2403 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2404 return TPL_ERROR_INVALID_OPERATION;
2407 return TPL_ERROR_NONE;
/* Commits one wl_vk_buffer to the compositor (runs on the worker thread).
 * Sequence: lazily create the wl_buffer, attach it, post damage, set the
 * explicit-sync acquire fence when available, commit + flush, then mark
 * the buffer COMMITTED and wake any waiter. Optionally arms a vblank
 * wait so the next commit is throttled to the display refresh. */
2411 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
2412 tpl_wl_vk_buffer_t *wl_vk_buffer)
2414 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2415 struct wl_surface *wl_surface = wl_vk_surface->wl_surface;
2418 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2419 "wl_vk_buffer sould be not NULL");
/* Create the wl_buffer on first commit of this tbm_surface. */
2421 if (wl_vk_buffer->wl_buffer == NULL) {
2422 wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2423 wl_vk_display->wl_tbm_client,
2424 wl_vk_buffer->tbm_surface);
/* Register the plain wl_buffer.release listener only when the
 * explicit-sync release path will NOT be used (no acquire fence,
 * or explicit sync disabled on the display). */
2425 if (wl_vk_buffer->wl_buffer &&
2426 (wl_vk_buffer->acquire_fence_fd == -1 ||
2427 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
2428 wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2429 &wl_buffer_release_listener, wl_vk_buffer);
2432 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
2433 "[FATAL] Failed to create wl_buffer");
/* wl_surface version decides which damage request is available below. */
2435 version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
2437 wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
2438 wl_vk_buffer->dx, wl_vk_buffer->dy);
/* No damage rects supplied: damage the whole buffer area. */
2440 if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
2442 wl_surface_damage(wl_surface,
2443 wl_vk_buffer->dx, wl_vk_buffer->dy,
2444 wl_vk_buffer->width, wl_vk_buffer->height);
2446 wl_surface_damage_buffer(wl_surface,
2448 wl_vk_buffer->width, wl_vk_buffer->height);
/* Damage rects are stored as {x, y, w, h} quadruples; the y
 * coordinate is flipped (height - (y + h)) to convert from the
 * client's bottom-left origin to Wayland's top-left origin. */
2452 for (i = 0; i < wl_vk_buffer->num_rects; i++) {
2454 wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
2455 wl_vk_buffer->rects[i * 4 + 3]);
2457 wl_surface_damage(wl_surface,
2458 wl_vk_buffer->rects[i * 4 + 0],
2460 wl_vk_buffer->rects[i * 4 + 2],
2461 wl_vk_buffer->rects[i * 4 + 3]);
2463 wl_surface_damage_buffer(wl_surface,
2464 wl_vk_buffer->rects[i * 4 + 0],
2466 wl_vk_buffer->rects[i * 4 + 2],
2467 wl_vk_buffer->rects[i * 4 + 3]);
/* Explicit sync: pass the acquire fence to the compositor and set up
 * a per-commit release object so the release arrives with (or without)
 * a fence via zwp_release_listner. */
2472 if (wl_vk_display->use_explicit_sync &&
2473 wl_vk_surface->surface_sync &&
2474 wl_vk_buffer->acquire_fence_fd != -1) {
2476 zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
2477 wl_vk_buffer->acquire_fence_fd);
2478 TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
2479 wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
/* The protocol duplicates the fd; our copy can be closed now. */
2480 close(wl_vk_buffer->acquire_fence_fd);
2481 wl_vk_buffer->acquire_fence_fd = -1;
2483 wl_vk_buffer->buffer_release =
2484 zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
2485 if (!wl_vk_buffer->buffer_release) {
2486 TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
2488 zwp_linux_buffer_release_v1_add_listener(
2489 wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
2490 TPL_DEBUG("add explicit_sync_release_listener.");
2494 wl_surface_commit(wl_surface);
/* Flush so the commit reaches the compositor without waiting for the
 * next dispatch. */
2496 wl_display_flush(wl_vk_display->wl_display);
2498 TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2499 wl_vk_buffer->bo_name);
/* Publish the new state and wake any thread blocked on this buffer. */
2501 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2503 wl_vk_buffer->need_to_commit = TPL_FALSE;
2504 wl_vk_buffer->status = COMMITTED;
2506 tpl_gcond_signal(&wl_vk_buffer->cond);
2508 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2511 "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
2512 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
2513 wl_vk_buffer->bo_name);
/* Throttle subsequent commits to vblank when enabled on the display. */
2515 if (wl_vk_display->use_wait_vblank &&
2516 _thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
2517 TPL_ERR("Failed to set wait vblank.");
/* Backend probe: returns whether the given native display handle is a
 * wl_display this backend can drive. */
2521 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2523 if (!native_dpy) return TPL_FALSE;
2525 if (_check_native_handle_is_wl_display(native_dpy))
/* Fills the display backend vtable with this backend's entry points
 * (TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD). */
2532 __tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
2534 TPL_ASSERT(backend);
2536 backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2537 backend->data = NULL;
2539 backend->init = __tpl_wl_vk_display_init;
2540 backend->fini = __tpl_wl_vk_display_fini;
2541 backend->query_config = __tpl_wl_vk_display_query_config;
2542 backend->filter_config = __tpl_wl_vk_display_filter_config;
2543 backend->query_window_supported_buffer_count =
2544 __tpl_wl_vk_display_query_window_supported_buffer_count;
2545 backend->query_window_supported_present_modes =
2546 __tpl_wl_vk_display_query_window_supported_present_modes;
/* Fills the surface backend vtable with this backend's entry points
 * (TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD). */
2550 __tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
2552 TPL_ASSERT(backend);
2554 backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2555 backend->data = NULL;
2557 backend->init = __tpl_wl_vk_surface_init;
2558 backend->fini = __tpl_wl_vk_surface_fini;
2559 backend->validate = __tpl_wl_vk_surface_validate;
2560 backend->cancel_dequeued_buffer =
2561 __tpl_wl_vk_surface_cancel_buffer;
2562 backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
2563 backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
2564 backend->get_swapchain_buffers =
2565 __tpl_wl_vk_surface_get_swapchain_buffers;
2566 backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
2567 backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
2568 backend->set_post_interval =
2569 __tpl_wl_vk_surface_set_post_interval;
/* Returns the exported bo name of the tbm_surface's first (index 0)
 * buffer object; used as a stable id in traces and logs. */
2573 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2575 return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
/* Debug helper: dumps every tracked wl_vk_buffer slot of the surface
 * (pointer, tbm_surface, bo name, status) under buffers_mutex. */
2579 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2583 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2584 TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2585 wl_vk_surface, wl_vk_surface->buffer_cnt);
/* BUFFER_ARRAY_SIZE is the fixed capacity of the tracking array. */
2586 for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2587 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2590 "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2591 idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2592 wl_vk_buffer->bo_name,
2593 status_to_string[wl_vk_buffer->status]);
2596 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);