#define inline __inline__

#include "tpl_internal.h"

#include <sys/eventfd.h>

#include <tbm_bufmgr.h>
#include <tbm_surface.h>
#include <tbm_surface_internal.h>
#include <tbm_surface_queue.h>

#include <wayland-client.h>
#include <wayland-tbm-server.h>
#include <wayland-tbm-client.h>

#include <tdm_client.h>

#ifndef TIZEN_FEATURE_ENABLE
#define TIZEN_FEATURE_ENABLE 1
#endif

#if TIZEN_FEATURE_ENABLE
#include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
#endif

#include "tpl_utils_gthread.h"

#define BUFFER_ARRAY_SIZE 10
#define VK_CLIENT_QUEUE_SIZE 3

static int wl_vk_buffer_key;
#define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
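/* NOTE: KEY_WL_VK_BUFFER uses the address of the static variable above as a
 * process-unique key for tbm user data. Illustrative usage, as it appears in
 * _wl_vk_buffer_create() below:
 *
 *     tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
 *                                        (tbm_data_free)__cb_wl_vk_buffer_free);
 *     tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
 *                                        wl_vk_buffer);
 */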
typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t;
typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t;
typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t;
typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t;
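/* Threading model: all wayland and tdm event handling runs on a dedicated
 * "wl_vk_thread" (see _thread_init()). The API-facing entry points below
 * communicate with that thread through tpl_gsource messages and block for
 * completion with tpl_gcond_wait() on per-object mutex/cond pairs. */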
struct _tpl_wl_vk_display {
    tpl_gsource *disp_source;
    tpl_gthread *thread;
    tpl_gmutex wl_event_mutex;

    struct wl_display *wl_display;
    struct wl_event_queue *ev_queue;
    struct wayland_tbm_client *wl_tbm_client;
    int last_error; /* errno of the last wl_display error */

    tpl_bool_t wl_initialized;

    struct {
        tdm_client *tdm_client;
        tpl_gsource *tdm_source;
        int tdm_display_fd;
        tpl_bool_t tdm_initialized;
        tpl_gmutex tdm_mutex;
        tpl_gcond tdm_cond;
        /* To make sure that tpl_gsource has been successfully finalized. */
        tpl_bool_t gsource_finalized;
    } tdm;

    tpl_bool_t use_wait_vblank;
    tpl_bool_t use_explicit_sync;
    tpl_bool_t prepared;

    /* To make sure that tpl_gsource has been successfully finalized. */
    tpl_bool_t gsource_finalized;
    tpl_gmutex disp_mutex;
    tpl_gcond disp_cond;

    /* device surface capabilities */
    int min_buffer;
    int max_buffer;
    int present_modes;

#if TIZEN_FEATURE_ENABLE
    struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
#endif
};
struct _tpl_wl_vk_swapchain {
    tpl_wl_vk_surface_t *wl_vk_surface;

    tbm_surface_queue_h tbm_queue;
    tpl_result_t result;

    tpl_bool_t create_done;

    struct {
        int width, height;
        tbm_format format;
        int buffer_count;
        int present_mode;
    } properties;

    tbm_surface_h *swapchain_buffers;

    /* [TEMP] To fix the dEQP-VK.wsi.wayland.swapchain.modify.resize crash issue.
     * It will be fixed properly using the old_swapchain handle. */
    tbm_surface_h *old_swapchain_buffers;

    tpl_util_atomic_uint ref_cnt;
};
/* Values are distinct bits because __thread_func_surf_dispatch() tests the
 * received message with '&'. */
typedef enum surf_message {
    NONE_MESSAGE = 0,
    INIT_SURFACE = 1,
    ACQUIRABLE = 2,
    CREATE_QUEUE = 4,
    DESTROY_QUEUE = 8,
} surf_message;
struct _tpl_wl_vk_surface {
    tpl_gsource *surf_source;

    tpl_wl_vk_swapchain_t *swapchain;

    struct wl_surface *wl_surface;
#if TIZEN_FEATURE_ENABLE
    struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
#endif
    tdm_client_vblank *vblank;

    /* surface information */
    int post_interval;

    tpl_wl_vk_display_t *wl_vk_display;
    tpl_surface_t *tpl_surface;

    /* wl_vk_buffer array for buffer tracing */
    tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE];
    int buffer_cnt; /* the number of wl_vk_buffers in use */
    tpl_gmutex buffers_mutex;

    tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */

    tpl_gmutex surf_mutex;
    tpl_gcond surf_cond;

    /* for waiting draw done */
    tpl_bool_t is_activated;
    tpl_bool_t reset; /* TRUE if the queue was reset externally */
    tpl_bool_t vblank_done;
    tpl_bool_t vblank_enable;
    tpl_bool_t initialized_in_thread;

    /* To make sure that tpl_gsource has been successfully finalized. */
    tpl_bool_t gsource_finalized;

    surf_message sent_message;

    int render_done_cnt;
};
typedef enum buffer_status {
    RELEASED = 0, DEQUEUED, ENQUEUED, ACQUIRED,
    WAITING_SIGNALED,  // 4
    WAITING_VBLANK,    // 5
    COMMITTED,         // 6
} buffer_status_t;

static const char *status_to_string[7] = {
    "RELEASED", "DEQUEUED", "ENQUEUED", "ACQUIRED",
    "WAITING_SIGNALED", // 4
    "WAITING_VBLANK",   // 5
    "COMMITTED",        // 6
};
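/* Buffer lifecycle for one frame:
 *   RELEASED -> DEQUEUED (handed to the app) -> ENQUEUED (app rendered)
 *   -> ACQUIRED (taken by wl_vk_thread) -> WAITING_SIGNALED (fence wait)
 *   -> WAITING_VBLANK (FIFO pacing) -> COMMITTED (wl_surface_commit)
 *   and back to RELEASED when the compositor releases the wl_buffer. */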
struct _tpl_wl_vk_buffer {
    tbm_surface_h tbm_surface;
    int bo_name;

    struct wl_buffer *wl_buffer;
    int dx, dy; /* position to attach to wl_surface */
    int width, height; /* size to attach to wl_surface */

    buffer_status_t status; /* for tracing buffer status */
    int idx; /* position index in buffers array of wl_vk_surface */

    /* for damage region */
    int num_rects;
    int *rects;

    /* for checking need_to_commit (frontbuffer mode) */
    tpl_bool_t need_to_commit;

#if TIZEN_FEATURE_ENABLE
    /* to get release event via zwp_linux_buffer_release_v1 */
    struct zwp_linux_buffer_release_v1 *buffer_release;
#endif

    /* each buffer owns its release_fence_fd until it passes ownership
     * to the display server. */
    int32_t release_fence_fd;

    /* each buffer owns its acquire_fence_fd.
     * If zwp_linux_buffer_release_v1 is used, ownership of this fd
     * is passed to the display server.
     * Otherwise it is used as a fence to wait for render done. */
    int32_t acquire_fence_fd;

    tpl_gmutex mutex;
    tpl_gcond cond;

    tpl_wl_vk_surface_t *wl_vk_surface;
};
static void
_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
static int
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
static void
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
static tpl_result_t
_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
static void
_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
static void
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
                          tpl_wl_vk_buffer_t *wl_vk_buffer);
static tpl_bool_t
_check_native_handle_is_wl_display(tpl_handle_t native_dpy)
{
    struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;

    if (!wl_vk_native_dpy) {
        TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
        return TPL_FALSE;
    }

    /* MAGIC CHECK: A native display handle is a wl_display if the dereferenced
     * first value is a memory address pointing to the wl_display_interface
     * structure. */
    if (wl_vk_native_dpy == &wl_display_interface)
        return TPL_TRUE;

    if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
                strlen(wl_display_interface.name)) == 0) {
        return TPL_TRUE;
    }

    return TPL_FALSE;
}
static tpl_bool_t
__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
{
    tpl_wl_vk_display_t *wl_vk_display = NULL;
    tdm_error tdm_err = TDM_ERROR_NONE;

    TPL_IGNORE(message);

    wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
    if (!wl_vk_display) {
        TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
        TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
        return TPL_FALSE;
    }

    tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client);

    /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
     * When tdm_source is no longer available due to an unexpected situation,
     * wl_vk_thread must remove it from the thread and destroy it.
     * In that case, tdm_vblank can no longer be used for surfaces and displays
     * that used this tdm_source. */
    if (tdm_err != TDM_ERROR_NONE) {
        TPL_ERR("Error occurred in tdm_client_handle_events. tdm_err(%d)",
                tdm_err);
        TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);

        tpl_gsource_destroy(gsource, TPL_FALSE);

        wl_vk_display->tdm.tdm_source = NULL;

        return TPL_FALSE;
    }

    return TPL_TRUE;
}
static void
__thread_func_tdm_finalize(tpl_gsource *gsource)
{
    tpl_wl_vk_display_t *wl_vk_display = NULL;

    wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

    tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);

    TPL_INFO("[TDM_CLIENT_FINI]",
             "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
             wl_vk_display, wl_vk_display->tdm.tdm_client,
             wl_vk_display->tdm.tdm_display_fd);

    if (wl_vk_display->tdm.tdm_client) {
        tdm_client_destroy(wl_vk_display->tdm.tdm_client);
        wl_vk_display->tdm.tdm_client = NULL;
        wl_vk_display->tdm.tdm_display_fd = -1;
    }

    wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
    wl_vk_display->tdm.gsource_finalized = TPL_TRUE;

    tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond);
    tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
}
static tpl_gsource_functions tdm_funcs = {
    .prepare = NULL,
    .check = NULL,
    .dispatch = __thread_func_tdm_dispatch,
    .finalize = __thread_func_tdm_finalize,
};
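/* The tdm gsource installs no prepare/check hooks: readiness is driven purely
 * by POLLIN on tdm_display_fd, and dispatch just pumps
 * tdm_client_handle_events(). */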
static tpl_result_t
_thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
{
    tdm_client *tdm_client = NULL;
    int tdm_display_fd = -1;
    tdm_error tdm_err = TDM_ERROR_NONE;

    tdm_client = tdm_client_create(&tdm_err);
    if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
        TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
        return TPL_ERROR_INVALID_OPERATION;
    }

    tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
    if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
        TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
        tdm_client_destroy(tdm_client);
        return TPL_ERROR_INVALID_OPERATION;
    }

    wl_vk_display->tdm.tdm_display_fd = tdm_display_fd;
    wl_vk_display->tdm.tdm_client = tdm_client;
    wl_vk_display->tdm.tdm_source = NULL;
    wl_vk_display->tdm.tdm_initialized = TPL_TRUE;

    TPL_INFO("[TDM_CLIENT_INIT]",
             "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
             wl_vk_display, tdm_client, tdm_display_fd);

    return TPL_ERROR_NONE;
}
static void
__cb_wl_registry_global_callback(void *data, struct wl_registry *wl_registry,
                                 uint32_t name, const char *interface,
                                 uint32_t version)
{
    TPL_IGNORE(version);
#if TIZEN_FEATURE_ENABLE
    tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;

    if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
        char *env = tpl_getenv("TPL_EFS");
        if (env && !atoi(env)) {
            wl_vk_display->use_explicit_sync = TPL_FALSE;
        } else {
            wl_vk_display->explicit_sync =
                wl_registry_bind(wl_registry, name,
                                 &zwp_linux_explicit_synchronization_v1_interface, 1);
            wl_vk_display->use_explicit_sync = TPL_TRUE;
            TPL_LOG_D("[REGISTRY_BIND]",
                      "wl_vk_display(%p) bind zwp_linux_explicit_synchronization_v1_interface",
                      wl_vk_display);
        }
    }
#endif
}
static void
__cb_wl_registry_global_remove_callback(void *data,
                                        struct wl_registry *wl_registry,
                                        uint32_t name)
{
    TPL_IGNORE(data);
    TPL_IGNORE(wl_registry);
    TPL_IGNORE(name);
}

static const struct wl_registry_listener registry_listener = {
    __cb_wl_registry_global_callback,
    __cb_wl_registry_global_remove_callback
};
static void
_wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
                      const char *func_name)
{
    int dpy_err;
    char buf[1024]; /* buffer size is an assumption; original was elided */
    strerror_r(errno, buf, sizeof(buf));

    if (wl_vk_display->last_error == errno)
        return;

    TPL_ERR("failed to %s. error:%d(%s)", func_name, errno, buf);

    dpy_err = wl_display_get_error(wl_vk_display->wl_display);
    if (dpy_err == EPROTO) {
        const struct wl_interface *err_interface;
        uint32_t err_proxy_id, err_code;
        err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
                                                 &err_interface,
                                                 &err_proxy_id);
        TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
                err_interface->name, err_code, err_proxy_id);
    }

    wl_vk_display->last_error = errno;
}
static tpl_result_t
_thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
{
    struct wl_registry *registry = NULL;
    struct wl_event_queue *queue = NULL;
    struct wl_display *display_wrapper = NULL;
    struct wl_proxy *wl_tbm = NULL;
    struct wayland_tbm_client *wl_tbm_client = NULL;
    int ret;
    tpl_result_t result = TPL_ERROR_NONE;

    queue = wl_display_create_queue(wl_vk_display->wl_display);
    if (!queue) {
        TPL_ERR("Failed to create wl_queue wl_display(%p)",
                wl_vk_display->wl_display);
        result = TPL_ERROR_INVALID_OPERATION;
        goto fini;
    }

    wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
    if (!wl_vk_display->ev_queue) {
        TPL_ERR("Failed to create wl_queue wl_display(%p)",
                wl_vk_display->wl_display);
        result = TPL_ERROR_INVALID_OPERATION;
        goto fini;
    }

    display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
    if (!display_wrapper) {
        TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
                wl_vk_display->wl_display);
        result = TPL_ERROR_INVALID_OPERATION;
        goto fini;
    }

    wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);

    registry = wl_display_get_registry(display_wrapper);
    if (!registry) {
        TPL_ERR("Failed to create wl_registry");
        result = TPL_ERROR_INVALID_OPERATION;
        goto fini;
    }

    wl_proxy_wrapper_destroy(display_wrapper);
    display_wrapper = NULL;

    wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
    if (!wl_tbm_client) {
        TPL_ERR("Failed to initialize wl_tbm_client.");
        result = TPL_ERROR_INVALID_CONNECTION;
        goto fini;
    }

    wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
    if (!wl_tbm) {
        TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
        result = TPL_ERROR_INVALID_CONNECTION;
        goto fini;
    }

    wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
    wl_vk_display->wl_tbm_client = wl_tbm_client;

    if (wl_registry_add_listener(registry, &registry_listener,
                                 wl_vk_display) < 0) {
        TPL_ERR("Failed to wl_registry_add_listener");
        result = TPL_ERROR_INVALID_OPERATION;
        goto fini;
    }

    ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
    if (ret == -1) {
        _wl_display_print_err(wl_vk_display, "roundtrip_queue");
        result = TPL_ERROR_INVALID_OPERATION;
        goto fini;
    }

#if TIZEN_FEATURE_ENABLE
    if (wl_vk_display->explicit_sync) {
        wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
                           wl_vk_display->ev_queue);
        TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
                  wl_vk_display->explicit_sync);
    }
#endif

    wl_vk_display->wl_initialized = TPL_TRUE;

    TPL_INFO("[WAYLAND_INIT]",
             "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
             wl_vk_display, wl_vk_display->wl_display,
             wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
#if TIZEN_FEATURE_ENABLE
    TPL_INFO("[WAYLAND_INIT]",
             "explicit_sync(%p)",
             wl_vk_display->explicit_sync);
#endif

fini:
    if (display_wrapper)
        wl_proxy_wrapper_destroy(display_wrapper);
    if (registry)
        wl_registry_destroy(registry);
    if (queue)
        wl_event_queue_destroy(queue);

    return result;
}
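/* Design note: the backend never dispatches the application's default
 * wl_event_queue. A private ev_queue is created above and every proxy owned
 * by this backend (wl_tbm, explicit_sync, ...) is moved onto it with
 * wl_proxy_set_queue(), so wl_vk_thread can read and dispatch events without
 * racing the application's own wl_display_dispatch(). */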
static void
_thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
{
    /* If wl_vk_display is in prepared state, cancel it */
    if (wl_vk_display->prepared) {
        wl_display_cancel_read(wl_vk_display->wl_display);
        wl_vk_display->prepared = TPL_FALSE;
    }

    if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
                                          wl_vk_display->ev_queue) == -1) {
        _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
    }

#if TIZEN_FEATURE_ENABLE
    if (wl_vk_display->explicit_sync) {
        TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
                 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
                 wl_vk_display, wl_vk_display->explicit_sync);
        zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
        wl_vk_display->explicit_sync = NULL;
    }
#endif

    if (wl_vk_display->wl_tbm_client) {
        struct wl_proxy *wl_tbm = NULL;

        wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
                     wl_vk_display->wl_tbm_client);
        if (wl_tbm)
            wl_proxy_set_queue(wl_tbm, NULL);

        TPL_INFO("[WL_TBM_DEINIT]",
                 "wl_vk_display(%p) wl_tbm_client(%p)",
                 wl_vk_display, wl_vk_display->wl_tbm_client);
        wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
        wl_vk_display->wl_tbm_client = NULL;
    }

    wl_event_queue_destroy(wl_vk_display->ev_queue);

    wl_vk_display->wl_initialized = TPL_FALSE;

    TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
             wl_vk_display, wl_vk_display->wl_display);
}
static void *
_thread_init(void *data)
{
    tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;

    if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
        TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
                wl_vk_display, wl_vk_display->wl_display);
    }

    if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
        TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VBLANK:DISABLED");
    }

    return wl_vk_display;
}
static tpl_bool_t
__thread_func_disp_prepare(tpl_gsource *gsource)
{
    tpl_wl_vk_display_t *wl_vk_display =
        (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

    /* If this wl_vk_display is already prepared,
     * do nothing in this function. */
    if (wl_vk_display->prepared)
        return TPL_FALSE;

    /* If there is a last_error, there is no need to poll,
     * so skip directly to dispatch.
     * prepare -> dispatch */
    if (wl_vk_display->last_error)
        return TPL_TRUE;

    while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
                                         wl_vk_display->ev_queue) != 0) {
        if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
                                              wl_vk_display->ev_queue) == -1) {
            _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
        }
    }

    wl_vk_display->prepared = TPL_TRUE;

    wl_display_flush(wl_vk_display->wl_display);

    return TPL_FALSE;
}
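/* This is the standard libwayland multi-thread read pattern: keep calling
 * wl_display_prepare_read_queue() until the queue is drained, then poll the
 * display fd and finish with either wl_display_read_events() or
 * wl_display_cancel_read() in the check stage below. */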
static tpl_bool_t
__thread_func_disp_check(tpl_gsource *gsource)
{
    tpl_wl_vk_display_t *wl_vk_display =
        (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
    tpl_bool_t ret = TPL_FALSE;

    if (!wl_vk_display->prepared)
        return ret;

    /* If prepared, but last_error is set,
     * cancel_read is executed and FALSE is returned.
     * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
     * and skipping disp_check from prepare to disp_dispatch.
     * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
    if (wl_vk_display->prepared && wl_vk_display->last_error) {
        wl_display_cancel_read(wl_vk_display->wl_display);
        return ret;
    }

    if (tpl_gsource_check_io_condition(gsource)) {
        if (wl_display_read_events(wl_vk_display->wl_display) == -1)
            _wl_display_print_err(wl_vk_display, "read_event");
        ret = TPL_TRUE;
    } else {
        wl_display_cancel_read(wl_vk_display->wl_display);
        ret = TPL_FALSE;
    }

    wl_vk_display->prepared = TPL_FALSE;

    return ret;
}
static tpl_bool_t
__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
{
    tpl_wl_vk_display_t *wl_vk_display =
        (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

    TPL_IGNORE(message);

    /* If there is last_error, SOURCE_REMOVE should be returned
     * to remove the gsource from the main loop.
     * This is because wl_vk_display is not valid since last_error was set. */
    if (wl_vk_display->last_error) {
        return TPL_FALSE;
    }

    tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
    if (tpl_gsource_check_io_condition(gsource)) {
        if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
                                              wl_vk_display->ev_queue) == -1) {
            _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
        }
    }

    wl_display_flush(wl_vk_display->wl_display);
    tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

    return TPL_TRUE;
}
static void
__thread_func_disp_finalize(tpl_gsource *gsource)
{
    tpl_wl_vk_display_t *wl_vk_display =
        (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);

    tpl_gmutex_lock(&wl_vk_display->disp_mutex);
    TPL_LOG_D("[D_FINALIZE]", "wl_vk_display(%p) tpl_gsource(%p)",
              wl_vk_display, gsource);

    if (wl_vk_display->wl_initialized)
        _thread_wl_display_fini(wl_vk_display);

    wl_vk_display->gsource_finalized = TPL_TRUE;

    tpl_gcond_signal(&wl_vk_display->disp_cond);
    tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
}
static tpl_gsource_functions disp_funcs = {
    .prepare = __thread_func_disp_prepare,
    .check = __thread_func_disp_check,
    .dispatch = __thread_func_disp_dispatch,
    .finalize = __thread_func_disp_finalize,
};
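/* prepare/check/dispatch presumably mirror GLib's GSource stages: prepare may
 * short-cut straight to dispatch on a stored last_error, check consumes the
 * poll result, and returning TPL_FALSE from dispatch removes the source from
 * the loop. */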
static tpl_result_t
__tpl_wl_vk_display_init(tpl_display_t *display)
{
    TPL_ASSERT(display);

    tpl_wl_vk_display_t *wl_vk_display = NULL;

    /* Do not allow default display in wayland */
    if (!display->native_handle) {
        TPL_ERR("Invalid native handle for display.");
        return TPL_ERROR_INVALID_PARAMETER;
    }

    if (!_check_native_handle_is_wl_display(display->native_handle)) {
        TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
        return TPL_ERROR_INVALID_PARAMETER;
    }

    wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
                        sizeof(tpl_wl_vk_display_t));
    if (!wl_vk_display) {
        TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
        return TPL_ERROR_OUT_OF_MEMORY;
    }

    display->backend.data = wl_vk_display;
    display->bufmgr_fd = -1;

    wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
    wl_vk_display->wl_initialized = TPL_FALSE;

    wl_vk_display->ev_queue = NULL;
    wl_vk_display->wl_display = (struct wl_display *)display->native_handle;
    wl_vk_display->last_error = 0;
    wl_vk_display->use_explicit_sync = TPL_FALSE; // default disabled
    wl_vk_display->prepared = TPL_FALSE;

    /* Wayland Interfaces */
#if TIZEN_FEATURE_ENABLE
    wl_vk_display->explicit_sync = NULL;
#endif
    wl_vk_display->wl_tbm_client = NULL;

    /* Vulkan specific surface capabilities */
    wl_vk_display->min_buffer = 2;
    wl_vk_display->max_buffer = VK_CLIENT_QUEUE_SIZE;
    wl_vk_display->present_modes = TPL_DISPLAY_PRESENT_MODE_FIFO;

    wl_vk_display->use_wait_vblank = TPL_TRUE; // default enabled
    {
        char *env = tpl_getenv("TPL_WAIT_VBLANK");
        if (env && !atoi(env)) {
            wl_vk_display->use_wait_vblank = TPL_FALSE;
        }
    }

    tpl_gmutex_init(&wl_vk_display->wl_event_mutex);

    tpl_gmutex_init(&wl_vk_display->disp_mutex);
    tpl_gcond_init(&wl_vk_display->disp_cond);

    /* Create gthread */
    wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
                                (tpl_gthread_func)_thread_init,
                                (void *)wl_vk_display);
    if (!wl_vk_display->thread) {
        TPL_ERR("Failed to create wl_vk_thread");
        goto free_display;
    }

    wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
                                     (void *)wl_vk_display,
                                     wl_display_get_fd(wl_vk_display->wl_display),
                                     FD_TYPE_SOCKET, /* assumed; original fd-type arg was elided */
                                     &disp_funcs, SOURCE_TYPE_NORMAL);
    if (!wl_vk_display->disp_source) {
        TPL_ERR("Failed to add native_display(%p) to thread(%p)",
                display->native_handle,
                wl_vk_display->thread);
        goto free_display;
    }

    tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex);
    tpl_gcond_init(&wl_vk_display->tdm.tdm_cond);

    wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread,
                                        (void *)wl_vk_display,
                                        wl_vk_display->tdm.tdm_display_fd,
                                        FD_TYPE_SOCKET, /* assumed; original fd-type arg was elided */
                                        &tdm_funcs, SOURCE_TYPE_NORMAL);
    if (!wl_vk_display->tdm.tdm_source) {
        TPL_ERR("Failed to create tdm_gsource\n");
        goto free_display;
    }

    TPL_INFO("[DISPLAY_INIT]",
             "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
             wl_vk_display,
             wl_vk_display->thread,
             wl_vk_display->wl_display);

    TPL_INFO("[DISPLAY_INIT]",
             "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
             wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
             wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");

    return TPL_ERROR_NONE;

free_display:
    if (wl_vk_display->tdm.tdm_source) {
        tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
        while (!wl_vk_display->tdm.gsource_finalized) {
            tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
            tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
        }
        tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
    }

    if (wl_vk_display->disp_source) {
        tpl_gmutex_lock(&wl_vk_display->disp_mutex);
        while (!wl_vk_display->gsource_finalized) {
            tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
            tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
        }
        tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
    }

    if (wl_vk_display->thread) {
        tpl_gthread_destroy(wl_vk_display->thread);
    }

    tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
    tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
    tpl_gcond_clear(&wl_vk_display->disp_cond);
    tpl_gmutex_clear(&wl_vk_display->disp_mutex);

    wl_vk_display->thread = NULL;
    free(wl_vk_display);

    display->backend.data = NULL;
    return TPL_ERROR_INVALID_OPERATION;
}
static tpl_result_t
__tpl_wl_vk_display_fini(tpl_display_t *display)
{
    tpl_wl_vk_display_t *wl_vk_display;

    TPL_ASSERT(display);

    wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

    TPL_INFO("[DISPLAY_FINI]",
             "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
             wl_vk_display,
             wl_vk_display->thread,
             wl_vk_display->wl_display);

    if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) {
        /* This is a protection to prevent problems that arise in unexpected
         * situations where g_cond_wait cannot work normally.
         * When calling tpl_gsource_destroy() with destroy_in_thread set to
         * TPL_TRUE, the caller must call tpl_gcond_wait() in a loop that
         * checks the finalized flag. */
        tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
        while (!wl_vk_display->tdm.gsource_finalized) {
            tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
            tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
        }
        wl_vk_display->tdm.tdm_source = NULL;
        tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
    }

    /* Same protection as above, applied to the display gsource. */
    tpl_gmutex_lock(&wl_vk_display->disp_mutex);
    while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) {
        tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
        tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
    }
    wl_vk_display->disp_source = NULL;
    tpl_gmutex_unlock(&wl_vk_display->disp_mutex);

    if (wl_vk_display->thread) {
        tpl_gthread_destroy(wl_vk_display->thread);
        wl_vk_display->thread = NULL;
    }

    tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
    tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
    tpl_gcond_clear(&wl_vk_display->disp_cond);
    tpl_gmutex_clear(&wl_vk_display->disp_mutex);

    tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);

    free(wl_vk_display);

    display->backend.data = NULL;

    return TPL_ERROR_NONE;
}
static tpl_result_t
__tpl_wl_vk_display_query_config(tpl_display_t *display,
                                 tpl_surface_type_t surface_type,
                                 int red_size, int green_size,
                                 int blue_size, int alpha_size,
                                 int color_depth, int *native_visual_id,
                                 tpl_bool_t *is_slow)
{
    TPL_ASSERT(display);

    if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
            green_size == 8 && blue_size == 8 &&
            (color_depth == 32 || color_depth == 24)) {

        if (alpha_size == 8) {
            if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
            if (is_slow) *is_slow = TPL_FALSE;
            return TPL_ERROR_NONE;
        }
        if (alpha_size == 0) {
            if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
            if (is_slow) *is_slow = TPL_FALSE;
            return TPL_ERROR_NONE;
        }
    }

    return TPL_ERROR_INVALID_PARAMETER;
}
static tpl_result_t
__tpl_wl_vk_display_filter_config(tpl_display_t *display,
                                  int *visual_id,
                                  int alpha_size)
{
    TPL_IGNORE(display);
    TPL_IGNORE(visual_id);
    TPL_IGNORE(alpha_size);
    return TPL_ERROR_NONE;
}
static tpl_result_t
__tpl_wl_vk_display_query_window_supported_buffer_count(
    tpl_display_t *display,
    tpl_handle_t window, int *min, int *max)
{
    tpl_wl_vk_display_t *wl_vk_display = NULL;

    TPL_ASSERT(display);
    TPL_IGNORE(window);

    wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

    if (min) *min = wl_vk_display->min_buffer;
    if (max) *max = wl_vk_display->max_buffer;

    return TPL_ERROR_NONE;
}
static tpl_result_t
__tpl_wl_vk_display_query_window_supported_present_modes(
    tpl_display_t *display,
    tpl_handle_t window, int *present_modes)
{
    tpl_wl_vk_display_t *wl_vk_display = NULL;

    TPL_ASSERT(display);
    TPL_IGNORE(window);

    wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

    if (present_modes) {
        *present_modes = wl_vk_display->present_modes;
    }

    return TPL_ERROR_NONE;
}
static void
_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
{
    tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
    tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
    tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
    tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
    tpl_bool_t need_to_release = TPL_FALSE;
    tpl_bool_t need_to_cancel = TPL_FALSE;
    buffer_status_t status = RELEASED;
    int idx = 0;

    while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
        tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
        tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
        wl_vk_buffer = wl_vk_surface->buffers[idx];

        if (wl_vk_buffer) {
            wl_vk_surface->buffers[idx] = NULL;
            wl_vk_surface->buffer_cnt--;
        } else {
            tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
            tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
            idx++;
            continue;
        }

        tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

        tpl_gmutex_lock(&wl_vk_buffer->mutex);

        status = wl_vk_buffer->status;

        TPL_INFO("[BUFFER_CLEAR]",
                 "[%d] wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
                 idx, wl_vk_surface, wl_vk_buffer,
                 wl_vk_buffer->tbm_surface,
                 status_to_string[status]);

        if (status >= ENQUEUED) {
            tpl_bool_t need_to_wait = TPL_FALSE;
            tpl_result_t wait_result = TPL_ERROR_NONE;

            if (!wl_vk_display->use_explicit_sync &&
                status < WAITING_VBLANK)
                need_to_wait = TPL_TRUE;

            if (wl_vk_display->use_explicit_sync &&
                status < COMMITTED) /* assumed threshold; original was elided */
                need_to_wait = TPL_TRUE;

            if (need_to_wait) {
                tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
                wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond,
                                                   &wl_vk_buffer->mutex,
                                                   16); /* ms; assumed value, original was elided */
                tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

                status = wl_vk_buffer->status;

                if (wait_result == TPL_ERROR_TIME_OUT)
                    TPL_WARN("timeout occurred while waiting for signaled. wl_vk_buffer(%p)",
                             wl_vk_buffer);
            }
        }

        /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
        /* It has been acquired but has not yet been released, so this
         * buffer must be released. */
        need_to_release = (status >= ACQUIRED && status <= COMMITTED);

        /* After dequeue, it has not been enqueued yet,
         * so cancel_dequeue must be performed. */
        need_to_cancel = (status == DEQUEUED);

        if (swapchain && swapchain->tbm_queue) {
            if (need_to_release) {
                tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
                                                    wl_vk_buffer->tbm_surface);
                if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
                    TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
                            wl_vk_buffer->tbm_surface, tsq_err);
            }

            if (need_to_cancel) {
                tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
                                                           wl_vk_buffer->tbm_surface);
                if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
                    TPL_ERR("Failed to cancel dequeue. tbm_surface(%p) tsq_err(%d)",
                            wl_vk_buffer->tbm_surface, tsq_err);
            }
        }

        wl_vk_buffer->status = RELEASED;

        tpl_gmutex_unlock(&wl_vk_buffer->mutex);

        if (need_to_release || need_to_cancel)
            tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);

        tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

        idx++;
    }
}
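/* A sketch of the wait rule above, under the assumptions noted in the code:
 * without explicit sync a buffer can be force-released once it reaches
 * WAITING_VBLANK (the commit path owns it from there); with explicit sync
 * the wait extends until COMMITTED, since fence fd ownership has already
 * been handed to the display server. */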
static tdm_client_vblank*
_thread_create_tdm_client_vblank(tdm_client *tdm_client)
{
    tdm_client_vblank *vblank = NULL;
    tdm_client_output *tdm_output = NULL;
    tdm_error tdm_err = TDM_ERROR_NONE;

    if (!tdm_client) {
        TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
        return NULL;
    }

    tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
    if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
        TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
        return NULL;
    }

    vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
    if (!vblank || tdm_err != TDM_ERROR_NONE) {
        TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
        return NULL;
    }

    tdm_err = tdm_client_handle_pending_events(tdm_client);
    if (tdm_err != TDM_ERROR_NONE) {
        TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err);
    }

    tdm_client_vblank_set_enable_fake(vblank, 1);
    tdm_client_vblank_set_sync(vblank, 0);

    return vblank;
}
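/* The enable_fake(1)/set_sync(0) pair above: fake vblank events keep firing
 * (timer-based) even when the output is off, so frame pacing never stalls,
 * and vblank waits are requested asynchronously, completing via the tdm fd
 * handled by __thread_func_tdm_dispatch(). */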
static void
_thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
{
    tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

    /* tbm_surface_queue will be created at swapchain_create */

    if (wl_vk_display->use_wait_vblank) {
        wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
                                    wl_vk_display->tdm.tdm_client);
        if (wl_vk_surface->vblank) {
            TPL_INFO("[VBLANK_INIT]",
                     "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
                     wl_vk_surface, wl_vk_display->tdm.tdm_client,
                     wl_vk_surface->vblank);

            wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
            if (!wl_vk_surface->vblank_waiting_buffers) {
                tdm_client_vblank_destroy(wl_vk_surface->vblank);
                wl_vk_surface->vblank = NULL;
            }
        }
    }

#if TIZEN_FEATURE_ENABLE
    if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
        wl_vk_surface->surface_sync =
            zwp_linux_explicit_synchronization_v1_get_synchronization(
                wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
        if (wl_vk_surface->surface_sync) {
            TPL_INFO("[EXPLICIT_SYNC_INIT]",
                     "wl_vk_surface(%p) surface_sync(%p)",
                     wl_vk_surface, wl_vk_surface->surface_sync);
        } else {
            TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
                     wl_vk_surface);
            wl_vk_display->use_explicit_sync = TPL_FALSE;
        }
    }
#endif

    wl_vk_surface->vblank_enable = (wl_vk_surface->vblank != NULL &&
                                    wl_vk_surface->post_interval > 0);
}
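/* Both vblank and explicit-sync setup above are best-effort: on failure the
 * surface silently falls back to non-paced commits / implicit sync. */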
static void
_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
{
    TPL_INFO("[SURFACE_FINI]",
             "wl_vk_surface(%p) wl_surface(%p)",
             wl_vk_surface, wl_vk_surface->wl_surface);

    if (wl_vk_surface->vblank_waiting_buffers) {
        __tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
        wl_vk_surface->vblank_waiting_buffers = NULL;
    }

#if TIZEN_FEATURE_ENABLE
    if (wl_vk_surface->surface_sync) {
        TPL_INFO("[SURFACE_SYNC_DESTROY]",
                 "wl_vk_surface(%p) surface_sync(%p)",
                 wl_vk_surface, wl_vk_surface->surface_sync);
        zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
        wl_vk_surface->surface_sync = NULL;
    }
#endif

    if (wl_vk_surface->vblank) {
        TPL_INFO("[VBLANK_DESTROY]",
                 "wl_vk_surface(%p) vblank(%p)",
                 wl_vk_surface, wl_vk_surface->vblank);
        tdm_client_vblank_destroy(wl_vk_surface->vblank);
        wl_vk_surface->vblank = NULL;
    }
}
static tpl_bool_t
__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
{
    tpl_wl_vk_surface_t *wl_vk_surface = NULL;

    wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);

    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    if (message & INIT_SURFACE) { /* Initialize surface */
        TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) initialize message received!",
                  wl_vk_surface);
        _thread_wl_vk_surface_init(wl_vk_surface);
        wl_vk_surface->initialized_in_thread = TPL_TRUE;
        tpl_gcond_signal(&wl_vk_surface->surf_cond);
    }

    if (message & ACQUIRABLE) { /* Acquirable message */
        TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) acquirable message received!",
                  wl_vk_surface);
        if (_thread_surface_queue_acquire(wl_vk_surface)
            != TPL_ERROR_NONE) {
            TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
                    wl_vk_surface);
        }
    }

    if (message & CREATE_QUEUE) { /* Create tbm_surface_queue */
        TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) queue creation message received!",
                  wl_vk_surface);
        if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
            != TPL_ERROR_NONE) {
            TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
                    wl_vk_surface);
        }
        tpl_gcond_signal(&wl_vk_surface->surf_cond);
    }

    if (message & DESTROY_QUEUE) { /* swapchain destroy */
        TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) swapchain destroy message received!",
                  wl_vk_surface);
        _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
        tpl_gcond_signal(&wl_vk_surface->surf_cond);
    }

    /* reset to NONE_MESSAGE */
    wl_vk_surface->sent_message = NONE_MESSAGE;

    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

    return TPL_TRUE;
}
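/* Sender-side counterpart of the handler above (illustrative; this exact
 * pattern appears in the entry points below):
 *
 *     tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
 *     wl_vk_surface->sent_message = CREATE_QUEUE;
 *     tpl_gsource_send_message(wl_vk_surface->surf_source,
 *                              wl_vk_surface->sent_message);
 *     while (!done_condition)
 *         tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
 *     tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
 */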
static void
__thread_func_surf_finalize(tpl_gsource *gsource)
{
    tpl_wl_vk_surface_t *wl_vk_surface = NULL;

    wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
    TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    TPL_LOG_D("[S_FINALIZE]", "wl_vk_surface(%p) tpl_gsource(%p)",
              wl_vk_surface, gsource);

    _thread_wl_vk_surface_fini(wl_vk_surface);

    wl_vk_surface->gsource_finalized = TPL_TRUE;

    tpl_gcond_signal(&wl_vk_surface->surf_cond);
    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}

static tpl_gsource_functions surf_funcs = {
    .prepare = NULL,
    .check = NULL,
    .dispatch = __thread_func_surf_dispatch,
    .finalize = __thread_func_surf_finalize,
};
static tpl_result_t
__tpl_wl_vk_surface_init(tpl_surface_t *surface)
{
    tpl_wl_vk_surface_t *wl_vk_surface = NULL;
    tpl_wl_vk_display_t *wl_vk_display = NULL;
    tpl_gsource *surf_source = NULL;

    TPL_ASSERT(surface);
    TPL_ASSERT(surface->display);
    TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
    TPL_ASSERT(surface->native_handle);

    wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

    wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
                        sizeof(tpl_wl_vk_surface_t));
    if (!wl_vk_surface) {
        TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
        return TPL_ERROR_OUT_OF_MEMORY;
    }

    surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
                                     -1, FD_TYPE_NONE, &surf_funcs, SOURCE_TYPE_NORMAL);
    if (!surf_source) {
        TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
                wl_vk_surface);
        free(wl_vk_surface);
        surface->backend.data = NULL;
        return TPL_ERROR_INVALID_OPERATION;
    }

    surface->backend.data = (void *)wl_vk_surface;
    surface->width = -1;
    surface->height = -1;

    wl_vk_surface->surf_source = surf_source;
    wl_vk_surface->swapchain = NULL;

    wl_vk_surface->wl_vk_display = wl_vk_display;
    wl_vk_surface->wl_surface = (struct wl_surface *)surface->native_handle;
    wl_vk_surface->tpl_surface = surface;

    wl_vk_surface->reset = TPL_FALSE;
    wl_vk_surface->is_activated = TPL_FALSE;
    wl_vk_surface->vblank_done = TPL_TRUE;
    wl_vk_surface->initialized_in_thread = TPL_FALSE;

    wl_vk_surface->render_done_cnt = 0;

    wl_vk_surface->vblank = NULL;
    wl_vk_surface->vblank_enable = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
    wl_vk_surface->surface_sync = NULL;
#endif

    wl_vk_surface->sent_message = NONE_MESSAGE;

    wl_vk_surface->post_interval = surface->post_interval;

    {
        int i;
        for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
            wl_vk_surface->buffers[i] = NULL;
        wl_vk_surface->buffer_cnt = 0;
    }

    tpl_gmutex_init(&wl_vk_surface->surf_mutex);
    tpl_gcond_init(&wl_vk_surface->surf_cond);

    tpl_gmutex_init(&wl_vk_surface->buffers_mutex);

    /* Initialize in thread */
    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    wl_vk_surface->sent_message = INIT_SURFACE;
    tpl_gsource_send_message(wl_vk_surface->surf_source,
                             wl_vk_surface->sent_message);
    while (!wl_vk_surface->initialized_in_thread)
        tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

    TPL_INFO("[SURFACE_INIT]",
             "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
             surface, wl_vk_surface, wl_vk_surface->surf_source);

    return TPL_ERROR_NONE;
}
static void
__tpl_wl_vk_surface_fini(tpl_surface_t *surface)
{
    tpl_wl_vk_surface_t *wl_vk_surface = NULL;
    tpl_wl_vk_display_t *wl_vk_display = NULL;

    TPL_ASSERT(surface);
    TPL_ASSERT(surface->display);

    wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
    TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

    wl_vk_display = (tpl_wl_vk_display_t *)
                    surface->display->backend.data;
    TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

    TPL_INFO("[SURFACE_FINI][BEGIN]",
             "wl_vk_surface(%p) wl_surface(%p)",
             wl_vk_surface, wl_vk_surface->wl_surface);

    if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
        /* finalize swapchain */
    }

    wl_vk_surface->swapchain = NULL;

    /* This is a protection to prevent problems that arise in unexpected
     * situations where g_cond_wait cannot work normally.
     * When calling tpl_gsource_destroy() with destroy_in_thread set to
     * TPL_TRUE, the caller must call tpl_gcond_wait() in a loop that checks
     * the finalized flag. */
    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) {
        tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
        tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
    }
    wl_vk_surface->surf_source = NULL;
    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

    _print_buffer_lists(wl_vk_surface);

    wl_vk_surface->wl_surface = NULL;
    wl_vk_surface->wl_vk_display = NULL;
    wl_vk_surface->tpl_surface = NULL;

    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
    tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
    tpl_gcond_clear(&wl_vk_surface->surf_cond);

    TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);

    free(wl_vk_surface);
    surface->backend.data = NULL;
}
static tpl_result_t
__tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
                                      int post_interval)
{
    tpl_wl_vk_surface_t *wl_vk_surface = NULL;

    TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);

    wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;

    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

    TPL_INFO("[SET_POST_INTERVAL]",
             "wl_vk_surface(%p) post_interval(%d -> %d)",
             wl_vk_surface, wl_vk_surface->post_interval, post_interval);

    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    wl_vk_surface->post_interval = post_interval;
    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

    return TPL_ERROR_NONE;
}
static tpl_bool_t
__tpl_wl_vk_surface_validate(tpl_surface_t *surface)
{
    TPL_ASSERT(surface);
    TPL_ASSERT(surface->backend.data);

    tpl_wl_vk_surface_t *wl_vk_surface =
        (tpl_wl_vk_surface_t *)surface->backend.data;

    return !(wl_vk_surface->reset);
}
static void
__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
                              void *data)
{
    tpl_wl_vk_surface_t *wl_vk_surface = NULL;
    tpl_wl_vk_display_t *wl_vk_display = NULL;
    tpl_wl_vk_swapchain_t *swapchain = NULL;
    tpl_surface_t *surface = NULL;
    tpl_bool_t is_activated = TPL_FALSE;
    int width, height;

    wl_vk_surface = (tpl_wl_vk_surface_t *)data;
    TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

    wl_vk_display = wl_vk_surface->wl_vk_display;
    TPL_CHECK_ON_NULL_RETURN(wl_vk_display);

    surface = wl_vk_surface->tpl_surface;
    TPL_CHECK_ON_NULL_RETURN(surface);

    swapchain = wl_vk_surface->swapchain;
    TPL_CHECK_ON_NULL_RETURN(swapchain);

    /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
     * the changed window size at the next frame. */
    width = tbm_surface_queue_get_width(tbm_queue);
    height = tbm_surface_queue_get_height(tbm_queue);
    if (surface->width != width || surface->height != height) {
        TPL_INFO("[QUEUE_RESIZE]",
                 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
                 wl_vk_surface, tbm_queue,
                 surface->width, surface->height, width, height);
    }

    /* When queue_reset_callback is called, if is_activated differs from
     * its previous state, change the reset flag to TPL_TRUE to get a new buffer
     * with the changed state (ACTIVATED/DEACTIVATED) at the next frame. */
    is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
                                                           swapchain->tbm_queue);
    if (wl_vk_surface->is_activated != is_activated) {
        if (is_activated) {
            TPL_INFO("[ACTIVATED]",
                     "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
                     wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
        } else {
            TPL_INFO("[DEACTIVATED]",
                     " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
                     wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
        }
        wl_vk_surface->is_activated = is_activated;
    }

    wl_vk_surface->reset = TPL_TRUE;

    if (surface->reset_cb)
        surface->reset_cb(surface->reset_data);
}
static void
__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
                                   void *data)
{
    TPL_IGNORE(tbm_queue);

    tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
    TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);

    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    if (wl_vk_surface->sent_message == NONE_MESSAGE) {
        wl_vk_surface->sent_message = ACQUIRABLE;
        tpl_gsource_send_message(wl_vk_surface->surf_source,
                                 wl_vk_surface->sent_message);
    }
    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
static tpl_result_t
_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
{
    TPL_ASSERT(wl_vk_surface);

    tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
    tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
    tbm_surface_queue_h tbm_queue = NULL;
    tbm_bufmgr bufmgr = NULL;
    unsigned int capability;

    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
    TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

    if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
        TPL_ERR("buffer count(%d) must be higher than (%d)",
                swapchain->properties.buffer_count,
                wl_vk_display->min_buffer);
        swapchain->result = TPL_ERROR_INVALID_PARAMETER;
        return TPL_ERROR_INVALID_PARAMETER;
    }

    if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
        TPL_ERR("buffer count(%d) must be lower than (%d)",
                swapchain->properties.buffer_count,
                wl_vk_display->max_buffer);
        swapchain->result = TPL_ERROR_INVALID_PARAMETER;
        return TPL_ERROR_INVALID_PARAMETER;
    }

    if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
        TPL_ERR("Unsupported present_mode(%d)",
                swapchain->properties.present_mode);
        swapchain->result = TPL_ERROR_INVALID_PARAMETER;
        return TPL_ERROR_INVALID_PARAMETER;
    }

    if (swapchain->old_swapchain_buffers) {
        TPL_ERR("old_swapchain should be destroyed before creating a new one.");
        swapchain->result = TPL_ERROR_INVALID_OPERATION;
        return TPL_ERROR_INVALID_OPERATION;
    }

    if (swapchain->tbm_queue) {
        int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
        int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);

        if (swapchain->swapchain_buffers) {
            swapchain->old_swapchain_buffers = swapchain->swapchain_buffers;
            swapchain->swapchain_buffers = NULL;
        }

        if (old_width != swapchain->properties.width ||
            old_height != swapchain->properties.height) {
            tbm_surface_queue_reset(swapchain->tbm_queue,
                                    swapchain->properties.width,
                                    swapchain->properties.height,
                                    TBM_FORMAT_ARGB8888);
            TPL_INFO("[RESIZE]",
                     "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
                     wl_vk_surface, swapchain, swapchain->tbm_queue,
                     old_width, old_height,
                     swapchain->properties.width,
                     swapchain->properties.height);
        }

        swapchain->properties.buffer_count =
            tbm_surface_queue_get_size(swapchain->tbm_queue);

        wl_vk_surface->reset = TPL_FALSE;

        __tpl_util_atomic_inc(&swapchain->ref_cnt);
        swapchain->create_done = TPL_TRUE;

        TPL_INFO("[SWAPCHAIN_REUSE]",
                 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
                 wl_vk_surface, swapchain, swapchain->tbm_queue,
                 swapchain->properties.buffer_count);

        return TPL_ERROR_NONE;
    }

    bufmgr = tbm_bufmgr_init(-1);
    capability = tbm_bufmgr_get_capability(bufmgr);
    tbm_bufmgr_deinit(bufmgr);

    if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
        tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
                        wl_vk_display->wl_tbm_client,
                        wl_vk_surface->wl_surface,
                        swapchain->properties.buffer_count,
                        swapchain->properties.width,
                        swapchain->properties.height,
                        TBM_FORMAT_ARGB8888);
    } else {
        tbm_queue = wayland_tbm_client_create_surface_queue(
                        wl_vk_display->wl_tbm_client,
                        wl_vk_surface->wl_surface,
                        swapchain->properties.buffer_count,
                        swapchain->properties.width,
                        swapchain->properties.height,
                        TBM_FORMAT_ARGB8888);
    }

    if (!tbm_queue) {
        TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
                wl_vk_surface);
        swapchain->result = TPL_ERROR_OUT_OF_MEMORY;
        return TPL_ERROR_OUT_OF_MEMORY;
    }

    if (tbm_surface_queue_set_modes(
            tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
        TBM_SURFACE_QUEUE_ERROR_NONE) {
        TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
                tbm_queue);
        tbm_surface_queue_destroy(tbm_queue);
        swapchain->result = TPL_ERROR_INVALID_OPERATION;
        return TPL_ERROR_INVALID_OPERATION;
    }

    if (tbm_surface_queue_add_reset_cb(
            tbm_queue,
            __cb_tbm_queue_reset_callback,
            (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
        TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
                tbm_queue);
        tbm_surface_queue_destroy(tbm_queue);
        swapchain->result = TPL_ERROR_INVALID_OPERATION;
        return TPL_ERROR_INVALID_OPERATION;
    }

    if (tbm_surface_queue_add_acquirable_cb(
            tbm_queue,
            __cb_tbm_queue_acquirable_callback,
            (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
        TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
                tbm_queue);
        tbm_surface_queue_destroy(tbm_queue);
        swapchain->result = TPL_ERROR_INVALID_OPERATION;
        return TPL_ERROR_INVALID_OPERATION;
    }

    swapchain->tbm_queue = tbm_queue;
    swapchain->create_done = TPL_TRUE;

    TPL_INFO("[TBM_QUEUE_CREATED]",
             "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
             wl_vk_surface, swapchain, tbm_queue);

    return TPL_ERROR_NONE;
}
static tpl_result_t
__tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
                                     tbm_format format, int width,
                                     int height, int buffer_count, int present_mode)
{
    tpl_wl_vk_surface_t *wl_vk_surface = NULL;
    tpl_wl_vk_display_t *wl_vk_display = NULL;
    tpl_wl_vk_swapchain_t *swapchain = NULL;

    TPL_ASSERT(surface);
    TPL_ASSERT(surface->display);

    wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

    wl_vk_display = (tpl_wl_vk_display_t *)
                    surface->display->backend.data;
    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

    swapchain = wl_vk_surface->swapchain;

    if (swapchain == NULL) {
        swapchain =
            (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
        TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
        swapchain->tbm_queue = NULL;
    }

    swapchain->properties.buffer_count = buffer_count;
    swapchain->properties.width = width;
    swapchain->properties.height = height;
    swapchain->properties.present_mode = present_mode;
    swapchain->wl_vk_surface = wl_vk_surface;
    swapchain->properties.format = format;
    swapchain->swapchain_buffers = NULL;
    swapchain->old_swapchain_buffers = NULL;

    swapchain->result = TPL_ERROR_NONE;
    swapchain->create_done = TPL_FALSE;

    wl_vk_surface->swapchain = swapchain;

    __tpl_util_atomic_set(&swapchain->ref_cnt, 1);

    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    /* send swapchain create tbm_queue message */
    wl_vk_surface->sent_message = CREATE_QUEUE;
    tpl_gsource_send_message(wl_vk_surface->surf_source,
                             wl_vk_surface->sent_message);
    while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE)
        tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

    TPL_CHECK_ON_FALSE_ASSERT_FAIL(
        swapchain->tbm_queue != NULL,
        "[CRITICAL FAIL] Failed to create tbm_surface_queue");

    wl_vk_surface->reset = TPL_FALSE;

    return TPL_ERROR_NONE;
}
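/* The CREATE_QUEUE round-trip above blocks the caller until wl_vk_thread has
 * run _thread_swapchain_create_tbm_queue(); swapchain->result carries the
 * thread-side error code back to this context. */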
static void
_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
{
    TPL_ASSERT(wl_vk_surface);

    tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;

    TPL_CHECK_ON_NULL_RETURN(swapchain);

    if (swapchain->tbm_queue) {
        TPL_INFO("[TBM_QUEUE_DESTROY]",
                 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
                 wl_vk_surface, swapchain, swapchain->tbm_queue);
        tbm_surface_queue_destroy(swapchain->tbm_queue);
        swapchain->tbm_queue = NULL;
    }
}
static void
__untrack_swapchain_buffers(tpl_wl_vk_surface_t *wl_vk_surface, tbm_surface_h *sc_buffers)
{
    tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;

    for (int i = 0; i < swapchain->properties.buffer_count; i++) {
        if (sc_buffers[i]) {
            TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
                     i, wl_vk_surface, swapchain, sc_buffers[i],
                     _get_tbm_surface_bo_name(sc_buffers[i]));
            tbm_surface_internal_unref(sc_buffers[i]);
            sc_buffers[i] = NULL;
        }
    }
}
static tpl_result_t
__tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
{
    tpl_wl_vk_swapchain_t *swapchain = NULL;
    tpl_wl_vk_surface_t *wl_vk_surface = NULL;
    tpl_wl_vk_display_t *wl_vk_display = NULL;

    TPL_ASSERT(surface);
    TPL_ASSERT(surface->display);

    wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);

    wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
    TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);

    swapchain = wl_vk_surface->swapchain;
    if (!swapchain) {
        TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
                wl_vk_surface);
        return TPL_ERROR_INVALID_OPERATION;
    }

    if (!swapchain->tbm_queue) {
        TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.",
                wl_vk_surface, wl_vk_surface->swapchain);
        return TPL_ERROR_INVALID_OPERATION;
    }

    if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
        TPL_INFO("[DESTROY_SWAPCHAIN]",
                 "wl_vk_surface(%p) swapchain(%p) still valid.",
                 wl_vk_surface, swapchain);
        if (swapchain->old_swapchain_buffers) {
            __untrack_swapchain_buffers(wl_vk_surface, swapchain->old_swapchain_buffers);
            free(swapchain->old_swapchain_buffers);
            swapchain->old_swapchain_buffers = NULL;
        }
        return TPL_ERROR_NONE;
    }

    TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
             "wl_vk_surface(%p) swapchain(%p)",
             wl_vk_surface, wl_vk_surface->swapchain);

    if (swapchain->swapchain_buffers) {
        __untrack_swapchain_buffers(wl_vk_surface, swapchain->swapchain_buffers);
        free(swapchain->swapchain_buffers);
        swapchain->swapchain_buffers = NULL;
    }

    _tpl_wl_vk_surface_buffer_clear(wl_vk_surface);

    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    wl_vk_surface->sent_message = DESTROY_QUEUE;
    tpl_gsource_send_message(wl_vk_surface->surf_source,
                             wl_vk_surface->sent_message);
    while (swapchain->tbm_queue)
        tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);

    _print_buffer_lists(wl_vk_surface);

    free(swapchain);
    wl_vk_surface->swapchain = NULL;

    return TPL_ERROR_NONE;
}
static tpl_result_t
__tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
                                          tbm_surface_h **buffers,
                                          int *buffer_count)
{
    TPL_ASSERT(surface);
    TPL_ASSERT(surface->backend.data);
    TPL_ASSERT(surface->display);
    TPL_ASSERT(surface->display->backend.data);

    tpl_wl_vk_surface_t *wl_vk_surface =
        (tpl_wl_vk_surface_t *)surface->backend.data;
    tpl_wl_vk_display_t *wl_vk_display =
        (tpl_wl_vk_display_t *)surface->display->backend.data;
    tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
    tpl_result_t ret = TPL_ERROR_NONE;
    int i;

    TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
    TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);

    tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

    if (!buffers) {
        *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
        tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
        return TPL_ERROR_NONE;
    }

    swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
                                       *buffer_count,
                                       sizeof(tbm_surface_h));
    if (!swapchain->swapchain_buffers) {
        TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
                *buffer_count);
        tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
        return TPL_ERROR_OUT_OF_MEMORY;
    }

    ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
                                                swapchain->tbm_queue,
                                                swapchain->swapchain_buffers,
                                                buffer_count);
    if (!ret) {
        TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
                wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
        free(swapchain->swapchain_buffers);
        swapchain->swapchain_buffers = NULL;
        tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
        return TPL_ERROR_INVALID_OPERATION;
    }

    for (i = 0; i < *buffer_count; i++) {
        if (swapchain->swapchain_buffers[i]) {
            TPL_INFO("[TRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
                     i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
                     _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
            tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
        }
    }

    *buffers = swapchain->swapchain_buffers;

    tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

    return TPL_ERROR_NONE;
}
static void
__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
{
    tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
    tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

    TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
             wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);

    tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
    if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
        wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
        wl_vk_surface->buffer_cnt--;

        wl_vk_buffer->idx = -1;
    }
    tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

    wl_display_flush(wl_vk_display->wl_display);

    if (wl_vk_buffer->wl_buffer) {
        wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
                                          wl_vk_buffer->wl_buffer);
        wl_vk_buffer->wl_buffer = NULL;
    }

#if TIZEN_FEATURE_ENABLE
    if (wl_vk_buffer->buffer_release) {
        zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
        wl_vk_buffer->buffer_release = NULL;
    }
#endif

    if (wl_vk_buffer->release_fence_fd != -1) {
        close(wl_vk_buffer->release_fence_fd);
        wl_vk_buffer->release_fence_fd = -1;
    }

    if (wl_vk_buffer->rects) {
        free(wl_vk_buffer->rects);
        wl_vk_buffer->rects = NULL;
        wl_vk_buffer->num_rects = 0;
    }

    wl_vk_buffer->tbm_surface = NULL;
    wl_vk_buffer->bo_name = -1;

    free(wl_vk_buffer);
}
static tpl_wl_vk_buffer_t *
_get_wl_vk_buffer(tbm_surface_h tbm_surface)
{
    tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
    tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
                                       (void **)&wl_vk_buffer);
    return wl_vk_buffer;
}
2004 static tpl_wl_vk_buffer_t *
2005 _wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
2006 tbm_surface_h tbm_surface)
2008 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2010 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2012 if (!wl_vk_buffer) {
2013 wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
2014 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);
2016 tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
2017 (tbm_data_free)__cb_wl_vk_buffer_free);
2018 tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
2019 wl_vk_buffer);
2021 wl_vk_buffer->wl_buffer = NULL;
2022 wl_vk_buffer->tbm_surface = tbm_surface;
2023 wl_vk_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
2024 wl_vk_buffer->wl_vk_surface = wl_vk_surface;
2026 wl_vk_buffer->status = RELEASED;
2028 wl_vk_buffer->acquire_fence_fd = -1;
2029 wl_vk_buffer->release_fence_fd = -1;
2031 wl_vk_buffer->dx = 0;
2032 wl_vk_buffer->dy = 0;
2033 wl_vk_buffer->width = tbm_surface_get_width(tbm_surface);
2034 wl_vk_buffer->height = tbm_surface_get_height(tbm_surface);
2036 wl_vk_buffer->rects = NULL;
2037 wl_vk_buffer->num_rects = 0;
2039 wl_vk_buffer->need_to_commit = TPL_FALSE;
2040 #if TIZEN_FEATURE_ENABLE
2041 wl_vk_buffer->buffer_release = NULL;
2042 #endif
2043 tpl_gmutex_init(&wl_vk_buffer->mutex);
2044 tpl_gcond_init(&wl_vk_buffer->cond);
2046 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2048 int i;
2049 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
2050 if (wl_vk_surface->buffers[i] == NULL) break;
2052 /* Reaching this point means no free slot was found;
2053 * it very likely indicates a critical buffer leak. */
2054 if (i == BUFFER_ARRAY_SIZE) {
2055 tpl_wl_vk_buffer_t *evicted_buffer = NULL;
2056 int evicted_idx = 0; /* evict the frontmost buffer */
2058 evicted_buffer = wl_vk_surface->buffers[evicted_idx];
2060 TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
2061 wl_vk_surface);
2062 TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
2063 evicted_buffer, evicted_buffer->tbm_surface,
2064 status_to_string[evicted_buffer->status]);
2066 /* [TODO] Revisit this eviction policy; dropping a tracked
2067 * buffer only hides the underlying leak. */
2068 wl_vk_surface->buffer_cnt--;
2069 wl_vk_surface->buffers[evicted_idx] = NULL;
2071 i = evicted_idx; /* reuse the evicted slot */
2074 wl_vk_surface->buffer_cnt++;
2075 wl_vk_surface->buffers[i] = wl_vk_buffer;
2076 wl_vk_buffer->idx = i;
2078 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2080 TPL_INFO("[WL_VK_BUFFER_CREATE]",
2081 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2082 wl_vk_surface, wl_vk_buffer, tbm_surface,
2083 wl_vk_buffer->bo_name);
2086 return wl_vk_buffer;
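/* Backend dequeue entry point. Waits (up to timeout_ns) until the
 * tbm_surface_queue is dequeueable, dequeues one tbm_surface, wraps it
 * in a wl_vk_buffer, and hands out the release fence fd when explicit
 * sync is in use. Returns NULL on timeout or on queue errors. */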
2089 static tbm_surface_h
2090 __tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
2091 uint64_t timeout_ns,
2092 int32_t *release_fence)
2094 TPL_ASSERT(surface);
2095 TPL_ASSERT(surface->backend.data);
2096 TPL_ASSERT(surface->display);
2097 TPL_ASSERT(surface->display->backend.data);
2098 TPL_OBJECT_CHECK_RETURN(surface, NULL);
2100 tpl_wl_vk_surface_t *wl_vk_surface =
2101 (tpl_wl_vk_surface_t *)surface->backend.data;
2102 tpl_wl_vk_display_t *wl_vk_display =
2103 (tpl_wl_vk_display_t *)surface->display->backend.data;
2104 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2105 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2107 tbm_surface_h tbm_surface = NULL;
2108 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2110 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
2111 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);
2113 TPL_OBJECT_UNLOCK(surface);
2114 TRACE_BEGIN("WAIT_DEQUEUEABLE");
2115 if (timeout_ns != UINT64_MAX) {
2116 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
2117 swapchain->tbm_queue, timeout_ns/1000);
2118 } else {
2119 tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
2121 TRACE_END();
2122 TPL_OBJECT_LOCK(surface);
2124 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
2125 TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
2126 timeout_ns);
2127 return NULL;
2128 } else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2129 TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
2130 wl_vk_surface, swapchain->tbm_queue, tsq_err);
2131 return NULL;
2134 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
2136 if (wl_vk_surface->reset) {
2137 TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
2138 swapchain, swapchain->tbm_queue);
2139 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2140 return NULL;
2143 tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
2144 &tbm_surface);
2145 if (!tbm_surface) {
2146 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
2147 swapchain->tbm_queue, wl_vk_surface, tsq_err);
2148 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2149 return NULL;
2152 tbm_surface_internal_ref(tbm_surface);
2154 wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
2155 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");
2157 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2158 wl_vk_buffer->status = DEQUEUED;
2160 if (release_fence) {
2161 #if TIZEN_FEATURE_ENABLE
2162 if (wl_vk_surface->surface_sync) {
2163 *release_fence = wl_vk_buffer->release_fence_fd;
2164 TPL_LOG_D("[EXPLICIT_FENCE]", "wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
2165 wl_vk_surface, wl_vk_buffer, *release_fence);
2166 wl_vk_buffer->release_fence_fd = -1;
2167 } else
2168 #endif
2169 {
2170 *release_fence = -1;
2174 wl_vk_surface->reset = TPL_FALSE;
2176 TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2177 wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
2178 release_fence ? *release_fence : -1);
2180 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2181 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2183 return tbm_surface;
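/* Backend cancel entry point: returns a dequeued-but-unused buffer to
 * the tbm_surface_queue, marking it RELEASED and dropping the extra
 * tbm_surface reference taken at dequeue time. */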
2186 static tpl_result_t
2187 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
2188 tbm_surface_h tbm_surface)
2190 TPL_ASSERT(surface);
2191 TPL_ASSERT(surface->backend.data);
2193 tpl_wl_vk_surface_t *wl_vk_surface =
2194 (tpl_wl_vk_surface_t *)surface->backend.data;
2195 tpl_wl_vk_swapchain_t *swapchain = NULL;
2196 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2197 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2199 TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2200 TPL_ERROR_INVALID_PARAMETER);
2202 swapchain = wl_vk_surface->swapchain;
2203 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2204 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2205 TPL_ERROR_INVALID_PARAMETER);
2207 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2208 if (wl_vk_buffer) {
2209 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2210 wl_vk_buffer->status = RELEASED;
2211 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2212 }
2214 tbm_surface_internal_unref(tbm_surface);
2216 TPL_INFO("[CANCEL BUFFER]",
2217 "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2218 wl_vk_surface, swapchain, tbm_surface,
2219 _get_tbm_surface_bo_name(tbm_surface));
2221 tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2222 tbm_surface);
2223 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2224 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2225 return TPL_ERROR_INVALID_OPERATION;
2228 return TPL_ERROR_NONE;
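/* Backend enqueue entry point. Saves the damage rects and the acquire
 * fence fd on the wl_vk_buffer, marks it ENQUEUED, and pushes the
 * tbm_surface into the tbm_surface_queue so the worker thread can
 * acquire and commit it. */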
2231 static tpl_result_t
2232 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2233 tbm_surface_h tbm_surface,
2234 int num_rects, const int *rects,
2235 int32_t acquire_fence)
2237 TPL_ASSERT(surface);
2238 TPL_ASSERT(surface->display);
2239 TPL_ASSERT(surface->backend.data);
2240 TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2242 tpl_wl_vk_surface_t *wl_vk_surface =
2243 (tpl_wl_vk_surface_t *) surface->backend.data;
2244 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2245 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2246 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2247 int bo_name = -1;
2249 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2250 TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2251 TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2252 TPL_ERROR_INVALID_PARAMETER);
2254 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2255 if (!wl_vk_buffer) {
2256 TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
2257 return TPL_ERROR_INVALID_PARAMETER;
2260 bo_name = wl_vk_buffer->bo_name;
2262 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2264 /* If region information was received, save it to the wl_vk_buffer. */
2265 if (num_rects && rects) {
2266 if (wl_vk_buffer->rects != NULL) {
2267 free(wl_vk_buffer->rects);
2268 wl_vk_buffer->rects = NULL;
2269 wl_vk_buffer->num_rects = 0;
2272 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2273 wl_vk_buffer->num_rects = num_rects;
2275 if (wl_vk_buffer->rects) {
2276 memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2277 sizeof(int) * 4 * num_rects);
2278 } else {
2279 TPL_ERR("Failed to allocate memory for rects info.");
2283 if (wl_vk_buffer->acquire_fence_fd != -1)
2284 close(wl_vk_buffer->acquire_fence_fd);
2286 wl_vk_buffer->acquire_fence_fd = acquire_fence;
2288 wl_vk_buffer->status = ENQUEUED;
2289 TPL_LOG_T("WL_VK",
2290 "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2291 wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2293 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2295 tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2296 tbm_surface);
2297 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2298 tbm_surface_internal_unref(tbm_surface);
2299 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2300 tbm_surface, wl_vk_surface, tsq_err);
2301 return TPL_ERROR_INVALID_OPERATION;
2304 tbm_surface_internal_unref(tbm_surface);
2306 return TPL_ERROR_NONE;
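/* Runs on the worker thread whenever the tbm_surface_queue becomes
 * acquirable. Drains every pending buffer, lazily creating its
 * wl_buffer, and either commits it right away or parks it on
 * vblank_waiting_buffers when vblank pacing is active. */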
2309 static const struct wl_buffer_listener wl_buffer_release_listener = {
2310 (void *)__cb_wl_buffer_release,
2311 };
2313 static tpl_result_t
2314 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
2316 tbm_surface_h tbm_surface = NULL;
2317 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2318 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2319 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2320 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2321 tpl_bool_t ready_to_commit = TPL_TRUE;
2323 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2325 while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
2326 tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
2327 &tbm_surface);
2328 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2329 TPL_ERR("Failed to acquire from tbm_queue(%p)",
2330 swapchain->tbm_queue);
2331 return TPL_ERROR_INVALID_OPERATION;
2334 tbm_surface_internal_ref(tbm_surface);
2336 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2337 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2338 "wl_vk_buffer sould be not NULL");
2340 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2342 wl_vk_buffer->status = ACQUIRED;
2344 TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2345 wl_vk_buffer, tbm_surface,
2346 _get_tbm_surface_bo_name(tbm_surface));
2348 if (wl_vk_buffer->wl_buffer == NULL) {
2349 wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2350 wl_vk_display->wl_tbm_client, tbm_surface);
2352 if (!wl_vk_buffer->wl_buffer) {
2353 TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
2354 wl_vk_display->wl_tbm_client, tbm_surface);
2355 } else {
2356 if (wl_vk_buffer->acquire_fence_fd == -1 ||
2357 wl_vk_display->use_explicit_sync == TPL_FALSE) {
2358 wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2359 &wl_buffer_release_listener, wl_vk_buffer);
2363 "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
2364 wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
2368 if (!wl_vk_surface->vblank_enable || wl_vk_surface->vblank_done)
2369 ready_to_commit = TPL_TRUE;
2370 else {
2371 wl_vk_buffer->status = WAITING_VBLANK;
2372 __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
2373 ready_to_commit = TPL_FALSE;
2374 }
2376 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2378 if (ready_to_commit)
2379 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2382 return TPL_ERROR_NONE;
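/* zwp_linux_buffer_release_v1 listeners: the compositor either hands
 * back a release fence fd (fenced_release) or signals that the buffer
 * may be reused immediately (immediate_release). Both paths mark a
 * COMMITTED wl_vk_buffer RELEASED and return it to the queue. */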
2385 #if TIZEN_FEATURE_ENABLE
2386 static void
2387 __cb_buffer_fenced_release(void *data,
2388 struct zwp_linux_buffer_release_v1 *release,
2389 int32_t fence)
2391 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2392 tbm_surface_h tbm_surface = NULL;
2394 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2396 tbm_surface = wl_vk_buffer->tbm_surface;
2398 if (tbm_surface_internal_is_valid(tbm_surface)) {
2399 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2400 tpl_wl_vk_swapchain_t *swapchain = NULL;
2402 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2403 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2404 tbm_surface_internal_unref(tbm_surface);
2405 return;
2408 swapchain = wl_vk_surface->swapchain;
2410 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2411 if (wl_vk_buffer->status == COMMITTED) {
2412 tbm_surface_queue_error_e tsq_err;
2414 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2415 wl_vk_buffer->buffer_release = NULL;
2417 wl_vk_buffer->release_fence_fd = fence;
2418 wl_vk_buffer->status = RELEASED;
2420 TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2421 wl_vk_buffer->bo_name,
2422 fence);
2423 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2424 wl_vk_buffer->bo_name);
2427 "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2428 wl_vk_buffer, tbm_surface,
2429 wl_vk_buffer->bo_name,
2432 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2433 tbm_surface);
2434 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2435 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2437 tbm_surface_internal_unref(tbm_surface);
2440 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2442 } else {
2443 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2447 static void
2448 __cb_buffer_immediate_release(void *data,
2449 struct zwp_linux_buffer_release_v1 *release)
2451 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2452 tbm_surface_h tbm_surface = NULL;
2454 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2456 tbm_surface = wl_vk_buffer->tbm_surface;
2458 if (tbm_surface_internal_is_valid(tbm_surface)) {
2459 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2460 tpl_wl_vk_swapchain_t *swapchain = NULL;
2462 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2463 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2464 tbm_surface_internal_unref(tbm_surface);
2465 return;
2468 swapchain = wl_vk_surface->swapchain;
2470 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2471 if (wl_vk_buffer->status == COMMITTED) {
2472 tbm_surface_queue_error_e tsq_err;
2474 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2475 wl_vk_buffer->buffer_release = NULL;
2477 wl_vk_buffer->release_fence_fd = -1;
2478 wl_vk_buffer->status = RELEASED;
2480 TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
2481 _get_tbm_surface_bo_name(tbm_surface));
2482 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2483 _get_tbm_surface_bo_name(tbm_surface));
2486 "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2487 wl_vk_buffer, tbm_surface,
2488 _get_tbm_surface_bo_name(tbm_surface));
2490 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2491 tbm_surface);
2492 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2493 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2495 tbm_surface_internal_unref(tbm_surface);
2498 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2500 } else {
2501 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
2505 static const struct zwp_linux_buffer_release_v1_listener zwp_release_listener = {
2506 __cb_buffer_fenced_release,
2507 __cb_buffer_immediate_release,
2508 };
2509 #endif
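/* wl_buffer.release listener used when explicit sync is unavailable:
 * returns a COMMITTED buffer to the tbm_surface_queue and marks it
 * RELEASED once the compositor is done with it. */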
2511 static void
2512 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
2514 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2515 tbm_surface_h tbm_surface = NULL;
2517 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2519 tbm_surface = wl_vk_buffer->tbm_surface;
2521 if (tbm_surface_internal_is_valid(tbm_surface)) {
2522 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2523 tpl_wl_vk_swapchain_t *swapchain = NULL;
2524 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
2526 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2527 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2528 tbm_surface_internal_unref(tbm_surface);
2529 return;
2532 swapchain = wl_vk_surface->swapchain;
2534 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2536 if (wl_vk_buffer->status == COMMITTED) {
2538 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2539 tbm_surface);
2540 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2541 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2543 wl_vk_buffer->status = RELEASED;
2545 TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
2546 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2547 wl_vk_buffer->bo_name);
2549 TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2550 wl_vk_buffer->wl_buffer, tbm_surface,
2551 wl_vk_buffer->bo_name);
2553 tbm_surface_internal_unref(tbm_surface);
2556 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2557 } else {
2558 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
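/* tdm vblank callback, run on the worker thread. Marks the vblank as
 * done and commits parked buffers: one per vblank normally, or all of
 * them when the wait ended with a tdm error such as TIMEOUT. */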
2562 static void
2563 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2564 unsigned int sequence, unsigned int tv_sec,
2565 unsigned int tv_usec, void *user_data)
2567 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
2568 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2570 TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2571 TPL_LOG_D("[VBLANK_DONE]", "wl_vk_surface(%p)", wl_vk_surface);
2573 if (error == TDM_ERROR_TIMEOUT)
2574 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
2575 wl_vk_surface);
2577 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
2578 wl_vk_surface->vblank_done = TPL_TRUE;
2580 if (wl_vk_surface->vblank && wl_vk_surface->vblank_waiting_buffers) {
2581 tpl_bool_t is_empty = TPL_TRUE;
2582 do {
2583 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)
2584 __tpl_list_pop_front(wl_vk_surface->vblank_waiting_buffers, NULL);
2585 is_empty = __tpl_list_is_empty(wl_vk_surface->vblank_waiting_buffers);
2587 if (!wl_vk_buffer) break;
2589 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2591 /* If a tdm error such as TIMEOUT occurred, flush all
2592 * vblank-waiting buffers of this wl_vk_surface.
2593 * Otherwise, commit only one wl_vk_buffer per vblank event. */
2595 if (error == TDM_ERROR_NONE && wl_vk_surface->post_interval > 0)
2596 break;
2597 } while (!is_empty);
2599 wl_vk_surface->vblank_enable = (wl_vk_surface->post_interval > 0);
2601 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
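/* Arms a tdm vblank wait for this surface, creating the
 * tdm_client_vblank object and the vblank_waiting_buffers list on first
 * use. While the wait is pending vblank_done stays TPL_FALSE, so newly
 * acquired buffers are queued instead of committed immediately. */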
2604 static tpl_result_t
2605 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2607 tdm_error tdm_err = TDM_ERROR_NONE;
2608 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2610 if (wl_vk_surface->vblank == NULL) {
2611 wl_vk_surface->vblank =
2612 _thread_create_tdm_client_vblank(wl_vk_display->tdm_client);
2613 if (!wl_vk_surface->vblank) {
2614 TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2615 wl_vk_surface);
2616 return TPL_ERROR_OUT_OF_MEMORY;
2618 wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
2619 if (!wl_vk_surface->vblank_waiting_buffers) {
2620 tdm_client_vblank_destroy(wl_vk_surface->vblank);
2621 wl_vk_surface->vblank = NULL;
2622 return TPL_ERROR_OUT_OF_MEMORY;
2626 tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2627 wl_vk_surface->post_interval,
2628 __cb_tdm_client_vblank,
2629 (void *)wl_vk_surface);
2631 if (tdm_err == TDM_ERROR_NONE) {
2632 wl_vk_surface->vblank_done = TPL_FALSE;
2633 TRACE_ASYNC_BEGIN((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2634 } else {
2635 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2636 return TPL_ERROR_INVALID_OPERATION;
2639 return TPL_ERROR_NONE;
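/* Attaches the buffer's wl_buffer to the wl_surface, posts damage
 * (wl_surface.damage_buffer on version >= 4 compositors, wl_surface.damage
 * otherwise), forwards the acquire fence via
 * zwp_linux_surface_synchronization_v1 when explicit sync is enabled,
 * then commits and flushes. The buffer becomes COMMITTED and, if
 * post_interval > 0, a vblank wait is armed to pace the next commit. */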
2642 static void
2643 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
2644 tpl_wl_vk_buffer_t *wl_vk_buffer)
2646 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2647 struct wl_surface *wl_surface = wl_vk_surface->wl_surface;
2648 uint32_t version;
2650 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2651 "wl_vk_buffer sould be not NULL");
2653 if (wl_vk_buffer->wl_buffer == NULL) {
2654 wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2655 wl_vk_display->wl_tbm_client,
2656 wl_vk_buffer->tbm_surface);
2657 if (wl_vk_buffer->wl_buffer &&
2658 (wl_vk_buffer->acquire_fence_fd == -1 ||
2659 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
2660 wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2661 &wl_buffer_release_listener, wl_vk_buffer);
2664 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
2665 "[FATAL] Failed to create wl_buffer");
2667 version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
2669 wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
2670 wl_vk_buffer->dx, wl_vk_buffer->dy);
2672 if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
2673 if (version < 4) {
2674 wl_surface_damage(wl_surface,
2675 wl_vk_buffer->dx, wl_vk_buffer->dy,
2676 wl_vk_buffer->width, wl_vk_buffer->height);
2677 } else {
2678 wl_surface_damage_buffer(wl_surface,
2679 0, 0,
2680 wl_vk_buffer->width, wl_vk_buffer->height);
2681 }
2682 } else {
2683 int i;
2684 for (i = 0; i < wl_vk_buffer->num_rects; i++) {
/* incoming rects use a bottom-left origin; convert to Wayland's top-left */
2685 int inverted_y =
2686 wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
2687 wl_vk_buffer->rects[i * 4 + 3]);
2688 if (version < 4) {
2689 wl_surface_damage(wl_surface,
2690 wl_vk_buffer->rects[i * 4 + 0],
2691 inverted_y,
2692 wl_vk_buffer->rects[i * 4 + 2],
2693 wl_vk_buffer->rects[i * 4 + 3]);
2694 } else {
2695 wl_surface_damage_buffer(wl_surface,
2696 wl_vk_buffer->rects[i * 4 + 0],
2697 inverted_y,
2698 wl_vk_buffer->rects[i * 4 + 2],
2699 wl_vk_buffer->rects[i * 4 + 3]);
2700 }
2701 }
2704 #if TIZEN_FEATURE_ENABLE
2705 if (wl_vk_display->use_explicit_sync &&
2706 wl_vk_surface->surface_sync &&
2707 wl_vk_buffer->acquire_fence_fd != -1) {
2709 zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
2710 wl_vk_buffer->acquire_fence_fd);
2711 TPL_LOG_D("[SET_ACQUIRE_FENCE][1/2]", "wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
2712 wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
2713 close(wl_vk_buffer->acquire_fence_fd);
2714 wl_vk_buffer->acquire_fence_fd = -1;
2716 wl_vk_buffer->buffer_release =
2717 zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
2718 if (!wl_vk_buffer->buffer_release) {
2719 TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
2720 } else {
2721 zwp_linux_buffer_release_v1_add_listener(
2722 wl_vk_buffer->buffer_release, &zwp_release_listener, wl_vk_buffer);
2723 TPL_LOG_D("[SET_ACQUIRE_FENCE][2/2]", "add explicit_sync_release_listener.");
2726 #endif
2728 wl_surface_commit(wl_surface);
2730 wl_display_flush(wl_vk_display->wl_display);
2732 TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2733 wl_vk_buffer->bo_name);
2735 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2737 wl_vk_buffer->need_to_commit = TPL_FALSE;
2738 wl_vk_buffer->status = COMMITTED;
2740 tpl_gcond_signal(&wl_vk_buffer->cond);
2742 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2745 "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
2746 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
2747 wl_vk_buffer->bo_name);
2749 if (wl_vk_surface->post_interval > 0 && wl_vk_surface->vblank != NULL) {
2750 wl_vk_surface->vblank_enable = TPL_TRUE;
2751 if (_thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
2752 TPL_ERR("Failed to set wait vblank.");
2756 tpl_bool_t
2757 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2759 if (!native_dpy) return TPL_FALSE;
2761 if (_check_native_handle_is_wl_display(native_dpy))
2762 return TPL_TRUE;
2764 return TPL_FALSE;
2767 tpl_result_t
2768 __tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
2770 TPL_ASSERT(backend);
2772 backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2773 backend->data = NULL;
2775 backend->init = __tpl_wl_vk_display_init;
2776 backend->fini = __tpl_wl_vk_display_fini;
2777 backend->query_config = __tpl_wl_vk_display_query_config;
2778 backend->filter_config = __tpl_wl_vk_display_filter_config;
2779 backend->query_window_supported_buffer_count =
2780 __tpl_wl_vk_display_query_window_supported_buffer_count;
2781 backend->query_window_supported_present_modes =
2782 __tpl_wl_vk_display_query_window_supported_present_modes;
2783 return TPL_ERROR_NONE;
2785 tpl_result_t
2786 __tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
2788 TPL_ASSERT(backend);
2790 backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2791 backend->data = NULL;
2793 backend->init = __tpl_wl_vk_surface_init;
2794 backend->fini = __tpl_wl_vk_surface_fini;
2795 backend->validate = __tpl_wl_vk_surface_validate;
2796 backend->cancel_dequeued_buffer =
2797 __tpl_wl_vk_surface_cancel_buffer;
2798 backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
2799 backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
2800 backend->get_swapchain_buffers =
2801 __tpl_wl_vk_surface_get_swapchain_buffers;
2802 backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
2803 backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
2804 backend->set_post_interval =
2805 __tpl_wl_vk_surface_set_post_interval;
2806 return TPL_ERROR_NONE;
2808 static int
2809 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2811 return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
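/* Debug helper: dumps every tracked wl_vk_buffer of a surface with its
 * tbm_surface, bo name, and lifecycle status. */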
2814 static void
2815 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2817 int idx = 0;
2819 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2820 TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2821 wl_vk_surface, wl_vk_surface->buffer_cnt);
2822 for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2823 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2824 if (wl_vk_buffer) {
2825 TPL_INFO("[INFO]",
2826 "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2827 idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2828 wl_vk_buffer->bo_name,
2829 status_to_string[wl_vk_buffer->status]);
2832 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);