1 #define inline __inline__
4 #include "tpl_internal.h"
9 #include <sys/eventfd.h>
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
20 #include <tdm_client.h>
22 #ifndef TIZEN_FEATURE_ENABLE
23 #define TIZEN_FEATURE_ENABLE 1
26 #if TIZEN_FEATURE_ENABLE
27 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
30 #include "tpl_utils_gthread.h"
32 #define BUFFER_ARRAY_SIZE 10
33 #define VK_CLIENT_QUEUE_SIZE 3
35 static int wl_vk_buffer_key;
36 #define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
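/* A minimal sketch (mirroring _wl_vk_buffer_create()/_get_wl_vk_buffer()
 * below) of the user-data pattern this key enables: the address of a static
 * variable is a process-unique key, so a private tpl_wl_vk_buffer_t can be
 * attached to and later recovered from any tbm_surface_h.
 *
 *   tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
 *                                      (tbm_data_free)__cb_wl_vk_buffer_free);
 *   tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
 *                                      wl_vk_buffer);
 *   ...
 *   tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
 *                                      (void **)&wl_vk_buffer);
 */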
38 typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t;
39 typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t;
40 typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t;
41 typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t;
43 struct _tpl_wl_vk_display {
44 tpl_gsource *disp_source;
46 tpl_gmutex wl_event_mutex;
48 struct wl_display *wl_display;
49 struct wl_event_queue *ev_queue;
50 struct wayland_tbm_client *wl_tbm_client;
51 int last_error; /* errno of the last wl_display error*/
53 tpl_bool_t wl_initialized;
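/* Note: judging from the wl_vk_display->tdm.* accesses later in this file,
 * the following tdm members sit in a nested sub-struct, roughly:
 *
 *   struct {
 *       tdm_client *tdm_client;
 *       tpl_gsource *tdm_source;
 *       int tdm_display_fd;
 *       tpl_bool_t tdm_initialized;
 *       tpl_gmutex tdm_mutex;
 *       tpl_gcond tdm_cond;
 *       tpl_bool_t gsource_finalized;
 *   } tdm;
 */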
56 tdm_client *tdm_client;
57 tpl_gsource *tdm_source;
59 tpl_bool_t tdm_initialized;
60 /* To make sure that tpl_gsource has been successfully finalized. */
61 tpl_bool_t gsource_finalized;
66 tpl_bool_t use_wait_vblank;
67 tpl_bool_t use_explicit_sync;
70 /* To make sure that tpl_gsource has been successfully finalized. */
71 tpl_bool_t gsource_finalized;
72 tpl_gmutex disp_mutex;
75 /* device surface capabilities */
79 #if TIZEN_FEATURE_ENABLE
80 struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
84 struct _tpl_wl_vk_swapchain {
85 tpl_wl_vk_surface_t *wl_vk_surface;
87 tbm_surface_queue_h tbm_queue;
90 tpl_bool_t create_done;
100 tbm_surface_h *swapchain_buffers;
102 /* [TEMP] Workaround for the dEQP-VK.wsi.wayland.swapchain.modify.resize crash issue.
103 * It will be fixed properly using the old_swapchain handle */
104 tbm_surface_h *old_swapchain_buffers;
106 tpl_util_atomic_uint ref_cnt;
109 typedef enum surf_message {
117 struct _tpl_wl_vk_surface {
118 tpl_gsource *surf_source;
120 tpl_wl_vk_swapchain_t *swapchain;
122 struct wl_surface *wl_surface;
123 #if TIZEN_FEATURE_ENABLE
124 struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
126 tdm_client_vblank *vblank;
128 /* surface information */
131 tpl_wl_vk_display_t *wl_vk_display;
132 tpl_surface_t *tpl_surface;
134 /* wl_vk_buffer array for buffer tracing */
135 tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE];
136 int buffer_cnt; /* the number of wl_vk_buffers in use */
137 tpl_gmutex buffers_mutex;
139 tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
141 tpl_gmutex surf_mutex;
144 /* for waiting draw done */
145 tpl_bool_t is_activated;
146 tpl_bool_t reset; /* TRUE if the queue was reset externally */
147 tpl_bool_t vblank_done;
148 tpl_bool_t vblank_enable;
149 tpl_bool_t initialized_in_thread;
151 /* To make sure that tpl_gsource has been successfully finalized. */
152 tpl_bool_t gsource_finalized;
154 surf_message sent_message;
159 typedef enum buffer_status {
164 WAITING_SIGNALED, // 4
169 static const char *status_to_string[7] = {
174 "WAITING_SIGNALED", // 4
175 "WAITING_VBLANK", // 5
179 struct _tpl_wl_vk_buffer {
180 tbm_surface_h tbm_surface;
183 struct wl_buffer *wl_buffer;
184 int dx, dy; /* position to attach to wl_surface */
185 int width, height; /* size to attach to wl_surface */
187 buffer_status_t status; /* for tracing buffer status */
188 int idx; /* position index in buffers array of wl_vk_surface */
190 /* for damage region */
194 /* for checking need_to_commit (frontbuffer mode) */
195 tpl_bool_t need_to_commit;
197 #if TIZEN_FEATURE_ENABLE
198 /* to get release event via zwp_linux_buffer_release_v1 */
199 struct zwp_linux_buffer_release_v1 *buffer_release;
202 /* Each buffer owns its release_fence_fd until it passes ownership
204 int32_t release_fence_fd;
206 /* Each buffer owns its acquire_fence_fd.
207 * If zwp_linux_buffer_release_v1 is used, the ownership of this fd
208 * will be passed to the display server.
209 * Otherwise it will be used as a fence to wait for render done
211 int32_t acquire_fence_fd;
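/* A sketch of the intended fence-fd flow, assuming standard
 * zwp_linux_explicit_synchronization_v1 usage (the buffer_release handle
 * above is the receiving end):
 *
 *   // client -> server: hand the acquire fence over before commit
 *   zwp_linux_surface_synchronization_v1_set_acquire_fence(
 *       surface_sync, wl_vk_buffer->acquire_fence_fd);
 *   // server -> client: the release fence arrives via the
 *   // zwp_linux_buffer_release_v1 fenced_release event and is kept in
 *   // wl_vk_buffer->release_fence_fd until the buffer is dequeued again.
 */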
216 tpl_wl_vk_surface_t *wl_vk_surface;
220 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
222 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
224 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
226 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
228 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
230 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
232 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
234 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
235 tpl_wl_vk_buffer_t *wl_vk_buffer);
238 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
240 struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
242 if (!wl_vk_native_dpy) {
243 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
247 /* MAGIC CHECK: A native display handle is a wl_display if the dereferenced
248 first value is the address of the wl_display_interface structure. */
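/* Rationale (a libwayland implementation detail): a wl_display begins with a
 * wl_proxy whose first field points to its wl_interface, so the first
 * pointer-sized value behind a genuine wl_display is &wl_display_interface.
 * Hypothetical caller sketch:
 *
 *   struct wl_display *dpy = wl_display_connect(NULL);
 *   if (dpy && _check_native_handle_is_wl_display((tpl_handle_t)dpy))
 *       ; // safe to treat the handle as a wl_display
 */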
249 if (wl_vk_native_dpy == &wl_display_interface)
252 if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
253 strlen(wl_display_interface.name)) == 0) {
261 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
263 tpl_wl_vk_display_t *wl_vk_display = NULL;
264 tdm_error tdm_err = TDM_ERROR_NONE;
268 wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
269 if (!wl_vk_display) {
270 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
271 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
275 tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client);
277 /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
278 * When tdm_source is no longer available due to an unexpected situation,
279 * wl_vk_thread must remove it from the thread and destroy it.
280 * In that case, tdm_vblank can no longer be used for surfaces and displays
281 * that used this tdm_source. */
282 if (tdm_err != TDM_ERROR_NONE) {
283 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
285 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
287 tpl_gsource_destroy(gsource, TPL_FALSE);
289 wl_vk_display->tdm.tdm_source = NULL;
298 __thread_func_tdm_finalize(tpl_gsource *gsource)
300 tpl_wl_vk_display_t *wl_vk_display = NULL;
302 wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
304 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
306 TPL_INFO("[TDM_CLIENT_FINI]",
307 "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
308 wl_vk_display, wl_vk_display->tdm.tdm_client,
309 wl_vk_display->tdm.tdm_display_fd);
311 if (wl_vk_display->tdm.tdm_client) {
312 tdm_client_destroy(wl_vk_display->tdm.tdm_client);
313 wl_vk_display->tdm.tdm_client = NULL;
314 wl_vk_display->tdm.tdm_display_fd = -1;
317 wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
318 wl_vk_display->tdm.gsource_finalized = TPL_TRUE;
320 tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond);
321 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
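/* The finalize handshake above pairs with a destroy-and-wait loop on the
 * caller side (see __tpl_wl_vk_display_fini below); in sketch form:
 *
 *   tpl_gmutex_lock(&tdm_mutex);
 *   while (!tdm.gsource_finalized) {                  // set by the finalizer
 *       tpl_gsource_destroy(tdm_source, TPL_TRUE);    // finalize in thread
 *       tpl_gcond_wait(&tdm_cond, &tdm_mutex);        // woken by tpl_gcond_signal
 *   }
 *   tpl_gmutex_unlock(&tdm_mutex);
 */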
324 static tpl_gsource_functions tdm_funcs = {
327 .dispatch = __thread_func_tdm_dispatch,
328 .finalize = __thread_func_tdm_finalize,
332 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
334 tdm_client *tdm_client = NULL;
335 int tdm_display_fd = -1;
336 tdm_error tdm_err = TDM_ERROR_NONE;
338 tdm_client = tdm_client_create(&tdm_err);
339 if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
340 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
341 return TPL_ERROR_INVALID_OPERATION;
344 tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
345 if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
346 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
347 tdm_client_destroy(tdm_client);
348 return TPL_ERROR_INVALID_OPERATION;
351 wl_vk_display->tdm.tdm_display_fd = tdm_display_fd;
352 wl_vk_display->tdm.tdm_client = tdm_client;
353 wl_vk_display->tdm.tdm_source = NULL;
354 wl_vk_display->tdm.tdm_initialized = TPL_TRUE;
356 TPL_INFO("[TDM_CLIENT_INIT]",
357 "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
358 wl_vk_display, tdm_client, tdm_display_fd);
360 return TPL_ERROR_NONE;
364 __cb_wl_registry_global_callback(void *data, struct wl_registry *wl_registry,
365 uint32_t name, const char *interface,
368 #if TIZEN_FEATURE_ENABLE
369 tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
371 if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
372 char *env = tpl_getenv("TPL_EFS");
373 if (env && !atoi(env)) {
374 wl_vk_display->use_explicit_sync = TPL_FALSE;
376 wl_vk_display->explicit_sync =
377 wl_registry_bind(wl_registry, name,
378 &zwp_linux_explicit_synchronization_v1_interface, 1);
379 wl_vk_display->use_explicit_sync = TPL_TRUE;
380 TPL_LOG_D("[REGISTRY_BIND]",
381 "wl_vk_display(%p) bind zwp_linux_explicit_synchronization_v1_interface",
389 __cb_wl_registry_global_remove_callback(void *data,
390 struct wl_registry *wl_registry,
395 static const struct wl_registry_listener registry_listener = {
396 __cb_wl_registry_global_callback,
397 __cb_wl_registry_global_remove_callback
401 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
402 const char *func_name)
406 strerror_r(errno, buf, sizeof(buf));
408 if (wl_vk_display->last_error == errno)
411 TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
413 dpy_err = wl_display_get_error(wl_vk_display->wl_display);
414 if (dpy_err == EPROTO) {
415 const struct wl_interface *err_interface;
416 uint32_t err_proxy_id, err_code;
417 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
420 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
421 (err_interface ? err_interface->name : "UNKNOWN"),
422 err_code, err_proxy_id);
425 wl_vk_display->last_error = errno;
429 _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
431 struct wl_registry *registry = NULL;
432 struct wl_event_queue *queue = NULL;
433 struct wl_display *display_wrapper = NULL;
434 struct wl_proxy *wl_tbm = NULL;
435 struct wayland_tbm_client *wl_tbm_client = NULL;
437 tpl_result_t result = TPL_ERROR_NONE;
439 queue = wl_display_create_queue(wl_vk_display->wl_display);
441 TPL_ERR("Failed to create wl_queue wl_display(%p)",
442 wl_vk_display->wl_display);
443 result = TPL_ERROR_INVALID_OPERATION;
447 wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
448 if (!wl_vk_display->ev_queue) {
449 TPL_ERR("Failed to create wl_queue wl_display(%p)",
450 wl_vk_display->wl_display);
451 result = TPL_ERROR_INVALID_OPERATION;
455 display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
456 if (!display_wrapper) {
457 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
458 wl_vk_display->wl_display);
459 result = TPL_ERROR_INVALID_OPERATION;
463 wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
465 registry = wl_display_get_registry(display_wrapper);
467 TPL_ERR("Failed to create wl_registry");
468 result = TPL_ERROR_INVALID_OPERATION;
472 wl_proxy_wrapper_destroy(display_wrapper);
473 display_wrapper = NULL;
475 wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
476 if (!wl_tbm_client) {
477 TPL_ERR("Failed to initialize wl_tbm_client.");
478 result = TPL_ERROR_INVALID_CONNECTION;
482 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
484 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
485 result = TPL_ERROR_INVALID_CONNECTION;
489 wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
490 wl_vk_display->wl_tbm_client = wl_tbm_client;
492 if (wl_registry_add_listener(registry, &registry_listener,
494 TPL_ERR("Failed to wl_registry_add_listener");
495 result = TPL_ERROR_INVALID_OPERATION;
499 ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
501 _wl_display_print_err(wl_vk_display, "roundtrip_queue");
502 result = TPL_ERROR_INVALID_OPERATION;
506 #if TIZEN_FEATURE_ENABLE
507 if (wl_vk_display->explicit_sync) {
508 wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
509 wl_vk_display->ev_queue);
510 TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
511 wl_vk_display->explicit_sync);
515 wl_vk_display->wl_initialized = TPL_TRUE;
517 TPL_INFO("[WAYLAND_INIT]",
518 "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
519 wl_vk_display, wl_vk_display->wl_display,
520 wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
521 #if TIZEN_FEATURE_ENABLE
522 TPL_INFO("[WAYLAND_INIT]",
524 wl_vk_display->explicit_sync);
528 wl_proxy_wrapper_destroy(display_wrapper);
530 wl_registry_destroy(registry);
532 wl_event_queue_destroy(queue);
538 _thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
540 /* If wl_vk_display is in prepared state, cancel it */
541 if (wl_vk_display->prepared) {
542 wl_display_cancel_read(wl_vk_display->wl_display);
543 wl_vk_display->prepared = TPL_FALSE;
546 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
547 wl_vk_display->ev_queue) == -1) {
548 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
551 #if TIZEN_FEATURE_ENABLE
552 if (wl_vk_display->explicit_sync) {
553 TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
554 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
555 wl_vk_display, wl_vk_display->explicit_sync);
556 zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
557 wl_vk_display->explicit_sync = NULL;
561 if (wl_vk_display->wl_tbm_client) {
562 struct wl_proxy *wl_tbm = NULL;
564 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
565 wl_vk_display->wl_tbm_client);
567 wl_proxy_set_queue(wl_tbm, NULL);
570 TPL_INFO("[WL_TBM_DEINIT]",
571 "wl_vk_display(%p) wl_tbm_client(%p)",
572 wl_vk_display, wl_vk_display->wl_tbm_client);
573 wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
574 wl_vk_display->wl_tbm_client = NULL;
577 wl_event_queue_destroy(wl_vk_display->ev_queue);
579 wl_vk_display->wl_initialized = TPL_FALSE;
581 TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
582 wl_vk_display, wl_vk_display->wl_display);
586 _thread_init(void *data)
588 tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
590 if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
591 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
592 wl_vk_display, wl_vk_display->wl_display);
595 if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
596 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
599 return wl_vk_display;
603 __thread_func_disp_prepare(tpl_gsource *gsource)
605 tpl_wl_vk_display_t *wl_vk_display =
606 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
608 /* If this wl_vk_display is already prepared,
609 * do nothing in this function. */
610 if (wl_vk_display->prepared)
613 /* If there is a last_error, there is no need to poll,
614 * so skip directly to dispatch.
615 * prepare -> dispatch */
616 if (wl_vk_display->last_error)
619 while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
620 wl_vk_display->ev_queue) != 0) {
621 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
622 wl_vk_display->ev_queue) == -1) {
623 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
627 wl_vk_display->prepared = TPL_TRUE;
629 wl_display_flush(wl_vk_display->wl_display);
635 __thread_func_disp_check(tpl_gsource *gsource)
637 tpl_wl_vk_display_t *wl_vk_display =
638 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
639 tpl_bool_t ret = TPL_FALSE;
641 if (!wl_vk_display->prepared)
644 /* If prepared but last_error is set,
645 * cancel_read is executed and FALSE is returned.
646 * On the next iteration disp_prepare skips polling and goes straight
647 * to disp_dispatch, which then returns G_SOURCE_REMOVE.
648 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
649 if (wl_vk_display->prepared && wl_vk_display->last_error) {
650 wl_display_cancel_read(wl_vk_display->wl_display);
654 if (tpl_gsource_check_io_condition(gsource)) {
655 if (wl_display_read_events(wl_vk_display->wl_display) == -1)
656 _wl_display_print_err(wl_vk_display, "read_event");
659 wl_display_cancel_read(wl_vk_display->wl_display);
663 wl_vk_display->prepared = TPL_FALSE;
669 __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
671 tpl_wl_vk_display_t *wl_vk_display =
672 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
676 /* If last_error is set, SOURCE_REMOVE should be returned
677 * to remove the gsource from the main loop,
678 * because wl_vk_display is no longer valid once last_error is set.*/
679 if (wl_vk_display->last_error) {
683 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
684 if (tpl_gsource_check_io_condition(gsource)) {
685 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
686 wl_vk_display->ev_queue) == -1) {
687 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
691 wl_display_flush(wl_vk_display->wl_display);
692 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
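/* Taken together, prepare/check/dispatch implement the canonical libwayland
 * multi-queue read pattern on top of the gsource state machine, roughly:
 *
 *   // prepare: flush requests and declare a read intent on our queue
 *   while (wl_display_prepare_read_queue(dpy, queue) != 0)
 *       wl_display_dispatch_queue_pending(dpy, queue);
 *   wl_display_flush(dpy);
 *   // check: after poll(), either consume or abandon the read intent
 *   if (fd_readable) wl_display_read_events(dpy);
 *   else             wl_display_cancel_read(dpy);
 *   // dispatch: deliver whatever read_events queued for us
 *   wl_display_dispatch_queue_pending(dpy, queue);
 */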
698 __thread_func_disp_finalize(tpl_gsource *gsource)
700 tpl_wl_vk_display_t *wl_vk_display =
701 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
703 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
704 TPL_LOG_D("[D_FINALIZE]", "wl_vk_display(%p) tpl_gsource(%p)",
705 wl_vk_display, gsource);
707 if (wl_vk_display->wl_initialized)
708 _thread_wl_display_fini(wl_vk_display);
710 wl_vk_display->gsource_finalized = TPL_TRUE;
712 tpl_gcond_signal(&wl_vk_display->disp_cond);
713 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
719 static tpl_gsource_functions disp_funcs = {
720 .prepare = __thread_func_disp_prepare,
721 .check = __thread_func_disp_check,
722 .dispatch = __thread_func_disp_dispatch,
723 .finalize = __thread_func_disp_finalize,
727 __tpl_wl_vk_display_init(tpl_display_t *display)
731 tpl_wl_vk_display_t *wl_vk_display = NULL;
733 /* Do not allow default display in wayland */
734 if (!display->native_handle) {
735 TPL_ERR("Invalid native handle for display.");
736 return TPL_ERROR_INVALID_PARAMETER;
739 if (!_check_native_handle_is_wl_display(display->native_handle)) {
740 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
741 return TPL_ERROR_INVALID_PARAMETER;
744 wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
745 sizeof(tpl_wl_vk_display_t));
746 if (!wl_vk_display) {
747 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
748 return TPL_ERROR_OUT_OF_MEMORY;
751 display->backend.data = wl_vk_display;
752 display->bufmgr_fd = -1;
754 wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
755 wl_vk_display->wl_initialized = TPL_FALSE;
757 wl_vk_display->ev_queue = NULL;
758 wl_vk_display->wl_display = (struct wl_display *)display->native_handle;
759 wl_vk_display->last_error = 0;
760 wl_vk_display->use_explicit_sync = TPL_FALSE; // default disabled
761 wl_vk_display->prepared = TPL_FALSE;
763 /* Wayland Interfaces */
764 #if TIZEN_FEATURE_ENABLE
765 wl_vk_display->explicit_sync = NULL;
767 wl_vk_display->wl_tbm_client = NULL;
769 /* Vulkan specific surface capabilities */
770 wl_vk_display->min_buffer = 2;
771 wl_vk_display->max_buffer = VK_CLIENT_QUEUE_SIZE;
772 wl_vk_display->present_modes = TPL_DISPLAY_PRESENT_MODE_FIFO;
774 wl_vk_display->use_wait_vblank = TPL_TRUE; // default enabled
776 char *env = tpl_getenv("TPL_WAIT_VBLANK");
777 if (env && !atoi(env)) {
778 wl_vk_display->use_wait_vblank = TPL_FALSE;
782 tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
784 tpl_gmutex_init(&wl_vk_display->disp_mutex);
785 tpl_gcond_init(&wl_vk_display->disp_cond);
788 wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
789 (tpl_gthread_func)_thread_init,
790 (void *)wl_vk_display);
791 if (!wl_vk_display->thread) {
792 TPL_ERR("Failed to create wl_vk_thread");
796 wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
797 (void *)wl_vk_display,
798 wl_display_get_fd(wl_vk_display->wl_display),
800 &disp_funcs, SOURCE_TYPE_NORMAL);
801 if (!wl_vk_display->disp_source) {
802 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
803 display->native_handle,
804 wl_vk_display->thread);
808 tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex);
809 tpl_gcond_init(&wl_vk_display->tdm.tdm_cond);
811 wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread,
812 (void *)wl_vk_display,
813 wl_vk_display->tdm.tdm_display_fd,
815 &tdm_funcs, SOURCE_TYPE_NORMAL);
816 if (!wl_vk_display->tdm.tdm_source) {
817 TPL_ERR("Failed to create tdm_gsource\n");
821 TPL_INFO("[DISPLAY_INIT]",
822 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
824 wl_vk_display->thread,
825 wl_vk_display->wl_display);
827 TPL_INFO("[DISPLAY_INIT]",
828 "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
829 wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
830 wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
832 return TPL_ERROR_NONE;
835 if (wl_vk_display->tdm.tdm_source) {
836 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
837 while (!wl_vk_display->tdm.gsource_finalized) {
838 tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
839 tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
841 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
844 if (wl_vk_display->disp_source) {
845 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
846 while (!wl_vk_display->gsource_finalized) {
847 tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
848 tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
850 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
853 if (wl_vk_display->thread) {
854 tpl_gthread_destroy(wl_vk_display->thread);
857 tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
858 tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
859 tpl_gcond_clear(&wl_vk_display->disp_cond);
860 tpl_gmutex_clear(&wl_vk_display->disp_mutex);
862 wl_vk_display->thread = NULL;
865 display->backend.data = NULL;
866 return TPL_ERROR_INVALID_OPERATION;
870 __tpl_wl_vk_display_fini(tpl_display_t *display)
872 tpl_wl_vk_display_t *wl_vk_display;
876 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
878 TPL_INFO("[DISPLAY_FINI]",
879 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
881 wl_vk_display->thread,
882 wl_vk_display->wl_display);
884 if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) {
885 /* This protects against unexpected situations in which g_cond_wait
886 * cannot work normally.
887 * When calling tpl_gsource_destroy() with destroy_in_thread set to TPL_TRUE,
888 * the caller should call tpl_gcond_wait() in a loop that checks the finalized flag
890 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
891 while (!wl_vk_display->tdm.gsource_finalized) {
892 tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
893 tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
895 wl_vk_display->tdm.tdm_source = NULL;
896 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
899 /* This protects against unexpected situations in which g_cond_wait
900 * cannot work normally.
901 * When calling tpl_gsource_destroy() with destroy_in_thread set to TPL_TRUE,
902 * the caller should call tpl_gcond_wait() in a loop that checks the finalized flag
904 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
905 while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) {
906 tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
907 tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
909 wl_vk_display->disp_source = NULL;
910 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
912 if (wl_vk_display->thread) {
913 tpl_gthread_destroy(wl_vk_display->thread);
914 wl_vk_display->thread = NULL;
917 tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
918 tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
919 tpl_gcond_clear(&wl_vk_display->disp_cond);
920 tpl_gmutex_clear(&wl_vk_display->disp_mutex);
922 tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);
927 display->backend.data = NULL;
931 __tpl_wl_vk_display_query_config(tpl_display_t *display,
932 tpl_surface_type_t surface_type,
933 int red_size, int green_size,
934 int blue_size, int alpha_size,
935 int color_depth, int *native_visual_id,
940 if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
941 green_size == 8 && blue_size == 8 &&
942 (color_depth == 32 || color_depth == 24)) {
944 if (alpha_size == 8) {
945 if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
946 if (is_slow) *is_slow = TPL_FALSE;
947 return TPL_ERROR_NONE;
949 if (alpha_size == 0) {
950 if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
951 if (is_slow) *is_slow = TPL_FALSE;
952 return TPL_ERROR_NONE;
956 return TPL_ERROR_INVALID_PARAMETER;
960 __tpl_wl_vk_display_filter_config(tpl_display_t *display,
965 TPL_IGNORE(visual_id);
966 TPL_IGNORE(alpha_size);
967 return TPL_ERROR_NONE;
971 __tpl_wl_vk_display_query_window_supported_buffer_count(
972 tpl_display_t *display,
973 tpl_handle_t window, int *min, int *max)
975 tpl_wl_vk_display_t *wl_vk_display = NULL;
980 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
981 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
983 if (min) *min = wl_vk_display->min_buffer;
984 if (max) *max = wl_vk_display->max_buffer;
986 return TPL_ERROR_NONE;
990 __tpl_wl_vk_display_query_window_supported_present_modes(
991 tpl_display_t *display,
992 tpl_handle_t window, int *present_modes)
994 tpl_wl_vk_display_t *wl_vk_display = NULL;
999 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
1000 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1002 if (present_modes) {
1003 *present_modes = wl_vk_display->present_modes;
1006 return TPL_ERROR_NONE;
1010 _tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
1012 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1013 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1014 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1015 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1016 tpl_bool_t need_to_release = TPL_FALSE;
1017 tpl_bool_t need_to_cancel = TPL_FALSE;
1018 buffer_status_t status = RELEASED;
1021 while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
1022 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1023 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
1024 wl_vk_buffer = wl_vk_surface->buffers[idx];
1027 wl_vk_surface->buffers[idx] = NULL;
1028 wl_vk_surface->buffer_cnt--;
1030 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1031 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1036 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1038 tpl_gmutex_lock(&wl_vk_buffer->mutex);
1040 status = wl_vk_buffer->status;
1042 TPL_INFO("[BUFFER_CLEAR]",
1043 "[%d] wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
1044 idx, wl_vk_surface, wl_vk_buffer,
1045 wl_vk_buffer->tbm_surface,
1046 status_to_string[status]);
1048 if (status >= ENQUEUED) {
1049 tpl_bool_t need_to_wait = TPL_FALSE;
1050 tpl_result_t wait_result = TPL_ERROR_NONE;
1052 if (!wl_vk_display->use_explicit_sync &&
1053 status < WAITING_VBLANK)
1054 need_to_wait = TPL_TRUE;
1056 if (wl_vk_display->use_explicit_sync &&
1058 need_to_wait = TPL_TRUE;
1061 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1062 wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond,
1063 &wl_vk_buffer->mutex,
1065 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1067 status = wl_vk_buffer->status;
1069 if (wait_result == TPL_ERROR_TIME_OUT)
1070 TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
1075 /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
1076 /* It has been acquired but has not yet been released, so this
1077 * buffer must be released. */
1078 need_to_release = (status >= ACQUIRED && status <= COMMITTED);
1080 /* After dequeue, it has not been enqueued yet
1081 * so cancel_dequeue must be performed. */
1082 need_to_cancel = (status == DEQUEUED);
1084 if (swapchain && swapchain->tbm_queue) {
1085 if (need_to_release) {
1086 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
1087 wl_vk_buffer->tbm_surface);
1088 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1089 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
1090 wl_vk_buffer->tbm_surface, tsq_err);
1093 if (need_to_cancel) {
1094 tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
1095 wl_vk_buffer->tbm_surface);
1096 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1097 TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
1098 wl_vk_buffer->tbm_surface, tsq_err);
1102 wl_vk_buffer->status = RELEASED;
1104 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
1106 if (need_to_release || need_to_cancel)
1107 tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);
1109 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1115 static tdm_client_vblank*
1116 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1118 tdm_client_vblank *vblank = NULL;
1119 tdm_client_output *tdm_output = NULL;
1120 tdm_error tdm_err = TDM_ERROR_NONE;
1123 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1127 tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1128 if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1129 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1133 vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1134 if (!vblank || tdm_err != TDM_ERROR_NONE) {
1135 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1139 tdm_err = tdm_client_handle_pending_events(tdm_client);
1140 if (tdm_err != TDM_ERROR_NONE) {
1141 TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err);
1144 tdm_client_vblank_set_enable_fake(vblank, 1);
1145 tdm_client_vblank_set_sync(vblank, 0);
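/* Sketch of how this vblank object is used later for FIFO throttling (the
 * wait call is issued at commit time; the handler name here is hypothetical):
 *
 *   tdm_err = tdm_client_vblank_wait(vblank, wl_vk_surface->post_interval,
 *                                    __cb_tdm_client_vblank, // assumed name
 *                                    (void *)wl_vk_surface);
 *   // on vblank the handler sets wl_vk_surface->vblank_done = TPL_TRUE and
 *   // commits the next buffer queued in vblank_waiting_buffers.
 */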
1151 _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
1153 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1155 /* tbm_surface_queue will be created at swapchain_create */
1157 if (wl_vk_display->use_wait_vblank) {
1158 wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
1159 wl_vk_display->tdm.tdm_client);
1160 if (wl_vk_surface->vblank) {
1161 TPL_INFO("[VBLANK_INIT]",
1162 "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
1163 wl_vk_surface, wl_vk_display->tdm.tdm_client,
1164 wl_vk_surface->vblank);
1166 wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
1167 if (!wl_vk_surface->vblank_waiting_buffers) {
1168 tdm_client_vblank_destroy(wl_vk_surface->vblank);
1169 wl_vk_surface->vblank = NULL;
1174 #if TIZEN_FEATURE_ENABLE
1175 if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
1176 wl_vk_surface->surface_sync =
1177 zwp_linux_explicit_synchronization_v1_get_synchronization(
1178 wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
1179 if (wl_vk_surface->surface_sync) {
1180 TPL_INFO("[EXPLICIT_SYNC_INIT]",
1181 "wl_vk_surface(%p) surface_sync(%p)",
1182 wl_vk_surface, wl_vk_surface->surface_sync);
1184 TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
1186 wl_vk_display->use_explicit_sync = TPL_FALSE;
1191 wl_vk_surface->vblank_enable = (wl_vk_surface->vblank != NULL &&
1192 wl_vk_surface->post_interval > 0);
1196 _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
1198 TPL_INFO("[SURFACE_FINI]",
1199 "wl_vk_surface(%p) wl_surface(%p)",
1200 wl_vk_surface, wl_vk_surface->wl_surface);
1202 if (wl_vk_surface->vblank_waiting_buffers) {
1203 __tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
1204 wl_vk_surface->vblank_waiting_buffers = NULL;
1207 #if TIZEN_FEATURE_ENABLE
1208 if (wl_vk_surface->surface_sync) {
1209 TPL_INFO("[SURFACE_SYNC_DESTROY]",
1210 "wl_vk_surface(%p) surface_sync(%p)",
1211 wl_vk_surface, wl_vk_surface->surface_sync);
1212 zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
1213 wl_vk_surface->surface_sync = NULL;
1217 if (wl_vk_surface->vblank) {
1218 TPL_INFO("[VBLANK_DESTROY]",
1219 "wl_vk_surface(%p) vblank(%p)",
1220 wl_vk_surface, wl_vk_surface->vblank);
1221 tdm_client_vblank_destroy(wl_vk_surface->vblank);
1222 wl_vk_surface->vblank = NULL;
1227 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1229 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1231 wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1233 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1234 if (message & INIT_SURFACE) { /* Initialize surface */
1235 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) initialize message received!",
1237 _thread_wl_vk_surface_init(wl_vk_surface);
1238 wl_vk_surface->initialized_in_thread = TPL_TRUE;
1239 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1242 if (message & ACQUIRABLE) { /* Acquirable message */
1243 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) acquirable message received!",
1245 if (_thread_surface_queue_acquire(wl_vk_surface)
1246 != TPL_ERROR_NONE) {
1247 TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1252 if (message & CREATE_QUEUE) { /* Create tbm_surface_queue */
1253 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) queue creation message received!",
1255 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1256 != TPL_ERROR_NONE) {
1257 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1260 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1263 if (message & DESTROY_QUEUE) { /* swapchain destroy */
1264 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) swapchain destroy message received!",
1266 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1267 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1270 /* init to NONE_MESSAGE */
1271 wl_vk_surface->sent_message = NONE_MESSAGE;
1273 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1279 __thread_func_surf_finalize(tpl_gsource *gsource)
1281 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1283 wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1284 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1286 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1287 TPL_LOG_D("[S_FINALIZE]", "wl_vk_surface(%p) tpl_gsource(%p)",
1288 wl_vk_surface, gsource);
1290 _thread_wl_vk_surface_fini(wl_vk_surface);
1292 wl_vk_surface->gsource_finalized = TPL_TRUE;
1294 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1295 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1298 static tpl_gsource_functions surf_funcs = {
1301 .dispatch = __thread_func_surf_dispatch,
1302 .finalize = __thread_func_surf_finalize,
1307 __tpl_wl_vk_surface_init(tpl_surface_t *surface)
1309 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1310 tpl_wl_vk_display_t *wl_vk_display = NULL;
1311 tpl_gsource *surf_source = NULL;
1313 TPL_ASSERT(surface);
1314 TPL_ASSERT(surface->display);
1315 TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1316 TPL_ASSERT(surface->native_handle);
1318 wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
1319 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1321 wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
1322 sizeof(tpl_wl_vk_surface_t));
1323 if (!wl_vk_surface) {
1324 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
1325 return TPL_ERROR_OUT_OF_MEMORY;
1328 surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
1329 -1, FD_TYPE_NONE, &surf_funcs, SOURCE_TYPE_NORMAL);
1331 TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
1333 free(wl_vk_surface);
1334 surface->backend.data = NULL;
1335 return TPL_ERROR_INVALID_OPERATION;
1338 surface->backend.data = (void *)wl_vk_surface;
1339 surface->width = -1;
1340 surface->height = -1;
1342 wl_vk_surface->surf_source = surf_source;
1343 wl_vk_surface->swapchain = NULL;
1345 wl_vk_surface->wl_vk_display = wl_vk_display;
1346 wl_vk_surface->wl_surface = (struct wl_surface *)surface->native_handle;
1347 wl_vk_surface->tpl_surface = surface;
1349 wl_vk_surface->reset = TPL_FALSE;
1350 wl_vk_surface->is_activated = TPL_FALSE;
1351 wl_vk_surface->vblank_done = TPL_TRUE;
1352 wl_vk_surface->initialized_in_thread = TPL_FALSE;
1354 wl_vk_surface->render_done_cnt = 0;
1356 wl_vk_surface->vblank = NULL;
1357 wl_vk_surface->vblank_enable = TPL_FALSE;
1358 #if TIZEN_FEATURE_ENABLE
1359 wl_vk_surface->surface_sync = NULL;
1362 wl_vk_surface->sent_message = NONE_MESSAGE;
1364 wl_vk_surface->post_interval = surface->post_interval;
1368 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1369 wl_vk_surface->buffers[i] = NULL;
1370 wl_vk_surface->buffer_cnt = 0;
1373 tpl_gmutex_init(&wl_vk_surface->surf_mutex);
1374 tpl_gcond_init(&wl_vk_surface->surf_cond);
1376 tpl_gmutex_init(&wl_vk_surface->buffers_mutex);
1378 /* Initialize in thread */
1379 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1380 wl_vk_surface->sent_message = INIT_SURFACE;
1381 tpl_gsource_send_message(wl_vk_surface->surf_source,
1382 wl_vk_surface->sent_message);
1383 while (!wl_vk_surface->initialized_in_thread)
1384 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1385 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1387 TPL_INFO("[SURFACE_INIT]",
1388 "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
1389 surface, wl_vk_surface, wl_vk_surface->surf_source);
1391 return TPL_ERROR_NONE;
1395 __tpl_wl_vk_surface_fini(tpl_surface_t *surface)
1397 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1398 tpl_wl_vk_display_t *wl_vk_display = NULL;
1400 TPL_ASSERT(surface);
1401 TPL_ASSERT(surface->display);
1403 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1404 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1406 wl_vk_display = (tpl_wl_vk_display_t *)
1407 surface->display->backend.data;
1408 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1410 TPL_INFO("[SURFACE_FINI][BEGIN]",
1411 "wl_vk_surface(%p) wl_surface(%p)",
1412 wl_vk_surface, wl_vk_surface->wl_surface);
1414 if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
1415 /* finalize swapchain */
1419 wl_vk_surface->swapchain = NULL;
1421 /* This protects against unexpected situations in which g_cond_wait
1422 * cannot work normally.
1423 * When calling tpl_gsource_destroy() with destroy_in_thread set to TPL_TRUE,
1424 * the caller should call tpl_gcond_wait() in a loop that checks the finalized flag
1426 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1427 while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) {
1428 tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
1429 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1431 wl_vk_surface->surf_source = NULL;
1432 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1434 _print_buffer_lists(wl_vk_surface);
1436 wl_vk_surface->wl_surface = NULL;
1437 wl_vk_surface->wl_vk_display = NULL;
1438 wl_vk_surface->tpl_surface = NULL;
1440 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1441 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1442 tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
1443 tpl_gcond_clear(&wl_vk_surface->surf_cond);
1445 TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);
1447 free(wl_vk_surface);
1448 surface->backend.data = NULL;
1452 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1455 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1457 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1459 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1461 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1463 TPL_INFO("[SET_POST_INTERVAL]",
1464 "wl_vk_surface(%p) post_interval(%d -> %d)",
1465 wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1467 wl_vk_surface->post_interval = post_interval;
1468 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1472 return TPL_ERROR_NONE;
1476 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1478 TPL_ASSERT(surface);
1479 TPL_ASSERT(surface->backend.data);
1481 tpl_wl_vk_surface_t *wl_vk_surface =
1482 (tpl_wl_vk_surface_t *)surface->backend.data;
1484 return !(wl_vk_surface->reset);
1488 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1491 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1492 tpl_wl_vk_display_t *wl_vk_display = NULL;
1493 tpl_wl_vk_swapchain_t *swapchain = NULL;
1494 tpl_surface_t *surface = NULL;
1495 tpl_bool_t is_activated = TPL_FALSE;
1498 wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1499 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1501 wl_vk_display = wl_vk_surface->wl_vk_display;
1502 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1504 surface = wl_vk_surface->tpl_surface;
1505 TPL_CHECK_ON_NULL_RETURN(surface);
1507 swapchain = wl_vk_surface->swapchain;
1508 TPL_CHECK_ON_NULL_RETURN(swapchain);
1510 /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1511 * the changed window size at the next frame. */
1512 width = tbm_surface_queue_get_width(tbm_queue);
1513 height = tbm_surface_queue_get_height(tbm_queue);
1514 if (surface->width != width || surface->height != height) {
1515 TPL_INFO("[QUEUE_RESIZE]",
1516 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1517 wl_vk_surface, tbm_queue,
1518 surface->width, surface->height, width, height);
1521 /* When queue_reset_callback is called, if is_activated differs from its
1522 * previous state, change the reset flag to TPL_TRUE to get a new buffer
1523 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1524 is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
1525 swapchain->tbm_queue);
1526 if (wl_vk_surface->is_activated != is_activated) {
1528 TPL_INFO("[ACTIVATED]",
1529 "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1530 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1532 TPL_INFO("[DEACTIVATED]",
1533 " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1534 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1538 wl_vk_surface->reset = TPL_TRUE;
1540 if (surface->reset_cb)
1541 surface->reset_cb(surface->reset_data);
1545 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1548 TPL_IGNORE(tbm_queue);
1550 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1551 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1553 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1554 if (wl_vk_surface->sent_message == NONE_MESSAGE) {
1555 wl_vk_surface->sent_message = ACQUIRABLE;
1556 tpl_gsource_send_message(wl_vk_surface->surf_source,
1557 wl_vk_surface->sent_message);
1559 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1563 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1565 TPL_ASSERT (wl_vk_surface);
1567 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1568 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1569 tbm_surface_queue_h tbm_queue = NULL;
1570 tbm_bufmgr bufmgr = NULL;
1571 unsigned int capability;
1573 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1574 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1576 if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
1577 TPL_ERR("buffer count(%d) must be higher than (%d)",
1578 swapchain->properties.buffer_count,
1579 wl_vk_display->min_buffer);
1580 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1581 return TPL_ERROR_INVALID_PARAMETER;
1584 if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
1585 TPL_ERR("buffer count(%d) must be lower than (%d)",
1586 swapchain->properties.buffer_count,
1587 wl_vk_display->max_buffer);
1588 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1589 return TPL_ERROR_INVALID_PARAMETER;
1592 if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
1593 TPL_ERR("Unsupported present_mode(%d)",
1594 swapchain->properties.present_mode);
1595 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1596 return TPL_ERROR_INVALID_PARAMETER;
1599 if (swapchain->old_swapchain_buffers) {
1600 TPL_ERR("Should be destroy old_swapchain before create");
1601 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1602 return TPL_ERROR_INVALID_OPERATION;
1605 if (swapchain->tbm_queue) {
1606 int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
1607 int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
1609 if (swapchain->swapchain_buffers) {
1610 swapchain->old_swapchain_buffers = swapchain->swapchain_buffers;
1611 swapchain->swapchain_buffers = NULL;
1614 if (old_width != swapchain->properties.width ||
1615 old_height != swapchain->properties.height) {
1616 tbm_surface_queue_reset(swapchain->tbm_queue,
1617 swapchain->properties.width,
1618 swapchain->properties.height,
1619 TBM_FORMAT_ARGB8888);
1620 TPL_INFO("[RESIZE]",
1621 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
1622 wl_vk_surface, swapchain, swapchain->tbm_queue,
1623 old_width, old_height,
1624 swapchain->properties.width,
1625 swapchain->properties.height);
1628 swapchain->properties.buffer_count =
1629 tbm_surface_queue_get_size(swapchain->tbm_queue);
1631 wl_vk_surface->reset = TPL_FALSE;
1633 __tpl_util_atomic_inc(&swapchain->ref_cnt);
1634 swapchain->create_done = TPL_TRUE;
1636 TPL_INFO("[SWAPCHAIN_REUSE]",
1637 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
1638 wl_vk_surface, swapchain, swapchain->tbm_queue,
1639 swapchain->properties.buffer_count);
1641 return TPL_ERROR_NONE;
1644 bufmgr = tbm_bufmgr_init(-1);
1645 capability = tbm_bufmgr_get_capability(bufmgr);
1646 tbm_bufmgr_deinit(bufmgr);
1648 if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1649 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1650 wl_vk_display->wl_tbm_client,
1651 wl_vk_surface->wl_surface,
1652 swapchain->properties.buffer_count,
1653 swapchain->properties.width,
1654 swapchain->properties.height,
1655 TBM_FORMAT_ARGB8888);
1657 tbm_queue = wayland_tbm_client_create_surface_queue(
1658 wl_vk_display->wl_tbm_client,
1659 wl_vk_surface->wl_surface,
1660 swapchain->properties.buffer_count,
1661 swapchain->properties.width,
1662 swapchain->properties.height,
1663 TBM_FORMAT_ARGB8888);
1667 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1669 swapchain->result = TPL_ERROR_OUT_OF_MEMORY;
1670 return TPL_ERROR_OUT_OF_MEMORY;
1673 if (tbm_surface_queue_set_modes(
1674 tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1675 TBM_SURFACE_QUEUE_ERROR_NONE) {
1676 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1678 tbm_surface_queue_destroy(tbm_queue);
1679 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1680 return TPL_ERROR_INVALID_OPERATION;
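/* Note on the mode set above: as I read it, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE
 * makes the queue hand out buffers in a fixed rotation, which a Vulkan
 * swapchain relies on for deterministic image-index cycling (authoritative
 * definition: tbm_surface_queue.h). */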
1683 if (tbm_surface_queue_add_reset_cb(
1685 __cb_tbm_queue_reset_callback,
1686 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1687 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1689 tbm_surface_queue_destroy(tbm_queue);
1690 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1691 return TPL_ERROR_INVALID_OPERATION;
1694 if (tbm_surface_queue_add_acquirable_cb(
1696 __cb_tbm_queue_acquirable_callback,
1697 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1698 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1700 tbm_surface_queue_destroy(tbm_queue);
1701 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1702 return TPL_ERROR_INVALID_OPERATION;
1705 swapchain->tbm_queue = tbm_queue;
1706 swapchain->create_done = TPL_TRUE;
1708 TPL_INFO("[TBM_QUEUE_CREATED]",
1709 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1710 wl_vk_surface, swapchain, tbm_queue);
1712 return TPL_ERROR_NONE;
1716 __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
1717 tbm_format format, int width,
1718 int height, int buffer_count, int present_mode)
1720 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1721 tpl_wl_vk_display_t *wl_vk_display = NULL;
1722 tpl_wl_vk_swapchain_t *swapchain = NULL;
1724 TPL_ASSERT(surface);
1725 TPL_ASSERT(surface->display);
1727 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1728 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1730 wl_vk_display = (tpl_wl_vk_display_t *)
1731 surface->display->backend.data;
1732 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1734 swapchain = wl_vk_surface->swapchain;
1736 if (swapchain == NULL) {
1738 (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
1739 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
1740 swapchain->tbm_queue = NULL;
1743 swapchain->properties.buffer_count = buffer_count;
1744 swapchain->properties.width = width;
1745 swapchain->properties.height = height;
1746 swapchain->properties.present_mode = present_mode;
1747 swapchain->wl_vk_surface = wl_vk_surface;
1748 swapchain->properties.format = format;
1749 swapchain->swapchain_buffers = NULL;
1750 swapchain->old_swapchain_buffers = NULL;
1752 swapchain->result = TPL_ERROR_NONE;
1753 swapchain->create_done = TPL_FALSE;
1755 wl_vk_surface->swapchain = swapchain;
1757 __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
1759 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1760 /* send swapchain create tbm_queue message */
1761 wl_vk_surface->sent_message = CREATE_QUEUE;
1762 tpl_gsource_send_message(wl_vk_surface->surf_source,
1763 wl_vk_surface->sent_message);
1764 while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE)
1765 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1766 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1768 TPL_CHECK_ON_FALSE_ASSERT_FAIL(
1769 swapchain->tbm_queue != NULL,
1770 "[CRITICAL FAIL] Failed to create tbm_surface_queue");
1772 wl_vk_surface->reset = TPL_FALSE;
1774 return TPL_ERROR_NONE;
1778 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1780 TPL_ASSERT(wl_vk_surface);
1782 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1784 TPL_CHECK_ON_NULL_RETURN(swapchain);
1786 if (swapchain->tbm_queue) {
1787 TPL_INFO("[TBM_QUEUE_DESTROY]",
1788 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1789 wl_vk_surface, swapchain, swapchain->tbm_queue);
1790 tbm_surface_queue_destroy(swapchain->tbm_queue);
1791 swapchain->tbm_queue = NULL;
1795 void __untrack_swapchain_buffers(tpl_wl_vk_surface_t *wl_vk_surface, tbm_surface_h *sc_buffers)
1797 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1799 for (int i = 0; i < swapchain->properties.buffer_count; i++) {
1800 if (sc_buffers[i]) {
1801 TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
1802 i, wl_vk_surface, swapchain, sc_buffers[i],
1803 _get_tbm_surface_bo_name(sc_buffers[i]));
1804 tbm_surface_internal_unref(sc_buffers[i]);
1805 sc_buffers[i] = NULL;
1811 __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
1813 tpl_wl_vk_swapchain_t *swapchain = NULL;
1814 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1815 tpl_wl_vk_display_t *wl_vk_display = NULL;
1817 TPL_ASSERT(surface);
1818 TPL_ASSERT(surface->display);
1820 wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
1821 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1823 wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
1824 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1826 swapchain = wl_vk_surface->swapchain;
1828 TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
1830 return TPL_ERROR_INVALID_OPERATION;
1833 if (!swapchain->tbm_queue) {
1834 TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.",
1835 wl_vk_surface, wl_vk_surface->swapchain);
1836 return TPL_ERROR_INVALID_OPERATION;
1839 if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
1840 TPL_INFO("[DESTROY_SWAPCHAIN]",
1841 "wl_vk_surface(%p) swapchain(%p) still valid.",
1842 wl_vk_surface, swapchain);
1843 if (swapchain->old_swapchain_buffers) {
1844 __untrack_swapchain_buffers(wl_vk_surface, swapchain->old_swapchain_buffers);
1845 free(swapchain->old_swapchain_buffers);
1846 swapchain->old_swapchain_buffers = NULL;
1848 return TPL_ERROR_NONE;
1851 TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
1852 "wl_vk_surface(%p) swapchain(%p)",
1853 wl_vk_surface, wl_vk_surface->swapchain);
1855 if (swapchain->swapchain_buffers) {
1856 __untrack_swapchain_buffers(wl_vk_surface, swapchain->swapchain_buffers);
1857 free(swapchain->swapchain_buffers);
1858 swapchain->swapchain_buffers = NULL;
1861 _tpl_wl_vk_surface_buffer_clear(wl_vk_surface);
1863 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1864 wl_vk_surface->sent_message = DESTROY_QUEUE;
1865 tpl_gsource_send_message(wl_vk_surface->surf_source,
1866 wl_vk_surface->sent_message);
1867 while (swapchain->tbm_queue)
1868 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1869 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1871 _print_buffer_lists(wl_vk_surface);
1874 wl_vk_surface->swapchain = NULL;
1876 return TPL_ERROR_NONE;
1880 __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
1881 tbm_surface_h **buffers,
1884 TPL_ASSERT(surface);
1885 TPL_ASSERT(surface->backend.data);
1886 TPL_ASSERT(surface->display);
1887 TPL_ASSERT(surface->display->backend.data);
1889 tpl_wl_vk_surface_t *wl_vk_surface =
1890 (tpl_wl_vk_surface_t *)surface->backend.data;
1891 tpl_wl_vk_display_t *wl_vk_display =
1892 (tpl_wl_vk_display_t *)surface->display->backend.data;
1893 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1894 tpl_result_t ret = TPL_ERROR_NONE;
1897 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1898 TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
1900 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1903 *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
1904 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1905 return TPL_ERROR_NONE;
1908 swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
1910 sizeof(tbm_surface_h));
1911 if (!swapchain->swapchain_buffers) {
1912 TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
1914 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1915 return TPL_ERROR_OUT_OF_MEMORY;
1918 ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
1919 swapchain->tbm_queue,
1920 swapchain->swapchain_buffers,
1923 TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
1924 wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
1925 free(swapchain->swapchain_buffers);
1926 swapchain->swapchain_buffers = NULL;
1927 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1928 return TPL_ERROR_INVALID_OPERATION;
1931 for (i = 0; i < *buffer_count; i++) {
1932 if (swapchain->swapchain_buffers[i]) {
1933 TPL_INFO("[TRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
1934 i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
1935 _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1936 tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
1940 *buffers = swapchain->swapchain_buffers;
1942 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1944 return TPL_ERROR_NONE;
1948 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
1950 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
1951 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1953 TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
1954 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);
1956 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
1957 if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
1958 wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
1959 wl_vk_surface->buffer_cnt--;
1961 wl_vk_buffer->idx = -1;
1963 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1965 wl_display_flush(wl_vk_display->wl_display);
1967 if (wl_vk_buffer->wl_buffer) {
1968 wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
1969 wl_vk_buffer->wl_buffer);
1970 wl_vk_buffer->wl_buffer = NULL;
1973 #if TIZEN_FEATURE_ENABLE
1974 if (wl_vk_buffer->buffer_release) {
1975 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
1976 wl_vk_buffer->buffer_release = NULL;
1980 if (wl_vk_buffer->release_fence_fd != -1) {
1981 close(wl_vk_buffer->release_fence_fd);
1982 wl_vk_buffer->release_fence_fd = -1;
1985 if (wl_vk_buffer->rects) {
1986 free(wl_vk_buffer->rects);
1987 wl_vk_buffer->rects = NULL;
1988 wl_vk_buffer->num_rects = 0;
1991 wl_vk_buffer->tbm_surface = NULL;
1992 wl_vk_buffer->bo_name = -1;
1997 static tpl_wl_vk_buffer_t *
1998 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
2000 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2001 tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
2002 (void **)&wl_vk_buffer);
2003 return wl_vk_buffer;
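/* Typical lookup from a release path, sketched (hypothetical caller; the
 * real callbacks in this file follow the same shape):
 *
 *   tpl_wl_vk_buffer_t *wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
 *   if (wl_vk_buffer) {
 *       tpl_gmutex_lock(&wl_vk_buffer->mutex);
 *       wl_vk_buffer->status = RELEASED;
 *       tpl_gcond_signal(&wl_vk_buffer->cond);
 *       tpl_gmutex_unlock(&wl_vk_buffer->mutex);
 *   }
 */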
static tpl_wl_vk_buffer_t *
_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
                     tbm_surface_h tbm_surface)
{
    tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;

    wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);

    if (!wl_vk_buffer) {
        wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
        TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);

        tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
                                           (tbm_data_free)__cb_wl_vk_buffer_free);
        tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
                                           wl_vk_buffer);

        wl_vk_buffer->wl_buffer = NULL;
        wl_vk_buffer->tbm_surface = tbm_surface;
        wl_vk_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
        wl_vk_buffer->wl_vk_surface = wl_vk_surface;

        wl_vk_buffer->status = RELEASED;

        wl_vk_buffer->acquire_fence_fd = -1;
        wl_vk_buffer->release_fence_fd = -1;

        wl_vk_buffer->dx = 0;
        wl_vk_buffer->dy = 0;
        wl_vk_buffer->width = tbm_surface_get_width(tbm_surface);
        wl_vk_buffer->height = tbm_surface_get_height(tbm_surface);

        wl_vk_buffer->rects = NULL;
        wl_vk_buffer->num_rects = 0;

        wl_vk_buffer->need_to_commit = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
        wl_vk_buffer->buffer_release = NULL;
#endif

        tpl_gmutex_init(&wl_vk_buffer->mutex);
        tpl_gcond_init(&wl_vk_buffer->cond);

        tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
        {
            int i;
            for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
                if (wl_vk_surface->buffers[i] == NULL) break;

            /* Reaching this point means no free slot was found;
             * it may indicate a critical buffer leak. */
            if (i == BUFFER_ARRAY_SIZE) {
                tpl_wl_vk_buffer_t *evicted_buffer = NULL;
                int evicted_idx = 0; /* evict the frontmost buffer */

                evicted_buffer = wl_vk_surface->buffers[evicted_idx];

                TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
                         wl_vk_surface);
                TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
                         evicted_buffer, evicted_buffer->tbm_surface,
                         status_to_string[evicted_buffer->status]);

                /* [TODO] consider a better eviction policy */
                wl_vk_surface->buffer_cnt--;
                wl_vk_surface->buffers[evicted_idx] = NULL;

                i = evicted_idx;
            }

            wl_vk_surface->buffer_cnt++;
            wl_vk_surface->buffers[i] = wl_vk_buffer;
            wl_vk_buffer->idx = i;
        }
        tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);

        TPL_INFO("[WL_VK_BUFFER_CREATE]",
                 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
                 wl_vk_surface, wl_vk_buffer, tbm_surface,
                 wl_vk_buffer->bo_name);
    }

    return wl_vk_buffer;
}

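/* Backend dequeue entry point (the vkAcquireNextImageKHR path). Waits
 * until the tbm_queue is dequeueable (bounded by timeout_ns unless it is
 * UINT64_MAX), dequeues a tbm_surface and marks it DEQUEUED. Under
 * explicit sync, ownership of the stored release fence fd is transferred
 * to the caller via release_fence. */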
static tbm_surface_h
__tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
                                   uint64_t timeout_ns,
                                   int32_t *release_fence)
{
    TPL_ASSERT(surface);
    TPL_ASSERT(surface->backend.data);
    TPL_ASSERT(surface->display);
    TPL_ASSERT(surface->display->backend.data);
    TPL_OBJECT_CHECK_RETURN(surface, NULL);

    tpl_wl_vk_surface_t *wl_vk_surface =
        (tpl_wl_vk_surface_t *)surface->backend.data;
    tpl_wl_vk_display_t *wl_vk_display =
        (tpl_wl_vk_display_t *)surface->display->backend.data;
    tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
    tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;

    tbm_surface_h tbm_surface = NULL;
    tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;

    TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
    TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);

    TPL_OBJECT_UNLOCK(surface);
    TRACE_BEGIN("WAIT_DEQUEUEABLE");
    if (timeout_ns != UINT64_MAX) {
        tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
                    swapchain->tbm_queue, timeout_ns/1000);
    } else {
        tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
    }
    TRACE_END();
    TPL_OBJECT_LOCK(surface);

    if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
        TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
                timeout_ns);
        return NULL;
    } else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
        TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
                wl_vk_surface, swapchain->tbm_queue, tsq_err);
        return NULL;
    }

    tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);

    if (wl_vk_surface->reset) {
        TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
                 swapchain, swapchain->tbm_queue);
        tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
        return NULL;
    }

    tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
                                        &tbm_surface);
    if (!tbm_surface) {
        TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
                swapchain->tbm_queue, wl_vk_surface, tsq_err);
        tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
        return NULL;
    }

    tbm_surface_internal_ref(tbm_surface);

    wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
    TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");

    tpl_gmutex_lock(&wl_vk_buffer->mutex);
    wl_vk_buffer->status = DEQUEUED;

    if (release_fence) {
#if TIZEN_FEATURE_ENABLE
        if (wl_vk_surface->surface_sync) {
            *release_fence = wl_vk_buffer->release_fence_fd;
            TPL_LOG_D("[EXPLICIT_FENCE]", "wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
                      wl_vk_surface, wl_vk_buffer, *release_fence);
            wl_vk_buffer->release_fence_fd = -1;
        } else
#endif
        {
            *release_fence = -1;
        }
    }

    wl_vk_surface->reset = TPL_FALSE;

    TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
              wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
              release_fence ? *release_fence : -1);

    tpl_gmutex_unlock(&wl_vk_buffer->mutex);
    tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);

    return tbm_surface;
}

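/* Returns a dequeued but unused buffer to the tbm_queue without
 * presenting it, e.g. when the caller gives up on the image. */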
static tpl_result_t
__tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
                                  tbm_surface_h tbm_surface)
{
    TPL_ASSERT(surface);
    TPL_ASSERT(surface->backend.data);

    tpl_wl_vk_surface_t *wl_vk_surface =
        (tpl_wl_vk_surface_t *)surface->backend.data;
    tpl_wl_vk_swapchain_t *swapchain = NULL;
    tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
    tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;

    TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
                                  TPL_ERROR_INVALID_PARAMETER);

    swapchain = wl_vk_surface->swapchain;
    TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
    TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
                                 TPL_ERROR_INVALID_PARAMETER);

    wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
    if (wl_vk_buffer) {
        tpl_gmutex_lock(&wl_vk_buffer->mutex);
        wl_vk_buffer->status = RELEASED;
        tpl_gmutex_unlock(&wl_vk_buffer->mutex);
    }

    tbm_surface_internal_unref(tbm_surface);

    TPL_INFO("[CANCEL BUFFER]",
             "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
             wl_vk_surface, swapchain, tbm_surface,
             _get_tbm_surface_bo_name(tbm_surface));

    tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
                                               tbm_surface);
    if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
        TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
        return TPL_ERROR_INVALID_OPERATION;
    }

    return TPL_ERROR_NONE;
}

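/* Backend enqueue entry point (the vkQueuePresentKHR path). Saves the
 * damage rects and acquire fence fd on the wl_vk_buffer, then enqueues
 * the tbm_surface so the worker thread can acquire and commit it in
 * _thread_surface_queue_acquire(). */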
static tpl_result_t
__tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
                                   tbm_surface_h tbm_surface,
                                   int num_rects, const int *rects,
                                   int32_t acquire_fence)
{
    TPL_ASSERT(surface);
    TPL_ASSERT(surface->display);
    TPL_ASSERT(surface->backend.data);
    TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);

    tpl_wl_vk_surface_t *wl_vk_surface =
        (tpl_wl_vk_surface_t *)surface->backend.data;
    tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
    tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
    tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
    int bo_name = -1;

    TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
    TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
    TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
                                  TPL_ERROR_INVALID_PARAMETER);

    wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
    if (!wl_vk_buffer) {
        TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
        return TPL_ERROR_INVALID_PARAMETER;
    }

    bo_name = wl_vk_buffer->bo_name;

    tpl_gmutex_lock(&wl_vk_buffer->mutex);

    /* If region information was received, save it to wl_vk_buffer. */
    if (num_rects && rects) {
        if (wl_vk_buffer->rects != NULL) {
            free(wl_vk_buffer->rects);
            wl_vk_buffer->rects = NULL;
            wl_vk_buffer->num_rects = 0;
        }

        wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
        wl_vk_buffer->num_rects = num_rects;

        if (wl_vk_buffer->rects) {
            memcpy((char *)wl_vk_buffer->rects, (char *)rects,
                   sizeof(int) * 4 * num_rects);
        } else {
            TPL_ERR("Failed to allocate memory for rects info.");
        }
    }

    if (wl_vk_buffer->acquire_fence_fd != -1)
        close(wl_vk_buffer->acquire_fence_fd);

    wl_vk_buffer->acquire_fence_fd = acquire_fence;

    wl_vk_buffer->status = ENQUEUED;
    TPL_LOG_T("WL_VK",
              "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
              wl_vk_buffer, tbm_surface, bo_name, acquire_fence);

    tpl_gmutex_unlock(&wl_vk_buffer->mutex);

    tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
                                        tbm_surface);
    if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
        tbm_surface_internal_unref(tbm_surface);
        TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
                tbm_surface, wl_vk_surface, tsq_err);
        return TPL_ERROR_INVALID_OPERATION;
    }

    tbm_surface_internal_unref(tbm_surface);

    return TPL_ERROR_NONE;
}

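/* Listener for wl_buffer.release. __cb_wl_buffer_release() is defined
 * below with a struct wl_proxy * parameter rather than struct wl_buffer *,
 * hence the function-pointer cast. */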
static const struct wl_buffer_listener wl_buffer_release_listener = {
    (void *)__cb_wl_buffer_release,
};

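/* Drains all acquirable buffers from the swapchain's tbm_queue; judging
 * by the _thread_ prefix this runs on the backend's worker thread. A
 * wl_buffer proxy is created on first use, and each buffer is either
 * committed right away or, while a vblank wait is pending, queued on
 * vblank_waiting_buffers to be committed from the vblank callback. */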
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
{
    tbm_surface_h tbm_surface = NULL;
    tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
    tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
    tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
    tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
    tpl_bool_t ready_to_commit = TPL_TRUE;

    TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

    while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
        tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
                                            &tbm_surface);
        if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
            TPL_ERR("Failed to acquire from tbm_queue(%p)",
                    swapchain->tbm_queue);
            return TPL_ERROR_INVALID_OPERATION;
        }

        tbm_surface_internal_ref(tbm_surface);

        wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
        TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
                                       "wl_vk_buffer should not be NULL");

        tpl_gmutex_lock(&wl_vk_buffer->mutex);

        wl_vk_buffer->status = ACQUIRED;

        TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
                  wl_vk_buffer, tbm_surface,
                  _get_tbm_surface_bo_name(tbm_surface));

        if (wl_vk_buffer->wl_buffer == NULL) {
            wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
                        wl_vk_display->wl_tbm_client, tbm_surface);

            if (!wl_vk_buffer->wl_buffer) {
                TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
                         wl_vk_display->wl_tbm_client, tbm_surface);
            } else {
                if (wl_vk_buffer->acquire_fence_fd == -1 ||
                    wl_vk_display->use_explicit_sync == TPL_FALSE) {
                    wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
                                           &wl_buffer_release_listener, wl_vk_buffer);
                }

                TPL_LOG_T("WL_VK",
                          "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
                          wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
            }
        }

        if (!wl_vk_surface->vblank_enable || wl_vk_surface->vblank_done)
            ready_to_commit = TPL_TRUE;
        else {
            wl_vk_buffer->status = WAITING_VBLANK;
            __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
            ready_to_commit = TPL_FALSE;
        }

        tpl_gmutex_unlock(&wl_vk_buffer->mutex);

        if (ready_to_commit)
            _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
    }

    return TPL_ERROR_NONE;
}

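/* Handlers for zwp_linux_buffer_release_v1 (explicit sync). fenced_release
 * delivers a sync fd the compositor signals when it finishes reading the
 * buffer; the fd is stashed in release_fence_fd so the next dequeue of
 * this buffer can hand it out as the release fence. */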
#if TIZEN_FEATURE_ENABLE
static void
__cb_buffer_fenced_release(void *data,
                           struct zwp_linux_buffer_release_v1 *release,
                           int32_t fence)
{
    tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
    tbm_surface_h tbm_surface = NULL;

    TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

    tbm_surface = wl_vk_buffer->tbm_surface;

    if (tbm_surface_internal_is_valid(tbm_surface)) {
        tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
        tpl_wl_vk_swapchain_t *swapchain = NULL;

        if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
            TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
            tbm_surface_internal_unref(tbm_surface);
            return;
        }

        swapchain = wl_vk_surface->swapchain;

        tpl_gmutex_lock(&wl_vk_buffer->mutex);
        if (wl_vk_buffer->status == COMMITTED) {
            tbm_surface_queue_error_e tsq_err;

            zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
            wl_vk_buffer->buffer_release = NULL;

            wl_vk_buffer->release_fence_fd = fence;
            wl_vk_buffer->status = RELEASED;

            TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
                       wl_vk_buffer->bo_name,
                       fence);
            TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
                            wl_vk_buffer->bo_name);

            TPL_LOG_T("WL_VK",
                      "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
                      wl_vk_buffer, tbm_surface,
                      wl_vk_buffer->bo_name,
                      fence);

            tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
                                                tbm_surface);
            if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
                TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

            tbm_surface_internal_unref(tbm_surface);
        }

        tpl_gmutex_unlock(&wl_vk_buffer->mutex);
    } else {
        TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
    }
}

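/* immediate_release is the fence-less variant: the compositor has already
 * finished with the buffer, so it is returned to the tbm_queue with no
 * release fence (release_fence_fd stays -1). */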
static void
__cb_buffer_immediate_release(void *data,
                              struct zwp_linux_buffer_release_v1 *release)
{
    tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
    tbm_surface_h tbm_surface = NULL;

    TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

    tbm_surface = wl_vk_buffer->tbm_surface;

    if (tbm_surface_internal_is_valid(tbm_surface)) {
        tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
        tpl_wl_vk_swapchain_t *swapchain = NULL;

        if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
            TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
            tbm_surface_internal_unref(tbm_surface);
            return;
        }

        swapchain = wl_vk_surface->swapchain;

        tpl_gmutex_lock(&wl_vk_buffer->mutex);
        if (wl_vk_buffer->status == COMMITTED) {
            tbm_surface_queue_error_e tsq_err;

            zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
            wl_vk_buffer->buffer_release = NULL;

            wl_vk_buffer->release_fence_fd = -1;
            wl_vk_buffer->status = RELEASED;

            TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
                       _get_tbm_surface_bo_name(tbm_surface));
            TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
                            _get_tbm_surface_bo_name(tbm_surface));

            TPL_LOG_T("WL_VK",
                      "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
                      wl_vk_buffer, tbm_surface,
                      _get_tbm_surface_bo_name(tbm_surface));

            tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
                                                tbm_surface);
            if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
                TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

            tbm_surface_internal_unref(tbm_surface);
        }

        tpl_gmutex_unlock(&wl_vk_buffer->mutex);
    } else {
        TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
    }
}

static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
    __cb_buffer_fenced_release,
    __cb_buffer_immediate_release,
};
#endif

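/* Plain wl_buffer.release handler, used when explicit sync is inactive.
 * Returns a COMMITTED buffer to the tbm_queue and drops the reference
 * taken when the buffer was acquired. */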
static void
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
{
    tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
    tbm_surface_h tbm_surface = NULL;

    TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

    tbm_surface = wl_vk_buffer->tbm_surface;

    if (tbm_surface_internal_is_valid(tbm_surface)) {
        tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
        tpl_wl_vk_swapchain_t *swapchain = NULL;
        tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;

        if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
            TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
            tbm_surface_internal_unref(tbm_surface);
            return;
        }

        swapchain = wl_vk_surface->swapchain;

        tpl_gmutex_lock(&wl_vk_buffer->mutex);

        if (wl_vk_buffer->status == COMMITTED) {
            tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
                                                tbm_surface);
            if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
                TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

            wl_vk_buffer->status = RELEASED;

            TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
            TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
                            wl_vk_buffer->bo_name);

            TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
                      wl_vk_buffer->wl_buffer, tbm_surface,
                      wl_vk_buffer->bo_name);

            tbm_surface_internal_unref(tbm_surface);
        }

        tpl_gmutex_unlock(&wl_vk_buffer->mutex);
    } else {
        TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
    }
}

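/* tdm vblank handler. Normally commits exactly one buffer from
 * vblank_waiting_buffers per vblank; if tdm reported an error (e.g.
 * TDM_ERROR_TIMEOUT) or post_interval dropped to 0, the whole waiting
 * list is flushed so buffers cannot pile up. */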
static void
__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
                       unsigned int sequence, unsigned int tv_sec,
                       unsigned int tv_usec, void *user_data)
{
    tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;

    TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK");
    TPL_LOG_D("[VBLANK_DONE]", "wl_vk_surface(%p)", wl_vk_surface);

    if (error == TDM_ERROR_TIMEOUT)
        TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
                 wl_vk_surface);

    tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
    wl_vk_surface->vblank_done = TPL_TRUE;

    if (wl_vk_surface->vblank && wl_vk_surface->vblank_waiting_buffers) {
        tpl_bool_t is_empty = TPL_TRUE;
        do {
            tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)
                __tpl_list_pop_front(wl_vk_surface->vblank_waiting_buffers, NULL);
            is_empty = __tpl_list_is_empty(wl_vk_surface->vblank_waiting_buffers);

            if (!wl_vk_buffer) break;

            _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);

            /* If a tdm error such as TIMEOUT occurred, flush all vblank
             * waiting buffers of this wl_vk_surface. Otherwise, only one
             * wl_vk_buffer is committed per vblank event.
             */
            if (error == TDM_ERROR_NONE && wl_vk_surface->post_interval > 0)
                break;
        } while (!is_empty);
    }

    wl_vk_surface->vblank_enable = (wl_vk_surface->post_interval > 0);

    tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}

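/* Arms a vblank wait on the tdm_client, lazily creating the per-surface
 * tdm_client_vblank object and the waiting-buffer list on first use.
 * __cb_tdm_client_vblank() fires after post_interval vblank periods. */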
static tpl_result_t
_thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
{
    tdm_error tdm_err = TDM_ERROR_NONE;
    tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

    if (wl_vk_surface->vblank == NULL) {
        wl_vk_surface->vblank =
            _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client);
        if (!wl_vk_surface->vblank) {
            TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
                     wl_vk_surface);
            return TPL_ERROR_OUT_OF_MEMORY;
        }

        wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
        if (!wl_vk_surface->vblank_waiting_buffers) {
            tdm_client_vblank_destroy(wl_vk_surface->vblank);
            wl_vk_surface->vblank = NULL;
            return TPL_ERROR_OUT_OF_MEMORY;
        }
    }

    tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
                                     wl_vk_surface->post_interval,
                                     __cb_tdm_client_vblank,
                                     (void *)wl_vk_surface);

    if (tdm_err == TDM_ERROR_NONE) {
        wl_vk_surface->vblank_done = TPL_FALSE;
        TRACE_ASYNC_BEGIN((intptr_t)wl_vk_surface, "WAIT_VBLANK");
    } else {
        TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
        return TPL_ERROR_INVALID_OPERATION;
    }

    return TPL_ERROR_NONE;
}

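/* Attaches the buffer to the wl_surface, posts damage, and commits.
 * Damage rects are stored as [x, y, w, h] quadruples with a bottom-left
 * origin, so y is flipped into Wayland's top-left coordinate space:
 *
 *     inverted_y = height - (y + h)
 *
 * wl_surface.damage_buffer requires wl_surface version 4 or newer, hence
 * the wl_proxy_get_version() check with a wl_surface.damage fallback.
 * With explicit sync active, the acquire fence fd is handed to the
 * compositor through zwp_linux_surface_synchronization_v1 and a
 * per-commit buffer_release object replaces the wl_buffer.release event. */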
static void
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
                          tpl_wl_vk_buffer_t *wl_vk_buffer)
{
    tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
    struct wl_surface *wl_surface = wl_vk_surface->wl_surface;
    uint32_t version;

    TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
                                   "wl_vk_buffer should not be NULL");

    if (wl_vk_buffer->wl_buffer == NULL) {
        wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
                wl_vk_display->wl_tbm_client,
                wl_vk_buffer->tbm_surface);
        if (wl_vk_buffer->wl_buffer &&
            (wl_vk_buffer->acquire_fence_fd == -1 ||
             wl_vk_display->use_explicit_sync == TPL_FALSE)) {
            wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
                                   &wl_buffer_release_listener, wl_vk_buffer);
        }
    }
    TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
                                   "[FATAL] Failed to create wl_buffer");

    version = wl_proxy_get_version((struct wl_proxy *)wl_surface);

    wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
                      wl_vk_buffer->dx, wl_vk_buffer->dy);

    if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
        if (version < 4) {
            wl_surface_damage(wl_surface,
                              wl_vk_buffer->dx, wl_vk_buffer->dy,
                              wl_vk_buffer->width, wl_vk_buffer->height);
        } else {
            wl_surface_damage_buffer(wl_surface,
                                     0, 0,
                                     wl_vk_buffer->width, wl_vk_buffer->height);
        }
    } else {
        int i;
        for (i = 0; i < wl_vk_buffer->num_rects; i++) {
            int inverted_y =
                wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
                                        wl_vk_buffer->rects[i * 4 + 3]);
            if (version < 4) {
                wl_surface_damage(wl_surface,
                                  wl_vk_buffer->rects[i * 4 + 0],
                                  inverted_y,
                                  wl_vk_buffer->rects[i * 4 + 2],
                                  wl_vk_buffer->rects[i * 4 + 3]);
            } else {
                wl_surface_damage_buffer(wl_surface,
                                         wl_vk_buffer->rects[i * 4 + 0],
                                         inverted_y,
                                         wl_vk_buffer->rects[i * 4 + 2],
                                         wl_vk_buffer->rects[i * 4 + 3]);
            }
        }
    }

#if TIZEN_FEATURE_ENABLE
    if (wl_vk_display->use_explicit_sync &&
        wl_vk_surface->surface_sync &&
        wl_vk_buffer->acquire_fence_fd != -1) {

        zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
                                                               wl_vk_buffer->acquire_fence_fd);
        TPL_LOG_D("[SET_ACQUIRE_FENCE][1/2]", "wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
                  wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
        close(wl_vk_buffer->acquire_fence_fd);
        wl_vk_buffer->acquire_fence_fd = -1;

        wl_vk_buffer->buffer_release =
            zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
        if (!wl_vk_buffer->buffer_release) {
            TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
        } else {
            zwp_linux_buffer_release_v1_add_listener(
                wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
            TPL_LOG_D("[SET_ACQUIRE_FENCE][2/2]", "add explicit_sync_release_listener.");
        }
    }
#endif

    wl_surface_commit(wl_surface);

    wl_display_flush(wl_vk_display->wl_display);

    TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
                      wl_vk_buffer->bo_name);

    tpl_gmutex_lock(&wl_vk_buffer->mutex);

    wl_vk_buffer->need_to_commit = TPL_FALSE;
    wl_vk_buffer->status = COMMITTED;

    tpl_gcond_signal(&wl_vk_buffer->cond);

    tpl_gmutex_unlock(&wl_vk_buffer->mutex);

    TPL_LOG_T("WL_VK",
              "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
              wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
              wl_vk_buffer->bo_name);

    if (wl_vk_surface->post_interval > 0 && wl_vk_surface->vblank != NULL) {
        wl_vk_surface->vblank_enable = TPL_TRUE;
        if (_thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
            TPL_ERR("Failed to set wait vblank.");
    }
}

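/* Backend entry points called by the TPL frontend: a native display is
 * accepted if it is a wl_display, and the two init functions below fill
 * in the display and surface backend vtables. */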
tpl_bool_t
__tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
{
    if (!native_dpy) return TPL_FALSE;

    if (_check_native_handle_is_wl_display(native_dpy))
        return TPL_TRUE;

    return TPL_FALSE;
}

void
__tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
{
    TPL_ASSERT(backend);

    backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
    backend->data = NULL;

    backend->init = __tpl_wl_vk_display_init;
    backend->fini = __tpl_wl_vk_display_fini;
    backend->query_config = __tpl_wl_vk_display_query_config;
    backend->filter_config = __tpl_wl_vk_display_filter_config;
    backend->query_window_supported_buffer_count =
        __tpl_wl_vk_display_query_window_supported_buffer_count;
    backend->query_window_supported_present_modes =
        __tpl_wl_vk_display_query_window_supported_present_modes;
}

void
__tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
{
    TPL_ASSERT(backend);

    backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
    backend->data = NULL;

    backend->init = __tpl_wl_vk_surface_init;
    backend->fini = __tpl_wl_vk_surface_fini;
    backend->validate = __tpl_wl_vk_surface_validate;
    backend->cancel_dequeued_buffer =
        __tpl_wl_vk_surface_cancel_buffer;
    backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
    backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
    backend->get_swapchain_buffers =
        __tpl_wl_vk_surface_get_swapchain_buffers;
    backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
    backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
    backend->set_post_interval =
        __tpl_wl_vk_surface_set_post_interval;
}

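/* Debug helpers. The bo name is the key exported from the surface's
 * first tbm_bo via tbm_bo_export(); it is presumably stable across
 * processes, which is why the logs above use it to identify buffers. */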
static int
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
{
    return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
}

static void
_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
{
    int idx = 0;

    tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
    TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
             wl_vk_surface, wl_vk_surface->buffer_cnt);
    for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
        tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
        if (wl_vk_buffer) {
            TPL_INFO("[INFO]",
                     "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
                     idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
                     wl_vk_buffer->bo_name,
                     status_to_string[wl_vk_buffer->status]);
        }
    }
    tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
}