1 #define inline __inline__
4 #include "tpl_internal.h"
9 #include <sys/eventfd.h>
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
20 #include <tdm_client.h>
22 #ifndef TIZEN_FEATURE_ENABLE
23 #define TIZEN_FEATURE_ENABLE 1
26 #if TIZEN_FEATURE_ENABLE
27 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
30 #include "tpl_utils_gthread.h"
32 #define BUFFER_ARRAY_SIZE 10
33 #define VK_CLIENT_QUEUE_SIZE 3
35 static int wl_vk_buffer_key;
36 #define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
/* Forward typedefs for the backend object hierarchy:
 * display -> surface -> swapchain -> buffer. */
38 typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t;
39 typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t;
40 typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t;
41 typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t;
/* Per-display backend state: owns the event-thread gsources, the
 * wayland-tbm client, and the tdm_client used for vblank waiting.
 * NOTE(review): this listing is a sampled extract — members between the
 * visible original line numbers are missing here. */
43 struct _tpl_wl_vk_display {
44 tpl_gsource *disp_source;
/* serializes access to wl_display event dispatch (see buffer_clear). */
46 tpl_gmutex wl_event_mutex;
48 struct wl_display *wl_display;
49 struct wl_event_queue *ev_queue;
50 struct wayland_tbm_client *wl_tbm_client;
51 int last_error; /* errno of the last wl_display error*/
53 tpl_bool_t wl_initialized;
/* NOTE(review): accessors elsewhere in this file use
 * wl_vk_display->tdm.tdm_client / ->tdm.tdm_mutex etc., so the members
 * below presumably live in a nested 'tdm' sub-struct whose header line
 * is missing from this extract — confirm against the full source.
 * That would also explain the second 'gsource_finalized' further down
 * (one flag for the tdm source, one for the display source). */
56 tdm_client *tdm_client;
57 tpl_gsource *tdm_source;
59 tpl_bool_t tdm_initialized;
60 /* To make sure that tpl_gsource has been successfully finalized. */
61 tpl_bool_t gsource_finalized;
66 tpl_bool_t use_wait_vblank;
67 tpl_bool_t use_explicit_sync;
70 /* To make sure that tpl_gsource has been successfully finalized. */
71 tpl_bool_t gsource_finalized;
72 tpl_gmutex disp_mutex;
75 /* device surface capabilities */
79 #if TIZEN_FEATURE_ENABLE
80 struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
/* Vulkan swapchain wrapper backed by a tbm_surface_queue.
 * Reference-counted (ref_cnt); buffers are exported via
 * swapchain_buffers. Extract is incomplete. */
84 struct _tpl_wl_vk_swapchain {
85 tpl_wl_vk_surface_t *wl_vk_surface;
87 tbm_surface_queue_h tbm_queue;
/* TRUE once the in-thread CREATE_QUEUE message has completed. */
90 tpl_bool_t create_done;
100 tbm_surface_h *swapchain_buffers;
102 tpl_util_atomic_uint ref_cnt;
/* Messages sent to the surface gsource; dispatch handles INIT_SURFACE,
 * ACQUIRABLE, CREATE_QUEUE, DESTROY_QUEUE and NONE_MESSAGE (enumerators
 * not visible in this extract). */
105 typedef enum surf_message {
/* Per-window backend state: ties a wl_surface to its swapchain,
 * vblank handle and the tracked wl_vk_buffer array.
 * Extract is incomplete (members missing between visible lines). */
113 struct _tpl_wl_vk_surface {
114 tpl_gsource *surf_source;
116 tpl_wl_vk_swapchain_t *swapchain;
118 struct wl_surface *wl_surface;
119 #if TIZEN_FEATURE_ENABLE
120 struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
122 tdm_client_vblank *vblank;
124 /* surface information */
127 tpl_wl_vk_display_t *wl_vk_display;
128 tpl_surface_t *tpl_surface;
130 /* wl_vk_buffer array for buffer tracing */
131 tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE];
132 int buffer_cnt; /* the number of using wl_vk_buffers */
133 tpl_gmutex buffers_mutex;
135 tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
/* guards sent_message and the fields touched by surf_dispatch. */
137 tpl_gmutex surf_mutex;
140 /* for waiting draw done */
141 tpl_bool_t is_activated;
142 tpl_bool_t reset; /* TRUE if queue reseted by external */
143 tpl_bool_t vblank_done;
144 tpl_bool_t initialized_in_thread;
146 /* To make sure that tpl_gsource has been successfully finalized. */
147 tpl_bool_t gsource_finalized;
149 surf_message sent_message;
/* Buffer lifecycle states; code below relies on the ordering
 * RELEASED < DEQUEUED < ENQUEUED < ACQUIRED < WAITING_SIGNALED <
 * WAITING_VBLANK < COMMITTED (comparisons like 'status >= ENQUEUED').
 * Only one enumerator is visible in this extract. */
154 typedef enum buffer_status {
159 WAITING_SIGNALED, // 4
/* Human-readable names indexed by buffer_status_t (7 states);
 * used in BUFFER_CLEAR logging. Partially visible in this extract. */
164 static const char *status_to_string[7] = {
169 "WAITING_SIGNALED", // 4
170 "WAITING_VBLANK", // 5
/* Tracking record for one tbm_surface/wl_buffer pair, including its
 * explicit-sync fence fds and position in the owning surface's
 * buffers[] array. Extract is incomplete. */
174 struct _tpl_wl_vk_buffer {
175 tbm_surface_h tbm_surface;
178 struct wl_buffer *wl_buffer;
179 int dx, dy; /* position to attach to wl_surface */
180 int width, height; /* size to attach to wl_surface */
182 buffer_status_t status; /* for tracing buffer status */
183 int idx; /* position index in buffers array of wl_vk_surface */
185 /* for damage region */
189 /* for checking need_to_commit (frontbuffer mode) */
190 tpl_bool_t need_to_commit;
192 #if TIZEN_FEATURE_ENABLE
193 /* to get release event via zwp_linux_buffer_release_v1 */
194 struct zwp_linux_buffer_release_v1 *buffer_release;
197 /* each buffers own its release_fence_fd, until it passes ownership
199 int32_t release_fence_fd;
201 /* each buffers own its acquire_fence_fd.
202 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
203 * will be passed to display server
204 * Otherwise it will be used as a fence waiting for render done
206 int32_t acquire_fence_fd;
/* back-pointer to the owning surface. */
211 tpl_wl_vk_surface_t *wl_vk_surface;
/* Forward declarations of thread-side helpers used by the gsource
 * callbacks below (return types fall on lines missing from this
 * extract). */
215 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
217 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
219 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
221 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
223 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
225 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
227 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
229 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
230 tpl_wl_vk_buffer_t *wl_vk_buffer);
/* Heuristic check that 'native_dpy' is a wl_display: dereference the
 * first pointer-sized value and compare it to &wl_display_interface,
 * first by address, then by interface name prefix. */
233 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
235 struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
237 if (!wl_vk_native_dpy) {
238 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
242 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
243 is a memory address pointing the structure of wl_display_interface. */
244 if (wl_vk_native_dpy == &wl_display_interface)
/* Different copies of libwayland may give distinct interface addresses,
 * so fall back to comparing the interface name string. */
247 if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
248 strlen(wl_display_interface.name)) == 0) {
/* gsource dispatch callback for the tdm_client fd: pumps pending tdm
 * events. On unrecoverable failure it removes the tdm source from the
 * thread so it is never dispatched again. */
256 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
258 tpl_wl_vk_display_t *wl_vk_display = NULL;
259 tdm_error tdm_err = TDM_ERROR_NONE;
263 wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
264 if (!wl_vk_display) {
265 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
266 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
270 tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client);
272 /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
273 * When tdm_source is no longer available due to an unexpected situation,
274 * wl_vk_thread must remove it from the thread and destroy it.
275 * In that case, tdm_vblank can no longer be used for surfaces and displays
276 * that used this tdm_source. */
277 if (tdm_err != TDM_ERROR_NONE) {
278 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
280 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
/* destroy_in_thread == TPL_FALSE: we are already on the event thread. */
282 tpl_gsource_destroy(gsource, TPL_FALSE);
284 wl_vk_display->tdm.tdm_source = NULL;
/* gsource finalize callback for the tdm source: destroys the
 * tdm_client under tdm_mutex, marks the source finalized, and signals
 * tdm_cond so threads blocked in tpl_gcond_wait() can proceed. */
293 __thread_func_tdm_finalize(tpl_gsource *gsource)
295 tpl_wl_vk_display_t *wl_vk_display = NULL;
297 wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
299 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
301 TPL_INFO("[TDM_CLIENT_FINI]",
302 "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
303 wl_vk_display, wl_vk_display->tdm.tdm_client,
304 wl_vk_display->tdm.tdm_display_fd);
306 if (wl_vk_display->tdm.tdm_client) {
307 tdm_client_destroy(wl_vk_display->tdm.tdm_client);
308 wl_vk_display->tdm.tdm_client = NULL;
309 wl_vk_display->tdm.tdm_display_fd = -1;
312 wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
/* Waiters in display init/fini poll this flag in a gcond loop. */
313 wl_vk_display->tdm.gsource_finalized = TPL_TRUE;
315 tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond);
316 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
/* gsource vtable for the tdm fd; .prepare/.check members are on lines
 * not visible in this extract. */
319 static tpl_gsource_functions tdm_funcs = {
322 .dispatch = __thread_func_tdm_dispatch,
323 .finalize = __thread_func_tdm_finalize,
/* Creates a tdm_client and records its fd in wl_vk_display->tdm.
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_OPERATION if
 * client creation or fd retrieval fails (client destroyed on the
 * latter path, so no leak). */
327 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
329 tdm_client *tdm_client = NULL;
330 int tdm_display_fd = -1;
331 tdm_error tdm_err = TDM_ERROR_NONE;
333 tdm_client = tdm_client_create(&tdm_err);
334 if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
335 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
336 return TPL_ERROR_INVALID_OPERATION;
339 tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
340 if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
341 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
342 tdm_client_destroy(tdm_client);
343 return TPL_ERROR_INVALID_OPERATION;
/* tdm_source itself is created later, in __tpl_wl_vk_display_init. */
346 wl_vk_display->tdm.tdm_display_fd = tdm_display_fd;
347 wl_vk_display->tdm.tdm_client = tdm_client;
348 wl_vk_display->tdm.tdm_source = NULL;
349 wl_vk_display->tdm.tdm_initialized = TPL_TRUE;
351 TPL_INFO("[TDM_CLIENT_INIT]",
352 "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
353 wl_vk_display, tdm_client, tdm_display_fd);
355 return TPL_ERROR_NONE;
/* wl_registry global announcement handler: binds
 * zwp_linux_explicit_synchronization_v1 (version 1) unless the user
 * disabled it via TPL_EFS=0. Note the 'resistry' spelling is the
 * existing project-wide name. */
359 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
360 uint32_t name, const char *interface,
363 #if TIZEN_FEATURE_ENABLE
364 tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
366 if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
367 char *env = tpl_getenv("TPL_EFS");
/* TPL_EFS set to a non-numeric/zero value disables explicit sync. */
368 if (env && !atoi(env)) {
369 wl_vk_display->use_explicit_sync = TPL_FALSE;
371 wl_vk_display->explicit_sync =
372 wl_registry_bind(wl_registry, name,
373 &zwp_linux_explicit_synchronization_v1_interface, 1);
374 wl_vk_display->use_explicit_sync = TPL_TRUE;
375 TPL_LOG_D("[REGISTRY_BIND]",
376 "wl_vk_display(%p) bind zwp_linux_explicit_synchronization_v1_interface",
/* wl_registry global-remove handler: intentionally a no-op (body not
 * visible in this extract). */
384 __cb_wl_resistry_global_remove_callback(void *data,
385 struct wl_registry *wl_registry,
/* Listener passed to wl_registry_add_listener in
 * _thread_wl_display_init. */
390 static const struct wl_registry_listener registry_listener = {
391 __cb_wl_resistry_global_callback,
392 __cb_wl_resistry_global_remove_callback
/* Logs a wl_display failure for 'func_name': prints strerror(errno),
 * decodes EPROTO into interface/error_code/proxy_id, and caches errno
 * in last_error so the same error is only reported once. */
396 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
397 const char *func_name)
401 strerror_r(errno, buf, sizeof(buf));
/* Suppress duplicate reports of the same errno. */
403 if (wl_vk_display->last_error == errno)
406 TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
408 dpy_err = wl_display_get_error(wl_vk_display->wl_display);
409 if (dpy_err == EPROTO) {
410 const struct wl_interface *err_interface;
411 uint32_t err_proxy_id, err_code;
412 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
415 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
416 err_interface->name, err_code, err_proxy_id);
/* Setting last_error also makes the disp gsource return SOURCE_REMOVE. */
419 wl_vk_display->last_error = errno;
/* Thread-side Wayland initialization: creates a private queue for the
 * registry roundtrip plus the long-lived ev_queue, initializes the
 * wayland-tbm client, binds globals via registry_listener, and routes
 * explicit_sync onto ev_queue. Temporary queue/registry/wrapper are
 * torn down on the (not fully visible) cleanup path. */
423 _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
425 struct wl_registry *registry = NULL;
426 struct wl_event_queue *queue = NULL;
427 struct wl_display *display_wrapper = NULL;
428 struct wl_proxy *wl_tbm = NULL;
429 struct wayland_tbm_client *wl_tbm_client = NULL;
431 tpl_result_t result = TPL_ERROR_NONE;
/* 'queue' is only used for the registry roundtrip below; 'ev_queue'
 * is the persistent queue for backend events. */
433 queue = wl_display_create_queue(wl_vk_display->wl_display);
435 TPL_ERR("Failed to create wl_queue wl_display(%p)",
436 wl_vk_display->wl_display);
437 result = TPL_ERROR_INVALID_OPERATION;
441 wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
442 if (!wl_vk_display->ev_queue) {
443 TPL_ERR("Failed to create wl_queue wl_display(%p)",
444 wl_vk_display->wl_display);
445 result = TPL_ERROR_INVALID_OPERATION;
/* Wrap the display so the registry can be bound to the private queue
 * without disturbing the application's own queue assignment. */
449 display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
450 if (!display_wrapper) {
451 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
452 wl_vk_display->wl_display);
453 result = TPL_ERROR_INVALID_OPERATION;
457 wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
459 registry = wl_display_get_registry(display_wrapper);
461 TPL_ERR("Failed to create wl_registry");
462 result = TPL_ERROR_INVALID_OPERATION;
466 wl_proxy_wrapper_destroy(display_wrapper);
467 display_wrapper = NULL;
469 wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
470 if (!wl_tbm_client) {
471 TPL_ERR("Failed to initialize wl_tbm_client.");
472 result = TPL_ERROR_INVALID_CONNECTION;
476 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
478 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
479 result = TPL_ERROR_INVALID_CONNECTION;
483 wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
484 wl_vk_display->wl_tbm_client = wl_tbm_client;
/* NOTE(review): '®istry_listener' below looks like a mojibake of
 * '&registry_listener' ('&reg;' mis-decoded during extraction) —
 * restore the original byte sequence in the real source. */
486 if (wl_registry_add_listener(registry, &#174;istry_listener,
488 TPL_ERR("Failed to wl_registry_add_listener");
489 result = TPL_ERROR_INVALID_OPERATION;
/* Blocks until the server has processed the registry binds. */
493 ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
495 _wl_display_print_err(wl_vk_display, "roundtrip_queue");
496 result = TPL_ERROR_INVALID_OPERATION;
500 #if TIZEN_FEATURE_ENABLE
501 if (wl_vk_display->explicit_sync) {
502 wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
503 wl_vk_display->ev_queue);
504 TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
505 wl_vk_display->explicit_sync);
509 wl_vk_display->wl_initialized = TPL_TRUE;
511 TPL_INFO("[WAYLAND_INIT]",
512 "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
513 wl_vk_display, wl_vk_display->wl_display,
514 wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
515 #if TIZEN_FEATURE_ENABLE
516 TPL_INFO("[WAYLAND_INIT]",
518 wl_vk_display->explicit_sync);
/* Cleanup path (label not visible in this extract). */
522 wl_proxy_wrapper_destroy(display_wrapper);
524 wl_registry_destroy(registry);
526 wl_event_queue_destroy(queue);
/* Thread-side Wayland teardown: cancels a pending prepared read,
 * flushes pending ev_queue dispatches, destroys explicit_sync, detaches
 * and deinits the wayland-tbm client, then destroys ev_queue. */
532 _thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
534 /* If wl_vk_display is in prepared state, cancel it */
535 if (wl_vk_display->prepared) {
536 wl_display_cancel_read(wl_vk_display->wl_display);
537 wl_vk_display->prepared = TPL_FALSE;
540 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
541 wl_vk_display->ev_queue) == -1) {
542 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
545 #if TIZEN_FEATURE_ENABLE
546 if (wl_vk_display->explicit_sync) {
547 TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
548 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
549 wl_vk_display, wl_vk_display->explicit_sync);
550 zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
551 wl_vk_display->explicit_sync = NULL;
555 if (wl_vk_display->wl_tbm_client) {
556 struct wl_proxy *wl_tbm = NULL;
558 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
559 wl_vk_display->wl_tbm_client);
/* Detach wl_tbm from ev_queue before the queue is destroyed below. */
561 wl_proxy_set_queue(wl_tbm, NULL);
564 TPL_INFO("[WL_TBM_DEINIT]",
565 "wl_vk_display(%p) wl_tbm_client(%p)",
566 wl_vk_display, wl_vk_display->wl_tbm_client);
567 wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
568 wl_vk_display->wl_tbm_client = NULL;
571 wl_event_queue_destroy(wl_vk_display->ev_queue);
573 wl_vk_display->wl_initialized = TPL_FALSE;
575 TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
576 wl_vk_display, wl_vk_display->wl_display);
/* Entry hook run on the newly created wl_vk_thread: brings up Wayland
 * state, then tdm. A tdm failure is only a warning — vblank waiting is
 * simply disabled. Returns the display as the thread's user data. */
580 _thread_init(void *data)
582 tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
584 if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
585 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
586 wl_vk_display, wl_vk_display->wl_display);
589 if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
590 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
593 return wl_vk_display;
/* gsource prepare callback implementing the standard libwayland
 * prepare-read protocol: dispatch pending events until
 * wl_display_prepare_read_queue succeeds, then flush requests.
 * Skips straight to dispatch when last_error is set. */
597 __thread_func_disp_prepare(tpl_gsource *gsource)
599 tpl_wl_vk_display_t *wl_vk_display =
600 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
602 /* If this wl_vk_display is already prepared,
603 * do nothing in this function. */
604 if (wl_vk_display->prepared)
607 /* If there is a last_error, there is no need to poll,
608 * so skip directly to dispatch.
609 * prepare -> dispatch */
610 if (wl_vk_display->last_error)
613 while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
614 wl_vk_display->ev_queue) != 0) {
615 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
616 wl_vk_display->ev_queue) == -1) {
617 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
621 wl_vk_display->prepared = TPL_TRUE;
623 wl_display_flush(wl_vk_display->wl_display);
/* gsource check callback: completes the prepared read with
 * wl_display_read_events when the fd is readable, otherwise cancels
 * the read. Always clears 'prepared'. */
629 __thread_func_disp_check(tpl_gsource *gsource)
631 tpl_wl_vk_display_t *wl_vk_display =
632 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
633 tpl_bool_t ret = TPL_FALSE;
635 if (!wl_vk_display->prepared)
638 /* If prepared, but last_error is set,
639 * cancel_read is executed and FALSE is returned.
640 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
641 * and skipping disp_check from prepare to disp_dispatch.
642 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
643 if (wl_vk_display->prepared && wl_vk_display->last_error) {
644 wl_display_cancel_read(wl_vk_display->wl_display);
648 if (tpl_gsource_check_io_condition(gsource)) {
649 if (wl_display_read_events(wl_vk_display->wl_display) == -1)
650 _wl_display_print_err(wl_vk_display, "read_event");
/* fd not readable: the prepared read must be cancelled, not left open. */
653 wl_display_cancel_read(wl_vk_display->wl_display);
657 wl_vk_display->prepared = TPL_FALSE;
/* gsource dispatch callback: dispatches ev_queue events under
 * wl_event_mutex and flushes outgoing requests. Returns SOURCE_REMOVE
 * (path not fully visible) once last_error is set. */
663 __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
665 tpl_wl_vk_display_t *wl_vk_display =
666 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
670 /* If there is last_error, SOURCE_REMOVE should be returned
671 * to remove the gsource from the main loop.
672 * This is because wl_vk_display is not valid since last_error was set.*/
673 if (wl_vk_display->last_error) {
677 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
678 if (tpl_gsource_check_io_condition(gsource)) {
679 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
680 wl_vk_display->ev_queue) == -1) {
681 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
685 wl_display_flush(wl_vk_display->wl_display);
686 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* gsource finalize callback for the display source: tears down Wayland
 * state (if initialized), marks finalized and signals disp_cond so
 * destroy loops in display init/fini can proceed. */
692 __thread_func_disp_finalize(tpl_gsource *gsource)
694 tpl_wl_vk_display_t *wl_vk_display =
695 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
697 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
698 TPL_LOG_D("[D_FINALIZE]", "wl_vk_display(%p) tpl_gsource(%p)",
699 wl_vk_display, gsource);
701 if (wl_vk_display->wl_initialized)
702 _thread_wl_display_fini(wl_vk_display);
704 wl_vk_display->gsource_finalized = TPL_TRUE;
706 tpl_gcond_signal(&wl_vk_display->disp_cond);
707 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
/* gsource vtable for the wl_display fd, wiring the prepare/check/
 * dispatch/finalize callbacks above. */
713 static tpl_gsource_functions disp_funcs = {
714 .prepare = __thread_func_disp_prepare,
715 .check = __thread_func_disp_check,
716 .dispatch = __thread_func_disp_dispatch,
717 .finalize = __thread_func_disp_finalize,
/* Backend entry point: validates the native wl_display handle,
 * allocates tpl_wl_vk_display_t, spawns wl_vk_thread, and attaches the
 * display and tdm gsources. On any failure after thread creation it
 * destroys the sources with a finalized-flag/gcond wait loop, joins the
 * thread and frees all sync primitives.
 * Returns TPL_ERROR_NONE, or INVALID_PARAMETER / OUT_OF_MEMORY /
 * INVALID_OPERATION on the respective failure. */
721 __tpl_wl_vk_display_init(tpl_display_t *display)
725 tpl_wl_vk_display_t *wl_vk_display = NULL;
727 /* Do not allow default display in wayland */
728 if (!display->native_handle) {
729 TPL_ERR("Invalid native handle for display.");
730 return TPL_ERROR_INVALID_PARAMETER;
733 if (!_check_native_handle_is_wl_display(display->native_handle)) {
734 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
735 return TPL_ERROR_INVALID_PARAMETER;
738 wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
739 sizeof(tpl_wl_vk_display_t));
740 if (!wl_vk_display) {
741 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
742 return TPL_ERROR_OUT_OF_MEMORY;
745 display->backend.data = wl_vk_display;
746 display->bufmgr_fd = -1;
748 wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
749 wl_vk_display->wl_initialized = TPL_FALSE;
751 wl_vk_display->ev_queue = NULL;
752 wl_vk_display->wl_display = (struct wl_display *)display->native_handle;
753 wl_vk_display->last_error = 0;
754 wl_vk_display->use_explicit_sync = TPL_FALSE; // default disabled
755 wl_vk_display->prepared = TPL_FALSE;
757 /* Wayland Interfaces */
758 #if TIZEN_FEATURE_ENABLE
759 wl_vk_display->explicit_sync = NULL;
761 wl_vk_display->wl_tbm_client = NULL;
763 /* Vulkan specific surface capabilities */
764 wl_vk_display->min_buffer = 2;
765 wl_vk_display->max_buffer = VK_CLIENT_QUEUE_SIZE;
766 wl_vk_display->present_modes = TPL_DISPLAY_PRESENT_MODE_FIFO;
768 wl_vk_display->use_wait_vblank = TPL_TRUE; // default enabled
/* TPL_WAIT_VBLANK=0 disables tdm vblank waiting. */
770 char *env = tpl_getenv("TPL_WAIT_VBLANK");
771 if (env && !atoi(env)) {
772 wl_vk_display->use_wait_vblank = TPL_FALSE;
776 tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
778 tpl_gmutex_init(&wl_vk_display->disp_mutex);
779 tpl_gcond_init(&wl_vk_display->disp_cond);
/* _thread_init runs on the new thread and performs wl/tdm setup. */
782 wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
783 (tpl_gthread_func)_thread_init,
784 (void *)wl_vk_display);
785 if (!wl_vk_display->thread) {
786 TPL_ERR("Failed to create wl_vk_thread");
790 wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
791 (void *)wl_vk_display,
792 wl_display_get_fd(wl_vk_display->wl_display),
793 &disp_funcs, SOURCE_TYPE_NORMAL);
794 if (!wl_vk_display->disp_source) {
795 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
796 display->native_handle,
797 wl_vk_display->thread);
801 tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex);
802 tpl_gcond_init(&wl_vk_display->tdm.tdm_cond);
804 wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread,
805 (void *)wl_vk_display,
806 wl_vk_display->tdm.tdm_display_fd,
807 &tdm_funcs, SOURCE_TYPE_NORMAL);
808 if (!wl_vk_display->tdm.tdm_source) {
809 TPL_ERR("Failed to create tdm_gsource\n");
813 TPL_INFO("[DISPLAY_INIT]",
814 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
816 wl_vk_display->thread,
817 wl_vk_display->wl_display);
819 TPL_INFO("[DISPLAY_INIT]",
820 "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
821 wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
822 wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
824 return TPL_ERROR_NONE;
/* Error path (label not visible): destroy sources in-thread, waiting
 * on the finalized flags so finalize callbacks complete first. */
827 if (wl_vk_display->tdm.tdm_source) {
828 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
829 while (!wl_vk_display->tdm.gsource_finalized) {
830 tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
831 tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
833 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
836 if (wl_vk_display->disp_source) {
837 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
838 while (!wl_vk_display->gsource_finalized) {
839 tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
840 tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
842 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
845 if (wl_vk_display->thread) {
846 tpl_gthread_destroy(wl_vk_display->thread);
849 tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
850 tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
851 tpl_gcond_clear(&wl_vk_display->disp_cond);
852 tpl_gmutex_clear(&wl_vk_display->disp_mutex);
854 wl_vk_display->thread = NULL;
857 display->backend.data = NULL;
858 return TPL_ERROR_INVALID_OPERATION;
/* Backend display teardown: destroys the tdm and display gsources
 * (waiting on their finalized flags in gcond loops to tolerate missed
 * wakeups), joins the thread, then clears all mutexes/conds and frees
 * backend data (free not visible in this extract). */
862 __tpl_wl_vk_display_fini(tpl_display_t *display)
864 tpl_wl_vk_display_t *wl_vk_display;
868 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
870 TPL_INFO("[DISPLAY_FINI]",
871 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
873 wl_vk_display->thread,
874 wl_vk_display->wl_display);
876 if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) {
877 /* This is a protection to prevent problems that arise in unexpected situations
878 * that g_cond_wait cannot work normally.
879 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
880 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
882 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
883 while (!wl_vk_display->tdm.gsource_finalized) {
884 tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
885 tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
887 wl_vk_display->tdm.tdm_source = NULL;
888 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
891 /* This is a protection to prevent problems that arise in unexpected situations
892 * that g_cond_wait cannot work normally.
893 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
894 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
896 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
897 while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) {
898 tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
899 tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
901 wl_vk_display->disp_source = NULL;
902 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
904 if (wl_vk_display->thread) {
905 tpl_gthread_destroy(wl_vk_display->thread);
906 wl_vk_display->thread = NULL;
909 tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
910 tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
911 tpl_gcond_clear(&wl_vk_display->disp_cond);
912 tpl_gmutex_clear(&wl_vk_display->disp_mutex);
914 tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);
919 display->backend.data = NULL;
/* Reports supported window configs: accepts 8/8/8 RGB with 24- or
 * 32-bit depth, mapping alpha==8 to ARGB8888 and alpha==0 to XRGB8888.
 * Everything else is INVALID_PARAMETER. */
923 __tpl_wl_vk_display_query_config(tpl_display_t *display,
924 tpl_surface_type_t surface_type,
925 int red_size, int green_size,
926 int blue_size, int alpha_size,
927 int color_depth, int *native_visual_id,
932 if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
933 green_size == 8 && blue_size == 8 &&
934 (color_depth == 32 || color_depth == 24)) {
936 if (alpha_size == 8) {
937 if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
938 if (is_slow) *is_slow = TPL_FALSE;
939 return TPL_ERROR_NONE;
941 if (alpha_size == 0) {
942 if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
943 if (is_slow) *is_slow = TPL_FALSE;
944 return TPL_ERROR_NONE;
948 return TPL_ERROR_INVALID_PARAMETER;
/* Config filter hook: this backend does no filtering — parameters are
 * deliberately ignored and success is always returned. */
952 __tpl_wl_vk_display_filter_config(tpl_display_t *display,
957 TPL_IGNORE(visual_id);
958 TPL_IGNORE(alpha_size);
959 return TPL_ERROR_NONE;
/* Reports the backend's supported swapchain buffer-count range
 * (min_buffer/max_buffer set during display init); either out-pointer
 * may be NULL. */
963 __tpl_wl_vk_display_query_window_supported_buffer_count(
964 tpl_display_t *display,
965 tpl_handle_t window, int *min, int *max)
967 tpl_wl_vk_display_t *wl_vk_display = NULL;
972 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
973 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
975 if (min) *min = wl_vk_display->min_buffer;
976 if (max) *max = wl_vk_display->max_buffer;
978 return TPL_ERROR_NONE;
/* Reports the supported Vulkan present modes (FIFO by default, set at
 * display init). */
982 __tpl_wl_vk_display_query_window_supported_present_modes(
983 tpl_display_t *display,
984 tpl_handle_t window, int *present_modes)
986 tpl_wl_vk_display_t *wl_vk_display = NULL;
991 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
992 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
995 *present_modes = wl_vk_display->present_modes;
998 return TPL_ERROR_NONE;
/* Drains the surface's tracked buffers[] array: for each live
 * wl_vk_buffer, optionally waits (timed) for an in-flight signal,
 * then releases acquired buffers back to the tbm_queue or cancels a
 * pending dequeue, marks the entry RELEASED, and drops the extra
 * tbm_surface ref. Lock order: wl_event_mutex -> buffers_mutex ->
 * per-buffer mutex (buffers_mutex released before taking the buffer
 * mutex). */
1002 _tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
1004 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1005 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1006 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1007 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1008 tpl_bool_t need_to_release = TPL_FALSE;
1009 tpl_bool_t need_to_cancel = TPL_FALSE;
1010 buffer_status_t status = RELEASED;
1013 while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
1014 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1015 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
1016 wl_vk_buffer = wl_vk_surface->buffers[idx];
/* Detach the slot from the array before touching the buffer itself. */
1019 wl_vk_surface->buffers[idx] = NULL;
1020 wl_vk_surface->buffer_cnt--;
1022 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1023 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1028 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1030 tpl_gmutex_lock(&wl_vk_buffer->mutex);
1032 status = wl_vk_buffer->status;
1034 TPL_INFO("[BUFFER_CLEAR]",
1035 "[%d] wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
1036 idx, wl_vk_surface, wl_vk_buffer,
1037 wl_vk_buffer->tbm_surface,
1038 status_to_string[status]);
1040 if (status >= ENQUEUED) {
1041 tpl_bool_t need_to_wait = TPL_FALSE;
1042 tpl_result_t wait_result = TPL_ERROR_NONE;
/* Without explicit sync, wait for anything not yet past vblank;
 * with explicit sync the condition differs (line not visible). */
1044 if (!wl_vk_display->use_explicit_sync &&
1045 status < WAITING_VBLANK)
1046 need_to_wait = TPL_TRUE;
1048 if (wl_vk_display->use_explicit_sync &&
1050 need_to_wait = TPL_TRUE;
/* Drop wl_event_mutex while blocked so the event thread can make
 * progress and signal the buffer's cond. */
1053 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1054 wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond,
1055 &wl_vk_buffer->mutex,
1057 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1059 status = wl_vk_buffer->status;
1061 if (wait_result == TPL_ERROR_TIME_OUT)
1062 TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
1067 /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
1068 /* It has been acquired but has not yet been released, so this
1069 * buffer must be released. */
1070 need_to_release = (status >= ACQUIRED && status <= COMMITTED);
1072 /* After dequeue, it has not been enqueued yet
1073 * so cancel_dequeue must be performed. */
1074 need_to_cancel = (status == DEQUEUED);
1076 if (swapchain && swapchain->tbm_queue) {
1077 if (need_to_release) {
1078 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
1079 wl_vk_buffer->tbm_surface);
1080 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1081 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
1082 wl_vk_buffer->tbm_surface, tsq_err);
1085 if (need_to_cancel) {
1086 tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
1087 wl_vk_buffer->tbm_surface);
1088 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1089 TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
1090 wl_vk_buffer->tbm_surface, tsq_err);
1094 wl_vk_buffer->status = RELEASED;
1096 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
/* Drop the ref taken when the buffer entered the tracked state. */
1098 if (need_to_release || need_to_cancel)
1099 tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);
1101 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Creates a tdm_client_vblank on the "primary" output, drains pending
 * tdm events, and configures the vblank for fake fallback and async
 * operation. Returns NULL on any tdm failure (cleanup paths not fully
 * visible in this extract). */
1107 static tdm_client_vblank*
1108 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1110 tdm_client_vblank *vblank = NULL;
1111 tdm_client_output *tdm_output = NULL;
1112 tdm_error tdm_err = TDM_ERROR_NONE;
1115 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1119 tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1120 if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1121 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1125 vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1126 if (!vblank || tdm_err != TDM_ERROR_NONE) {
1127 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1131 tdm_err = tdm_client_handle_pending_events(tdm_client);
1132 if (tdm_err != TDM_ERROR_NONE) {
1133 TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err);
/* enable_fake=1: deliver software vblanks when the output is off;
 * sync=0: vblank waits are asynchronous. */
1136 tdm_client_vblank_set_enable_fake(vblank, 1);
1137 tdm_client_vblank_set_sync(vblank, 0);
/* Thread-side surface setup: creates the per-surface tdm vblank
 * handle, the zwp_linux surface_sync object (when explicit sync is
 * available), and the FIFO vblank-waiting buffer list. A surface_sync
 * failure downgrades the whole display to non-explicit-sync mode. */
1143 _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
1145 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1147 /* tbm_surface_queue will be created at swapchain_create */
1149 wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
1150 wl_vk_display->tdm.tdm_client);
1151 if (wl_vk_surface->vblank) {
1152 TPL_INFO("[VBLANK_INIT]",
1153 "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
1154 wl_vk_surface, wl_vk_display->tdm.tdm_client,
1155 wl_vk_surface->vblank);
1158 #if TIZEN_FEATURE_ENABLE
1159 if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
1160 wl_vk_surface->surface_sync =
1161 zwp_linux_explicit_synchronization_v1_get_synchronization(
1162 wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
1163 if (wl_vk_surface->surface_sync) {
1164 TPL_INFO("[EXPLICIT_SYNC_INIT]",
1165 "wl_vk_surface(%p) surface_sync(%p)",
1166 wl_vk_surface, wl_vk_surface->surface_sync);
1168 TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
/* Fall back: disable explicit sync for the entire display. */
1170 wl_vk_display->use_explicit_sync = TPL_FALSE;
1174 wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
/* Thread-side surface teardown: frees the vblank-waiting list and
 * destroys the surface_sync and vblank objects created in
 * _thread_wl_vk_surface_init (mirror order). */
1178 _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
1180 TPL_INFO("[SURFACE_FINI]",
1181 "wl_vk_surface(%p) wl_surface(%p)",
1182 wl_vk_surface, wl_vk_surface->wl_surface);
1184 if (wl_vk_surface->vblank_waiting_buffers) {
1185 __tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
1186 wl_vk_surface->vblank_waiting_buffers = NULL;
1189 #if TIZEN_FEATURE_ENABLE
1190 if (wl_vk_surface->surface_sync) {
1191 TPL_INFO("[SURFACE_SYNC_DESTROY]",
1192 "wl_vk_surface(%p) surface_sync(%p)",
1193 wl_vk_surface, wl_vk_surface->surface_sync);
1194 zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
1195 wl_vk_surface->surface_sync = NULL;
1199 if (wl_vk_surface->vblank) {
1200 TPL_INFO("[VBLANK_DESTROY]",
1201 "wl_vk_surface(%p) vblank(%p)",
1202 wl_vk_surface, wl_vk_surface->vblank);
1203 tdm_client_vblank_destroy(wl_vk_surface->vblank);
1204 wl_vk_surface->vblank = NULL;
/* gsource dispatch callback for the surface message pipe: under
 * surf_mutex, demultiplexes the bitmask 'message' into surface init,
 * queue acquire, swapchain queue create and destroy actions; signals
 * surf_cond for the synchronous ones (INIT/CREATE/DESTROY) so the
 * caller blocked on the message can continue, then resets
 * sent_message. */
1209 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1211 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1213 wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1215 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1216 if (message & INIT_SURFACE) { /* Initialize surface */
1217 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) initialize message received!",
1219 _thread_wl_vk_surface_init(wl_vk_surface);
1220 wl_vk_surface->initialized_in_thread = TPL_TRUE;
1221 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1224 if (message & ACQUIRABLE) { /* Acquirable message */
1225 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) acquirable message received!",
1227 if (_thread_surface_queue_acquire(wl_vk_surface)
1228 != TPL_ERROR_NONE) {
1229 TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1234 if (message & CREATE_QUEUE) { /* Create tbm_surface_queue */
1235 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) queue creation message received!",
1237 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1238 != TPL_ERROR_NONE) {
1239 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1242 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1245 if (message & DESTROY_QUEUE) { /* swapchain destroy */
1246 TPL_LOG_D("[MSG_RECEIVED]", "wl_vk_surface(%p) swapchain destroy message received!",
1248 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1249 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1252 /* init to NONE_MESSAGE */
1253 wl_vk_surface->sent_message = NONE_MESSAGE;
1255 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1261 __thread_func_surf_finalize(tpl_gsource *gsource)
1263 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1265 wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1266 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1268 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1269 TPL_LOG_D("[S_FINALIZE]", "wl_vk_surface(%p) tpl_gsource(%p)",
1270 wl_vk_surface, gsource);
1272 _thread_wl_vk_surface_fini(wl_vk_surface);
1274 wl_vk_surface->gsource_finalized = TPL_TRUE;
1276 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1277 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1280 static tpl_gsource_functions surf_funcs = {
1283 .dispatch = __thread_func_surf_dispatch,
1284 .finalize = __thread_func_surf_finalize,
1289 __tpl_wl_vk_surface_init(tpl_surface_t *surface)
1291 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1292 tpl_wl_vk_display_t *wl_vk_display = NULL;
1293 tpl_gsource *surf_source = NULL;
1295 TPL_ASSERT(surface);
1296 TPL_ASSERT(surface->display);
1297 TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1298 TPL_ASSERT(surface->native_handle);
1300 wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
1301 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1303 wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
1304 sizeof(tpl_wl_vk_surface_t));
1305 if (!wl_vk_surface) {
1306 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
1307 return TPL_ERROR_OUT_OF_MEMORY;
1310 surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
1311 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
1313 TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
1315 free(wl_vk_surface);
1316 surface->backend.data = NULL;
1317 return TPL_ERROR_INVALID_OPERATION;
1320 surface->backend.data = (void *)wl_vk_surface;
1321 surface->width = -1;
1322 surface->height = -1;
1324 wl_vk_surface->surf_source = surf_source;
1325 wl_vk_surface->swapchain = NULL;
1327 wl_vk_surface->wl_vk_display = wl_vk_display;
1328 wl_vk_surface->wl_surface = (struct wl_surface *)surface->native_handle;
1329 wl_vk_surface->tpl_surface = surface;
1331 wl_vk_surface->reset = TPL_FALSE;
1332 wl_vk_surface->is_activated = TPL_FALSE;
1333 wl_vk_surface->vblank_done = TPL_TRUE;
1334 wl_vk_surface->initialized_in_thread = TPL_FALSE;
1336 wl_vk_surface->render_done_cnt = 0;
1338 wl_vk_surface->vblank = NULL;
1339 #if TIZEN_FEATURE_ENABLE
1340 wl_vk_surface->surface_sync = NULL;
1343 wl_vk_surface->sent_message = NONE_MESSAGE;
1345 wl_vk_surface->post_interval = surface->post_interval;
1349 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1350 wl_vk_surface->buffers[i] = NULL;
1351 wl_vk_surface->buffer_cnt = 0;
1354 tpl_gmutex_init(&wl_vk_surface->surf_mutex);
1355 tpl_gcond_init(&wl_vk_surface->surf_cond);
1357 tpl_gmutex_init(&wl_vk_surface->buffers_mutex);
1359 /* Initialize in thread */
1360 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1361 wl_vk_surface->sent_message = INIT_SURFACE;
1362 tpl_gsource_send_message(wl_vk_surface->surf_source,
1363 wl_vk_surface->sent_message);
1364 while (!wl_vk_surface->initialized_in_thread)
1365 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1366 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1368 TPL_INFO("[SURFACE_INIT]",
1369 "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
1370 surface, wl_vk_surface, wl_vk_surface->surf_source);
1372 return TPL_ERROR_NONE;
1376 __tpl_wl_vk_surface_fini(tpl_surface_t *surface)
1378 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1379 tpl_wl_vk_display_t *wl_vk_display = NULL;
1381 TPL_ASSERT(surface);
1382 TPL_ASSERT(surface->display);
1384 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1385 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1387 wl_vk_display = (tpl_wl_vk_display_t *)
1388 surface->display->backend.data;
1389 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1391 TPL_INFO("[SURFACE_FINI][BEGIN]",
1392 "wl_vk_surface(%p) wl_surface(%p)",
1393 wl_vk_surface, wl_vk_surface->wl_surface);
1395 if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
1396 /* finalize swapchain */
1400 wl_vk_surface->swapchain = NULL;
1402 /* This is a protection to prevent problems that arise in unexpected situations
1403 * that g_cond_wait cannot work normally.
1404 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
1405 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
1407 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1408 while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) {
1409 tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
1410 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1412 wl_vk_surface->surf_source = NULL;
1413 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1415 _print_buffer_lists(wl_vk_surface);
1417 wl_vk_surface->wl_surface = NULL;
1418 wl_vk_surface->wl_vk_display = NULL;
1419 wl_vk_surface->tpl_surface = NULL;
1421 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1422 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1423 tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
1424 tpl_gcond_clear(&wl_vk_surface->surf_cond);
1426 TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);
1428 free(wl_vk_surface);
1429 surface->backend.data = NULL;
1433 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1436 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1438 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1440 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1442 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1444 TPL_INFO("[SET_POST_INTERVAL]",
1445 "wl_vk_surface(%p) post_interval(%d -> %d)",
1446 wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1448 wl_vk_surface->post_interval = post_interval;
1450 return TPL_ERROR_NONE;
1454 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1456 TPL_ASSERT(surface);
1457 TPL_ASSERT(surface->backend.data);
1459 tpl_wl_vk_surface_t *wl_vk_surface =
1460 (tpl_wl_vk_surface_t *)surface->backend.data;
1462 return !(wl_vk_surface->reset);
1466 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1469 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1470 tpl_wl_vk_display_t *wl_vk_display = NULL;
1471 tpl_wl_vk_swapchain_t *swapchain = NULL;
1472 tpl_surface_t *surface = NULL;
1473 tpl_bool_t is_activated = TPL_FALSE;
1476 wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1477 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1479 wl_vk_display = wl_vk_surface->wl_vk_display;
1480 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1482 surface = wl_vk_surface->tpl_surface;
1483 TPL_CHECK_ON_NULL_RETURN(surface);
1485 swapchain = wl_vk_surface->swapchain;
1486 TPL_CHECK_ON_NULL_RETURN(swapchain);
1488 /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1489 * the changed window size at the next frame. */
1490 width = tbm_surface_queue_get_width(tbm_queue);
1491 height = tbm_surface_queue_get_height(tbm_queue);
1492 if (surface->width != width || surface->height != height) {
1493 TPL_INFO("[QUEUE_RESIZE]",
1494 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1495 wl_vk_surface, tbm_queue,
1496 surface->width, surface->height, width, height);
1499 /* When queue_reset_callback is called, if is_activated is different from
1500 * its previous state change the reset flag to TPL_TRUE to get a new buffer
1501 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1502 is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
1503 swapchain->tbm_queue);
1504 if (wl_vk_surface->is_activated != is_activated) {
1506 TPL_INFO("[ACTIVATED]",
1507 "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1508 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1510 TPL_INFO("[DEACTIVATED]",
1511 " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1512 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1516 wl_vk_surface->reset = TPL_TRUE;
1518 if (surface->reset_cb)
1519 surface->reset_cb(surface->reset_data);
1523 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1526 TPL_IGNORE(tbm_queue);
1528 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1529 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1531 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1532 if (wl_vk_surface->sent_message == NONE_MESSAGE) {
1533 wl_vk_surface->sent_message = ACQUIRABLE;
1534 tpl_gsource_send_message(wl_vk_surface->surf_source,
1535 wl_vk_surface->sent_message);
1537 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1541 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1543 TPL_ASSERT (wl_vk_surface);
1545 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1546 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1547 tbm_surface_queue_h tbm_queue = NULL;
1548 tbm_bufmgr bufmgr = NULL;
1549 unsigned int capability;
1551 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1552 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1554 if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
1555 TPL_ERR("buffer count(%d) must be higher than (%d)",
1556 swapchain->properties.buffer_count,
1557 wl_vk_display->min_buffer);
1558 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1559 return TPL_ERROR_INVALID_PARAMETER;
1562 if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
1563 TPL_ERR("buffer count(%d) must be lower than (%d)",
1564 swapchain->properties.buffer_count,
1565 wl_vk_display->max_buffer);
1566 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1567 return TPL_ERROR_INVALID_PARAMETER;
1570 if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
1571 TPL_ERR("Unsupported present_mode(%d)",
1572 swapchain->properties.present_mode);
1573 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1574 return TPL_ERROR_INVALID_PARAMETER;
1577 if (swapchain->tbm_queue) {
1578 int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
1579 int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
1581 if (swapchain->swapchain_buffers) {
1583 for (i = 0; i < swapchain->properties.buffer_count; i++) {
1584 if (swapchain->swapchain_buffers[i]) {
1585 TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
1586 i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
1587 _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1588 tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1589 swapchain->swapchain_buffers[i] = NULL;
1593 free(swapchain->swapchain_buffers);
1594 swapchain->swapchain_buffers = NULL;
1597 if (old_width != swapchain->properties.width ||
1598 old_height != swapchain->properties.height) {
1599 tbm_surface_queue_reset(swapchain->tbm_queue,
1600 swapchain->properties.width,
1601 swapchain->properties.height,
1602 TBM_FORMAT_ARGB8888);
1603 TPL_INFO("[RESIZE]",
1604 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
1605 wl_vk_surface, swapchain, swapchain->tbm_queue,
1606 old_width, old_height,
1607 swapchain->properties.width,
1608 swapchain->properties.height);
1611 swapchain->properties.buffer_count =
1612 tbm_surface_queue_get_size(swapchain->tbm_queue);
1614 wl_vk_surface->reset = TPL_FALSE;
1616 __tpl_util_atomic_inc(&swapchain->ref_cnt);
1617 swapchain->create_done = TPL_TRUE;
1619 TPL_INFO("[SWAPCHAIN_REUSE]",
1620 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
1621 wl_vk_surface, swapchain, swapchain->tbm_queue,
1622 swapchain->properties.buffer_count);
1624 return TPL_ERROR_NONE;
1627 bufmgr = tbm_bufmgr_init(-1);
1628 capability = tbm_bufmgr_get_capability(bufmgr);
1629 tbm_bufmgr_deinit(bufmgr);
1631 if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1632 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1633 wl_vk_display->wl_tbm_client,
1634 wl_vk_surface->wl_surface,
1635 swapchain->properties.buffer_count,
1636 swapchain->properties.width,
1637 swapchain->properties.height,
1638 TBM_FORMAT_ARGB8888);
1640 tbm_queue = wayland_tbm_client_create_surface_queue(
1641 wl_vk_display->wl_tbm_client,
1642 wl_vk_surface->wl_surface,
1643 swapchain->properties.buffer_count,
1644 swapchain->properties.width,
1645 swapchain->properties.height,
1646 TBM_FORMAT_ARGB8888);
1650 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1652 swapchain->result = TPL_ERROR_OUT_OF_MEMORY;
1653 return TPL_ERROR_OUT_OF_MEMORY;
1656 if (tbm_surface_queue_set_modes(
1657 tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1658 TBM_SURFACE_QUEUE_ERROR_NONE) {
1659 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1661 tbm_surface_queue_destroy(tbm_queue);
1662 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1663 return TPL_ERROR_INVALID_OPERATION;
1666 if (tbm_surface_queue_add_reset_cb(
1668 __cb_tbm_queue_reset_callback,
1669 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1670 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1672 tbm_surface_queue_destroy(tbm_queue);
1673 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1674 return TPL_ERROR_INVALID_OPERATION;
1677 if (tbm_surface_queue_add_acquirable_cb(
1679 __cb_tbm_queue_acquirable_callback,
1680 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1681 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1683 tbm_surface_queue_destroy(tbm_queue);
1684 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1685 return TPL_ERROR_INVALID_OPERATION;
1688 swapchain->tbm_queue = tbm_queue;
1689 swapchain->create_done = TPL_TRUE;
1691 TPL_INFO("[TBM_QUEUE_CREATED]",
1692 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1693 wl_vk_surface, swapchain, tbm_queue);
1695 return TPL_ERROR_NONE;
1699 __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
1700 tbm_format format, int width,
1701 int height, int buffer_count, int present_mode)
1703 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1704 tpl_wl_vk_display_t *wl_vk_display = NULL;
1705 tpl_wl_vk_swapchain_t *swapchain = NULL;
1707 TPL_ASSERT(surface);
1708 TPL_ASSERT(surface->display);
1710 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1711 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1713 wl_vk_display = (tpl_wl_vk_display_t *)
1714 surface->display->backend.data;
1715 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1717 swapchain = wl_vk_surface->swapchain;
1719 if (swapchain == NULL) {
1721 (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
1722 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
1723 swapchain->tbm_queue = NULL;
1726 swapchain->properties.buffer_count = buffer_count;
1727 swapchain->properties.width = width;
1728 swapchain->properties.height = height;
1729 swapchain->properties.present_mode = present_mode;
1730 swapchain->wl_vk_surface = wl_vk_surface;
1731 swapchain->properties.format = format;
1733 swapchain->result = TPL_ERROR_NONE;
1734 swapchain->create_done = TPL_FALSE;
1736 wl_vk_surface->swapchain = swapchain;
1738 __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
1740 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1741 /* send swapchain create tbm_queue message */
1742 wl_vk_surface->sent_message = CREATE_QUEUE;
1743 tpl_gsource_send_message(wl_vk_surface->surf_source,
1744 wl_vk_surface->sent_message);
1745 while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE)
1746 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1747 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1749 TPL_CHECK_ON_FALSE_ASSERT_FAIL(
1750 swapchain->tbm_queue != NULL,
1751 "[CRITICAL FAIL] Failed to create tbm_surface_queue");
1753 wl_vk_surface->reset = TPL_FALSE;
1755 return TPL_ERROR_NONE;
1759 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1761 TPL_ASSERT(wl_vk_surface);
1763 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1765 TPL_CHECK_ON_NULL_RETURN(swapchain);
1767 if (swapchain->tbm_queue) {
1768 TPL_INFO("[TBM_QUEUE_DESTROY]",
1769 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1770 wl_vk_surface, swapchain, swapchain->tbm_queue);
1771 tbm_surface_queue_destroy(swapchain->tbm_queue);
1772 swapchain->tbm_queue = NULL;
1777 __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
1779 tpl_wl_vk_swapchain_t *swapchain = NULL;
1780 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1781 tpl_wl_vk_display_t *wl_vk_display = NULL;
1783 TPL_ASSERT(surface);
1784 TPL_ASSERT(surface->display);
1786 wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
1787 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1789 wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
1790 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1792 swapchain = wl_vk_surface->swapchain;
1794 TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
1796 return TPL_ERROR_INVALID_OPERATION;
1799 if (!swapchain->tbm_queue) {
1800 TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.",
1801 wl_vk_surface, wl_vk_surface->swapchain);
1802 return TPL_ERROR_INVALID_OPERATION;
1805 if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
1806 TPL_INFO("[DESTROY_SWAPCHAIN]",
1807 "wl_vk_surface(%p) swapchain(%p) still valid.",
1808 wl_vk_surface, swapchain);
1809 return TPL_ERROR_NONE;
1812 TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
1813 "wl_vk_surface(%p) swapchain(%p)",
1814 wl_vk_surface, wl_vk_surface->swapchain);
1816 if (swapchain->swapchain_buffers) {
1817 for (int i = 0; i < swapchain->properties.buffer_count; i++) {
1818 if (swapchain->swapchain_buffers[i]) {
1819 TPL_INFO("[UNTRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
1820 i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
1821 _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1822 tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1823 swapchain->swapchain_buffers[i] = NULL;
1827 free(swapchain->swapchain_buffers);
1828 swapchain->swapchain_buffers = NULL;
1831 _tpl_wl_vk_surface_buffer_clear(wl_vk_surface);
1833 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1834 wl_vk_surface->sent_message = DESTROY_QUEUE;
1835 tpl_gsource_send_message(wl_vk_surface->surf_source,
1836 wl_vk_surface->sent_message);
1837 while (swapchain->tbm_queue)
1838 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1839 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1841 _print_buffer_lists(wl_vk_surface);
1844 wl_vk_surface->swapchain = NULL;
1846 return TPL_ERROR_NONE;
1850 __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
1851 tbm_surface_h **buffers,
1854 TPL_ASSERT(surface);
1855 TPL_ASSERT(surface->backend.data);
1856 TPL_ASSERT(surface->display);
1857 TPL_ASSERT(surface->display->backend.data);
1859 tpl_wl_vk_surface_t *wl_vk_surface =
1860 (tpl_wl_vk_surface_t *)surface->backend.data;
1861 tpl_wl_vk_display_t *wl_vk_display =
1862 (tpl_wl_vk_display_t *)surface->display->backend.data;
1863 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1864 tpl_result_t ret = TPL_ERROR_NONE;
1867 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1868 TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
1870 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1873 *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
1874 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1875 return TPL_ERROR_NONE;
1878 swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
1880 sizeof(tbm_surface_h));
1881 if (!swapchain->swapchain_buffers) {
1882 TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
1884 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1885 return TPL_ERROR_OUT_OF_MEMORY;
1888 ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
1889 swapchain->tbm_queue,
1890 swapchain->swapchain_buffers,
1893 TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
1894 wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
1895 free(swapchain->swapchain_buffers);
1896 swapchain->swapchain_buffers = NULL;
1897 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1898 return TPL_ERROR_INVALID_OPERATION;
1901 for (i = 0; i < *buffer_count; i++) {
1902 if (swapchain->swapchain_buffers[i]) {
1903 TPL_INFO("[TRACK_BUFFERS]", "[%d] wl_vk_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
1904 i, wl_vk_surface, swapchain, swapchain->swapchain_buffers[i],
1905 _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1906 tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
1910 *buffers = swapchain->swapchain_buffers;
1912 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1914 return TPL_ERROR_NONE;
1918 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
1920 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
1921 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1923 TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
1924 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);
1926 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
1927 if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
1928 wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
1929 wl_vk_surface->buffer_cnt--;
1931 wl_vk_buffer->idx = -1;
1933 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1935 wl_display_flush(wl_vk_display->wl_display);
1937 if (wl_vk_buffer->wl_buffer) {
1938 wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
1939 wl_vk_buffer->wl_buffer);
1940 wl_vk_buffer->wl_buffer = NULL;
1943 #if TIZEN_FEATURE_ENABLE
1944 if (wl_vk_buffer->buffer_release) {
1945 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
1946 wl_vk_buffer->buffer_release = NULL;
1950 if (wl_vk_buffer->release_fence_fd != -1) {
1951 close(wl_vk_buffer->release_fence_fd);
1952 wl_vk_buffer->release_fence_fd = -1;
1955 if (wl_vk_buffer->rects) {
1956 free(wl_vk_buffer->rects);
1957 wl_vk_buffer->rects = NULL;
1958 wl_vk_buffer->num_rects = 0;
1961 wl_vk_buffer->tbm_surface = NULL;
1962 wl_vk_buffer->bo_name = -1;
1967 static tpl_wl_vk_buffer_t *
1968 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1970 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1971 tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1972 (void **)&wl_vk_buffer);
1973 return wl_vk_buffer;
1976 static tpl_wl_vk_buffer_t *
1977 _wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
1978 tbm_surface_h tbm_surface)
1980 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1982 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
1984 if (!wl_vk_buffer) {
1985 wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
1986 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);
1988 tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1989 (tbm_data_free)__cb_wl_vk_buffer_free);
1990 tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1993 wl_vk_buffer->wl_buffer = NULL;
1994 wl_vk_buffer->tbm_surface = tbm_surface;
1995 wl_vk_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
1996 wl_vk_buffer->wl_vk_surface = wl_vk_surface;
1998 wl_vk_buffer->status = RELEASED;
2000 wl_vk_buffer->acquire_fence_fd = -1;
2001 wl_vk_buffer->release_fence_fd = -1;
2003 wl_vk_buffer->dx = 0;
2004 wl_vk_buffer->dy = 0;
2005 wl_vk_buffer->width = tbm_surface_get_width(tbm_surface);
2006 wl_vk_buffer->height = tbm_surface_get_height(tbm_surface);
2008 wl_vk_buffer->rects = NULL;
2009 wl_vk_buffer->num_rects = 0;
2011 wl_vk_buffer->need_to_commit = TPL_FALSE;
2012 #if TIZEN_FEATURE_ENABLE
2013 wl_vk_buffer->buffer_release = NULL;
2015 tpl_gmutex_init(&wl_vk_buffer->mutex);
2016 tpl_gcond_init(&wl_vk_buffer->cond);
2018 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2021 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
2022 if (wl_vk_surface->buffers[i] == NULL) break;
2024 /* If this exception is reached,
2025 * it may be a critical memory leak problem. */
2026 if (i == BUFFER_ARRAY_SIZE) {
2027 tpl_wl_vk_buffer_t *evicted_buffer = NULL;
2028 int evicted_idx = 0; /* evict the frontmost buffer */
2030 evicted_buffer = wl_vk_surface->buffers[evicted_idx];
2032 TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
2034 TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
2035 evicted_buffer, evicted_buffer->tbm_surface,
2036 status_to_string[evicted_buffer->status]);
2038 /* [TODO] need to think about whether there will be
2039 * better modifications */
2040 wl_vk_surface->buffer_cnt--;
2041 wl_vk_surface->buffers[evicted_idx] = NULL;
2046 wl_vk_surface->buffer_cnt++;
2047 wl_vk_surface->buffers[i] = wl_vk_buffer;
2048 wl_vk_buffer->idx = i;
2050 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2052 TPL_INFO("[WL_VK_BUFFER_CREATE]",
2053 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2054 wl_vk_surface, wl_vk_buffer, tbm_surface,
2055 wl_vk_buffer->bo_name);
2058 return wl_vk_buffer;
2061 static tbm_surface_h
2062 __tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
2063 uint64_t timeout_ns,
2064 int32_t *release_fence)
2066 TPL_ASSERT(surface);
2067 TPL_ASSERT(surface->backend.data);
2068 TPL_ASSERT(surface->display);
2069 TPL_ASSERT(surface->display->backend.data);
2070 TPL_OBJECT_CHECK_RETURN(surface, NULL);
2072 tpl_wl_vk_surface_t *wl_vk_surface =
2073 (tpl_wl_vk_surface_t *)surface->backend.data;
2074 tpl_wl_vk_display_t *wl_vk_display =
2075 (tpl_wl_vk_display_t *)surface->display->backend.data;
2076 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2077 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2079 tbm_surface_h tbm_surface = NULL;
2080 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2082 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
2083 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);
2085 TPL_OBJECT_UNLOCK(surface);
2086 TRACE_BEGIN("WAIT_DEQUEUEABLE");
2087 if (timeout_ns != UINT64_MAX) {
2088 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
2089 swapchain->tbm_queue, timeout_ns/1000);
2091 tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
2094 TPL_OBJECT_LOCK(surface);
2096 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
2097 TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
2100 } else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2101 TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
2102 wl_vk_surface, swapchain->tbm_queue, tsq_err);
2106 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
2108 if (wl_vk_surface->reset) {
2109 TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
2110 swapchain, swapchain->tbm_queue);
2111 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2115 tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
2118 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
2119 swapchain->tbm_queue, wl_vk_surface, tsq_err);
2120 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2124 tbm_surface_internal_ref(tbm_surface);
2126 wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
2127 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");
2129 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2130 wl_vk_buffer->status = DEQUEUED;
2132 if (release_fence) {
2133 #if TIZEN_FEATURE_ENABLE
2134 if (wl_vk_surface->surface_sync) {
2135 *release_fence = wl_vk_buffer->release_fence_fd;
2136 TPL_LOG_D("[EXPLICIT_FENCE]", "wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
2137 wl_vk_surface, wl_vk_buffer, *release_fence);
2138 wl_vk_buffer->release_fence_fd = -1;
2142 *release_fence = -1;
2146 wl_vk_surface->reset = TPL_FALSE;
2148 TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2149 wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
2150 release_fence ? *release_fence : -1);
2152 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2153 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2159 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
2160 tbm_surface_h tbm_surface)
2162 TPL_ASSERT(surface);
2163 TPL_ASSERT(surface->backend.data);
2165 tpl_wl_vk_surface_t *wl_vk_surface =
2166 (tpl_wl_vk_surface_t *)surface->backend.data;
2167 tpl_wl_vk_swapchain_t *swapchain = NULL;
2168 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2169 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2171 TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2172 TPL_ERROR_INVALID_PARAMETER);
2174 swapchain = wl_vk_surface->swapchain;
2175 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2176 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2177 TPL_ERROR_INVALID_PARAMETER);
2179 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2181 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2182 wl_vk_buffer->status = RELEASED;
2183 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2186 tbm_surface_internal_unref(tbm_surface);
2188 TPL_INFO("[CANCEL BUFFER]",
2189 "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2190 wl_vk_surface, swapchain, tbm_surface,
2191 _get_tbm_surface_bo_name(tbm_surface));
2193 tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2195 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2196 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2197 return TPL_ERROR_INVALID_OPERATION;
2200 return TPL_ERROR_NONE;
/* Enqueue a previously dequeued buffer back into the swapchain's tbm_queue
 * for presentation by the thread.
 *
 * surface         : TPL surface owning the swapchain (asserted non-NULL).
 * tbm_surface     : buffer to enqueue; must be valid and belong to the queue.
 * num_rects/rects : optional damage rectangles, 4 ints (x, y, w, h) per rect.
 * acquire_fence   : sync fd the compositor must wait on before reading the
 *                   buffer; ownership transfers to wl_vk_buffer below.
 *
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_PARAMETER for bad
 * arguments, TPL_ERROR_INVALID_OPERATION when the queue enqueue fails.
 */
__tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
								   tbm_surface_h tbm_surface,
								   int num_rects, const int *rects,
								   int32_t acquire_fence)
	TPL_ASSERT(surface);
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->backend.data);
	TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);

	tpl_wl_vk_surface_t *wl_vk_surface =
		(tpl_wl_vk_surface_t *) surface->backend.data;
	tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
	TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
	TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
								  TPL_ERROR_INVALID_PARAMETER);

	wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
	if (!wl_vk_buffer) {
		TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
		return TPL_ERROR_INVALID_PARAMETER;

	/* Cache bo name for logging after the buffer mutex is released. */
	bo_name = wl_vk_buffer->bo_name;

	tpl_gmutex_lock(&wl_vk_buffer->mutex);

	/* If there are received region information, save it to wl_vk_buffer */
	if (num_rects && rects) {
		/* Drop damage info left over from a previous enqueue of this buffer. */
		if (wl_vk_buffer->rects != NULL) {
			free(wl_vk_buffer->rects);
			wl_vk_buffer->rects = NULL;
			wl_vk_buffer->num_rects = 0;

		wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
		wl_vk_buffer->num_rects = num_rects;

		/* NOTE(review): on calloc failure num_rects stays non-zero while
		 * rects is NULL; commit appears to fall back to full-surface damage
		 * since it checks both fields — confirm this is intended. */
		if (wl_vk_buffer->rects) {
			memcpy((char *)wl_vk_buffer->rects, (char *)rects,
				   sizeof(int) * 4 * num_rects);
			TPL_ERR("Failed to allocate memory for rects info.");

	/* Take ownership of acquire_fence: close any fd stored previously so
	 * it does not leak, then replace it. */
	if (wl_vk_buffer->acquire_fence_fd != -1)
		close(wl_vk_buffer->acquire_fence_fd);

	wl_vk_buffer->acquire_fence_fd = acquire_fence;

	wl_vk_buffer->status = ENQUEUED;
		"[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
		wl_vk_buffer, tbm_surface, bo_name, acquire_fence);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);

	tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
		/* Drop the dequeue-time reference even on failure. */
		tbm_surface_internal_unref(tbm_surface);
		TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
				tbm_surface, wl_vk_surface, tsq_err);
		return TPL_ERROR_INVALID_OPERATION;

	/* Release the reference taken at dequeue; the queue owns the buffer now. */
	tbm_surface_internal_unref(tbm_surface);

	return TPL_ERROR_NONE;
/* Listener for wl_buffer.release events (the non explicit-sync release path).
 * The cast adapts __cb_wl_buffer_release's wl_proxy-based signature to the
 * wl_buffer callback type expected by the listener struct. */
static const struct wl_buffer_listener wl_buffer_release_listener = {
	(void *)__cb_wl_buffer_release,
/* Thread-context: drain every acquirable buffer from the swapchain's
 * tbm_queue. For each buffer: mark it ACQUIRED, lazily create its wl_buffer
 * via wayland-tbm, then either commit it to the wl_surface immediately or
 * park it on vblank_waiting_buffers until the next vblank callback.
 *
 * Returns TPL_ERROR_NONE, or TPL_ERROR_INVALID_OPERATION when an acquire
 * from the tbm_queue fails.
 */
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
	tbm_surface_h tbm_surface = NULL;
	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
	tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
	tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
	tpl_bool_t ready_to_commit = TPL_TRUE;

	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);

	while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
		tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
		if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
			TPL_ERR("Failed to acquire from tbm_queue(%p)",
					swapchain->tbm_queue);
			return TPL_ERROR_INVALID_OPERATION;

		/* Hold a reference until the buffer is released back to the queue. */
		tbm_surface_internal_ref(tbm_surface);

		wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
									   "wl_vk_buffer sould be not NULL");

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		wl_vk_buffer->status = ACQUIRED;

		TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
				  wl_vk_buffer, tbm_surface,
				  _get_tbm_surface_bo_name(tbm_surface));

		/* Create the wl_buffer on first acquire of this tbm_surface. */
		if (wl_vk_buffer->wl_buffer == NULL) {
			wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
				wl_vk_display->wl_tbm_client, tbm_surface);

			if (!wl_vk_buffer->wl_buffer) {
				TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
						 wl_vk_display->wl_tbm_client, tbm_surface);
				/* Only listen for wl_buffer.release when explicit sync is not
				 * in use; otherwise release arrives via
				 * zwp_linux_buffer_release_v1 instead. */
				if (wl_vk_buffer->acquire_fence_fd == -1 ||
					wl_vk_display->use_explicit_sync == TPL_FALSE) {
					wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
										   &wl_buffer_release_listener, wl_vk_buffer);
			"[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
			wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);

		/* Commit now if vblank gating is off or this frame's vblank already
		 * arrived; otherwise queue the buffer for the vblank callback. */
		if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done)
			ready_to_commit = TPL_TRUE;
			wl_vk_buffer->status = WAITING_VBLANK;
			__tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
			ready_to_commit = TPL_FALSE;

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		if (ready_to_commit)
			_thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);

	return TPL_ERROR_NONE;
2357 #if TIZEN_FEATURE_ENABLE
/* zwp_linux_buffer_release_v1.fenced_release handler (explicit sync path).
 * The compositor is done with the buffer but the client must wait on the
 * supplied release fence before reusing it. Stores the fence fd, marks the
 * buffer RELEASED, returns it to the swapchain's tbm_queue, and drops the
 * reference taken at acquire time. */
__cb_buffer_fenced_release(void *data,
						   struct zwp_linux_buffer_release_v1 *release,
	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain = NULL;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);
		/* Only act on buffers we actually committed; anything else would be
		 * a stale or duplicate release event. */
		if (wl_vk_buffer->status == COMMITTED) {
			tbm_surface_queue_error_e tsq_err;

			/* The release object is one-shot: destroy it after the event. */
			zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
			wl_vk_buffer->buffer_release = NULL;

			/* Consumer must wait on this fence before writing to the buffer. */
			wl_vk_buffer->release_fence_fd = fence;
			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
					   wl_vk_buffer->bo_name,
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							wl_vk_buffer->bo_name);
			"[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
			wl_vk_buffer, tbm_surface,
			wl_vk_buffer->bo_name,

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			/* Drop the reference taken when the buffer was acquired. */
			tbm_surface_internal_unref(tbm_surface);

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* zwp_linux_buffer_release_v1.immediate_release handler (explicit sync path).
 * Like the fenced variant, but the buffer may be reused right away, so the
 * stored release fence fd is set to -1. Marks the buffer RELEASED, returns
 * it to the tbm_queue, and drops the acquire-time reference. */
__cb_buffer_immediate_release(void *data,
							  struct zwp_linux_buffer_release_v1 *release)
	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain = NULL;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);
		/* Only act on buffers we actually committed. */
		if (wl_vk_buffer->status == COMMITTED) {
			tbm_surface_queue_error_e tsq_err;

			/* The release object is one-shot: destroy it after the event. */
			zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
			wl_vk_buffer->buffer_release = NULL;

			/* No fence: the buffer is immediately reusable. */
			wl_vk_buffer->release_fence_fd = -1;
			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
					   _get_tbm_surface_bo_name(tbm_surface));
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							_get_tbm_surface_bo_name(tbm_surface));
			"[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
			wl_vk_buffer, tbm_surface,
			_get_tbm_surface_bo_name(tbm_surface));

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			/* Drop the reference taken when the buffer was acquired. */
			tbm_surface_internal_unref(tbm_surface);

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* Listener for explicit-sync buffer release events; entries must stay in
 * protocol order: fenced_release first, immediate_release second.
 * (Identifier spelling "listner" is kept as-is: it is referenced elsewhere.) */
static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
	__cb_buffer_fenced_release,
	__cb_buffer_immediate_release,
/* wl_buffer.release handler (non explicit-sync path). The compositor no
 * longer scans out the buffer: mark it RELEASED, return it to the
 * swapchain's tbm_queue, and drop the acquire-time reference. */
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
	tbm_surface_h tbm_surface = NULL;

	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)

	tbm_surface = wl_vk_buffer->tbm_surface;

	if (tbm_surface_internal_is_valid(tbm_surface)) {
		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
		tpl_wl_vk_swapchain_t *swapchain = NULL;
		tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;

		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
			tbm_surface_internal_unref(tbm_surface);

		swapchain = wl_vk_surface->swapchain;

		tpl_gmutex_lock(&wl_vk_buffer->mutex);

		/* Only act on buffers we actually committed; ignore stale events. */
		if (wl_vk_buffer->status == COMMITTED) {

			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

			wl_vk_buffer->status = RELEASED;

			TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
							wl_vk_buffer->bo_name);

			TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
					  wl_vk_buffer->wl_buffer, tbm_surface,
					  wl_vk_buffer->bo_name);

			/* Drop the reference taken when the buffer was acquired. */
			tbm_surface_internal_unref(tbm_surface);

		tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* TDM vblank-done callback, invoked on the thread when the vblank requested
 * by _thread_surface_vblank_wait fires (or times out). Marks the surface as
 * vblank_done and commits the next buffer waiting on vblank, if any.
 * A TDM_ERROR_TIMEOUT is logged but otherwise treated as success so
 * presentation keeps going. */
__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
					   unsigned int sequence, unsigned int tv_sec,
					   unsigned int tv_usec, void *user_data)
	tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
	tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;

	TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK");
	TPL_LOG_D("[VBLANK_DONE]", "wl_vk_surface(%p)", wl_vk_surface);

	if (error == TDM_ERROR_TIMEOUT)
		TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",

	wl_vk_surface->vblank_done = TPL_TRUE;

	/* Pop one parked buffer (FIFO) and commit it under surf_mutex. */
	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
	wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
		wl_vk_surface->vblank_waiting_buffers,
		_thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* Arm a TDM vblank wait for this surface; __cb_tdm_client_vblank will fire
 * after post_interval vblanks. Lazily creates the per-surface tdm vblank
 * object on first use.
 *
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_OUT_OF_MEMORY when the
 * vblank object cannot be created, TPL_ERROR_INVALID_OPERATION when
 * tdm_client_vblank_wait fails. */
_thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
	tdm_error tdm_err = TDM_ERROR_NONE;
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;

	if (wl_vk_surface->vblank == NULL) {
		/* NOTE(review): accesses wl_vk_display->tdm.tdm_client while the
		 * struct shown at the top of this file has a flat tdm_client member —
		 * looks like a version skew; verify against the current header. */
		wl_vk_surface->vblank =
			_thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client);
		if (!wl_vk_surface->vblank) {
			TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
			return TPL_ERROR_OUT_OF_MEMORY;

	tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
									 wl_vk_surface->post_interval,
									 __cb_tdm_client_vblank,
									 (void *)wl_vk_surface);

	if (tdm_err == TDM_ERROR_NONE) {
		/* Gate further commits until the callback sets vblank_done again. */
		wl_vk_surface->vblank_done = TPL_FALSE;
		TRACE_ASYNC_BEGIN((intptr_t)wl_vk_surface, "WAIT_VBLANK");
		TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
		return TPL_ERROR_INVALID_OPERATION;

	return TPL_ERROR_NONE;
/* Thread-context: attach wl_vk_buffer to the wl_surface, post its damage,
 * wire up explicit sync (acquire fence + release listener) when enabled,
 * commit, and flush the display. On success the buffer becomes COMMITTED
 * and waiters on its condvar are signalled; finally a new vblank wait is
 * armed when vblank gating is in use. */
_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
						  tpl_wl_vk_buffer_t *wl_vk_buffer)
	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
	struct wl_surface *wl_surface = wl_vk_surface->wl_surface;

	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
								   "wl_vk_buffer sould be not NULL");

	/* Late wl_buffer creation in case acquire-time creation failed/was
	 * skipped; listener only when explicit sync will not deliver release. */
	if (wl_vk_buffer->wl_buffer == NULL) {
		wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
			wl_vk_display->wl_tbm_client,
			wl_vk_buffer->tbm_surface);
		if (wl_vk_buffer->wl_buffer &&
			(wl_vk_buffer->acquire_fence_fd == -1 ||
			 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
			wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
								   &wl_buffer_release_listener, wl_vk_buffer);

	/* Cannot present without a wl_buffer: fatal. */
	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
								   "[FATAL] Failed to create wl_buffer");

	/* wl_surface version decides damage vs damage_buffer below
	 * (wl_surface.damage_buffer requires version >= 4). */
	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);

	wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
					  wl_vk_buffer->dx, wl_vk_buffer->dy);

	if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
		/* No damage info from the client: damage the whole buffer. */
		wl_surface_damage(wl_surface,
						  wl_vk_buffer->dx, wl_vk_buffer->dy,
						  wl_vk_buffer->width, wl_vk_buffer->height);
		wl_surface_damage_buffer(wl_surface,
								 wl_vk_buffer->width, wl_vk_buffer->height);

		/* Per-rect damage; rects are stored as (x, y, w, h) quadruples and
		 * the y coordinate is flipped from GL-style bottom-left origin to
		 * Wayland's top-left origin. */
		for (i = 0; i < wl_vk_buffer->num_rects; i++) {
			wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
									wl_vk_buffer->rects[i * 4 + 3]);

			wl_surface_damage(wl_surface,
							  wl_vk_buffer->rects[i * 4 + 0],
							  wl_vk_buffer->rects[i * 4 + 2],
							  wl_vk_buffer->rects[i * 4 + 3]);
			wl_surface_damage_buffer(wl_surface,
									 wl_vk_buffer->rects[i * 4 + 0],
									 wl_vk_buffer->rects[i * 4 + 2],
									 wl_vk_buffer->rects[i * 4 + 3]);

#if TIZEN_FEATURE_ENABLE
	if (wl_vk_display->use_explicit_sync &&
		wl_vk_surface->surface_sync &&
		wl_vk_buffer->acquire_fence_fd != -1) {

		/* Hand the acquire fence to the compositor, then close our copy —
		 * the protocol duplicates the fd. */
		zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
															   wl_vk_buffer->acquire_fence_fd);
		TPL_LOG_D("[SET_ACQUIRE_FENCE][1/2]", "wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
				  wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
		close(wl_vk_buffer->acquire_fence_fd);
		wl_vk_buffer->acquire_fence_fd = -1;

		/* Request a release object for this commit; its listener delivers
		 * fenced/immediate release instead of wl_buffer.release. */
		wl_vk_buffer->buffer_release =
			zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
		if (!wl_vk_buffer->buffer_release) {
			TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
			zwp_linux_buffer_release_v1_add_listener(
				wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
			TPL_LOG_D("[SET_ACQUIRE_FENCE][2/2]", "add explicit_sync_release_listener.");

	wl_surface_commit(wl_surface);

	wl_display_flush(wl_vk_display->wl_display);

	TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
					  wl_vk_buffer->bo_name);

	tpl_gmutex_lock(&wl_vk_buffer->mutex);

	wl_vk_buffer->need_to_commit = TPL_FALSE;
	wl_vk_buffer->status = COMMITTED;

	/* Wake any thread blocked waiting for this buffer to be committed. */
	tpl_gcond_signal(&wl_vk_buffer->cond);

	tpl_gmutex_unlock(&wl_vk_buffer->mutex);

		"[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
		wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
		wl_vk_buffer->bo_name);

	/* Re-arm vblank gating for the next frame. */
	if (wl_vk_display->use_wait_vblank &&
		_thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
		TPL_ERR("Failed to set wait vblank.");
/* Backend probe: report whether native_dpy is a Wayland display handle this
 * backend can drive. NULL handles are rejected immediately. */
__tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
	if (!native_dpy) return TPL_FALSE;

	if (_check_native_handle_is_wl_display(native_dpy))
/* Populate the display backend vtable with this threaded WL/VK backend's
 * entry points. backend->data stays NULL until init allocates state. */
__tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_vk_display_init;
	backend->fini = __tpl_wl_vk_display_fini;
	backend->query_config = __tpl_wl_vk_display_query_config;
	backend->filter_config = __tpl_wl_vk_display_filter_config;
	backend->query_window_supported_buffer_count =
		__tpl_wl_vk_display_query_window_supported_buffer_count;
	backend->query_window_supported_present_modes =
		__tpl_wl_vk_display_query_window_supported_present_modes;
/* Populate the surface backend vtable with this threaded WL/VK backend's
 * entry points, including the Vulkan swapchain operations. */
__tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_vk_surface_init;
	backend->fini = __tpl_wl_vk_surface_fini;
	backend->validate = __tpl_wl_vk_surface_validate;
	backend->cancel_dequeued_buffer =
		__tpl_wl_vk_surface_cancel_buffer;
	backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
	backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
	backend->get_swapchain_buffers =
		__tpl_wl_vk_surface_get_swapchain_buffers;
	backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
	backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
	backend->set_post_interval =
		__tpl_wl_vk_surface_set_post_interval;
/* Export the global name of tbm_surface's first bo; used as a stable buffer
 * identifier in logs and traces throughout this file. */
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
	return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
/* Debug helper: dump every slot of the surface's buffer array (index,
 * pointers, bo name, status string) under buffers_mutex. */
_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
	tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
	TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
			 wl_vk_surface, wl_vk_surface->buffer_cnt);
	for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
		tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
			"INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
			idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
			wl_vk_buffer->bo_name,
			status_to_string[wl_vk_buffer->status]);
	tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);