1 #define inline __inline__
4 #include "tpl_internal.h"
9 #include <sys/eventfd.h>
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
20 #include <tdm_client.h>
22 #ifndef TIZEN_FEATURE_ENABLE
23 #define TIZEN_FEATURE_ENABLE 1
26 #if TIZEN_FEATURE_ENABLE
27 #include <tizen-surface-client-protocol.h>
28 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
31 #include "tpl_utils_gthread.h"
/* Capacity of the per-surface wl_vk_buffer tracking array (buffers[]). */
33 #define BUFFER_ARRAY_SIZE 10
/* Buffer-queue depth used for Vulkan clients (see max_buffer init). */
34 #define VK_CLIENT_QUEUE_SIZE 3
/* The address of this static object is used as a process-unique user-data
 * key (presumably for tbm_surface user data — TODO confirm against the
 * elided code that uses KEY_WL_VK_BUFFER). */
36 static int wl_vk_buffer_key;
37 #define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
/* Forward typedefs for the backend's internal object types, so the structs
 * below can reference each other. */
39 typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t;
40 typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t;
41 typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t;
42 typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t;
/* Backend state for one tpl_display.
 * NOTE(review): some fields of this struct are not visible in this view
 * (e.g. thread, prepared, tdm_display_fd, min_buffer/max_buffer,
 * present_modes are referenced elsewhere in this file). */
44 struct _tpl_wl_vk_display {
45 tpl_gsource *disp_source; /* gsource polling the wl_display fd on the backend thread */
47 tpl_gmutex wl_event_mutex; /* serializes dispatch of ev_queue events */
49 struct wl_display *wl_display; /* client's native wl_display (not owned) */
50 struct wl_event_queue *ev_queue; /* private event queue for backend proxies */
51 struct wayland_tbm_client *wl_tbm_client;
52 int last_error; /* errno of the last wl_display error*/
54 tpl_bool_t wl_initialized;
55 tpl_bool_t tdm_initialized;
57 tdm_client *tdm_client; /* tdm client used for vblank waiting */
58 tpl_gsource *tdm_source; /* gsource polling the tdm client fd */
61 tpl_bool_t use_wait_vblank; /* default TRUE; TPL_WAIT_VBLANK env can disable */
62 tpl_bool_t use_explicit_sync; /* set TRUE when explicit_sync global is bound; TPL_EFS env can disable */
65 /* device surface capabilities */
69 #if TIZEN_FEATURE_ENABLE
70 struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
/* Swapchain object owned by a wl_vk_surface; refcounted.
 * NOTE(review): additional fields are elided from this view. */
74 struct _tpl_wl_vk_swapchain {
75 tpl_wl_vk_surface_t *wl_vk_surface; /* back pointer to owning surface */
77 tbm_surface_queue_h tbm_queue; /* buffer queue backing the swapchain */
87 tbm_surface_h *swapchain_buffers; /* array of exported swapchain images */
89 tpl_util_atomic_uint ref_cnt; /* reference count of this swapchain */
/* Messages sent from the client thread to the backend thread's surface
 * gsource (see __thread_func_surf_dispatch: INIT_SURFACE, CREATE_QUEUE,
 * DESTROY_QUEUE, ACQUIRABLE, NONE_MESSAGE). Enumerators are elided here. */
92 typedef enum surf_message {
/* Backend state for one tpl_surface (a Vulkan window surface).
 * NOTE(review): some fields (surf_cond, post_interval, render_done_cnt)
 * are referenced elsewhere in this file but elided from this view. */
100 struct _tpl_wl_vk_surface {
101 tpl_gsource *surf_source; /* gsource handling surf_message on the backend thread */
103 tpl_wl_vk_swapchain_t *swapchain; /* current swapchain, NULL when none */
105 struct wl_surface *wl_surface; /* client's native wl_surface (not owned) */
106 #if TIZEN_FEATURE_ENABLE
107 struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
109 tdm_client_vblank *vblank; /* per-surface vblank object from tdm_client */
111 /* surface information */
114 tpl_wl_vk_display_t *wl_vk_display; /* owning display backend */
115 tpl_surface_t *tpl_surface; /* back pointer to the public tpl_surface */
117 /* wl_vk_buffer array for buffer tracing */
118 tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE];
119 int buffer_cnt; /* the number of using wl_vk_buffers */
120 tpl_gmutex buffers_mutex; /* guards buffers[] and buffer_cnt */
122 tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
124 tpl_gmutex surf_mutex; /* guards message passing with the backend thread */
127 /* for waiting draw done */
128 tpl_bool_t is_activated;
129 tpl_bool_t reset; /* TRUE if queue reseted by external */
130 tpl_bool_t vblank_done;
132 surf_message sent_message; /* last message posted to surf_source */
/* Lifecycle states of a wl_vk_buffer. Values visible elsewhere in this
 * file: RELEASED, DEQUEUED, ENQUEUED, ACQUIRED, WAITING_SIGNALED,
 * WAITING_VBLANK, COMMITTED; most enumerators are elided from this view. */
137 typedef enum buffer_status {
142 WAITING_SIGNALED, // 4
/* Human-readable names for buffer_status, indexed by the enum value
 * (used by debug logging in _tpl_wl_vk_surface_buffer_clear). */
147 static const char *status_to_string[7] = {
152 "WAITING_SIGNALED", // 4
153 "WAITING_VBLANK", // 5
/* Per-buffer tracking object, one per tbm_surface in flight.
 * NOTE(review): mutex/cond fields used elsewhere are elided here. */
157 struct _tpl_wl_vk_buffer {
158 tbm_surface_h tbm_surface; /* the underlying tbm buffer */
161 struct wl_buffer *wl_buffer; /* wayland proxy attached to wl_surface */
162 int dx, dy; /* position to attach to wl_surface */
163 int width, height; /* size to attach to wl_surface */
165 buffer_status_t status; /* for tracing buffer status */
166 int idx; /* position index in buffers array of wl_vk_surface */
168 /* for damage region */
172 /* for checking need_to_commit (frontbuffer mode) */
173 tpl_bool_t need_to_commit;
175 #if TIZEN_FEATURE_ENABLE
176 /* to get release event via zwp_linux_buffer_release_v1 */
177 struct zwp_linux_buffer_release_v1 *buffer_release;
180 /* each buffers own its release_fence_fd, until it passes ownership
182 int32_t release_fence_fd;
184 /* each buffers own its acquire_fence_fd.
185 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
186 * will be passed to display server
187 * Otherwise it will be used as a fence waiting for render done
189 int32_t acquire_fence_fd;
194 tpl_wl_vk_surface_t *wl_vk_surface; /* back pointer to owning surface */
/* Forward declarations of helpers defined later in this file
 * (return types are on elided lines). */
198 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
200 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
202 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
204 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
206 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
208 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
210 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
212 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
213 tpl_wl_vk_buffer_t *wl_vk_buffer);
/* Heuristically decides whether native_dpy is a wl_display: the first
 * pointer-sized value of a wl_display points at wl_display_interface,
 * so compare by address first, then by interface name. */
216 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
218 struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
220 if (!wl_vk_native_dpy) {
221 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
225 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
226 is a memory address pointing the structure of wl_display_interface. */
227 if (wl_vk_native_dpy == &wl_display_interface)
/* Fallback: match the interface name prefix (covers copies of the
 * interface descriptor living in a different shared object). */
230 if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
231 strlen(wl_display_interface.name)) == 0) {
/* gsource dispatch for the tdm client fd: pumps tdm events (vblank
 * callbacks). On unrecoverable error the tdm_source is destroyed and
 * detached from wl_vk_display so it is no longer used. */
239 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
241 tpl_wl_vk_display_t *wl_vk_display = NULL;
242 tdm_error tdm_err = TDM_ERROR_NONE;
246 wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
247 if (!wl_vk_display) {
248 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
249 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
253 tdm_err = tdm_client_handle_events(wl_vk_display->tdm_client);
255 /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
256 * When tdm_source is no longer available due to an unexpected situation,
257 * wl_vk_thread must remove it from the thread and destroy it.
258 * In that case, tdm_vblank can no longer be used for surfaces and displays
259 * that used this tdm_source. */
260 if (tdm_err != TDM_ERROR_NONE) {
261 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
263 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
265 tpl_gsource_destroy(gsource, TPL_FALSE);
267 wl_vk_display->tdm_source = NULL;
/* gsource finalize for tdm_source: destroys the tdm_client and marks
 * tdm as uninitialized. Runs on the backend thread. */
276 __thread_func_tdm_finalize(tpl_gsource *gsource)
278 tpl_wl_vk_display_t *wl_vk_display = NULL;
280 wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
283 "tdm_destroy| wl_vk_display(%p) tdm_client(%p) tpl_gsource(%p)",
284 wl_vk_display, wl_vk_display->tdm_client, gsource);
286 if (wl_vk_display->tdm_client) {
287 tdm_client_destroy(wl_vk_display->tdm_client);
288 wl_vk_display->tdm_client = NULL;
289 wl_vk_display->tdm_display_fd = -1;
292 wl_vk_display->tdm_initialized = TPL_FALSE;
/* gsource vtable for the tdm client fd (prepare/check slots elided). */
295 static tpl_gsource_functions tdm_funcs = {
298 .dispatch = __thread_func_tdm_dispatch,
299 .finalize = __thread_func_tdm_finalize,
/* Creates a tdm_client and stores it (plus its pollable fd) on
 * wl_vk_display. Runs on the backend thread during _thread_init.
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_OPERATION on
 * any tdm failure (client is destroyed on the fd-query failure path). */
303 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
305 tdm_client *tdm_client = NULL;
306 int tdm_display_fd = -1;
307 tdm_error tdm_err = TDM_ERROR_NONE;
309 tdm_client = tdm_client_create(&tdm_err);
310 if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
311 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
312 return TPL_ERROR_INVALID_OPERATION;
315 tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
316 if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
317 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
318 tdm_client_destroy(tdm_client);
319 return TPL_ERROR_INVALID_OPERATION;
322 wl_vk_display->tdm_display_fd = tdm_display_fd;
323 wl_vk_display->tdm_client = tdm_client;
324 wl_vk_display->tdm_source = NULL;
325 wl_vk_display->tdm_initialized = TPL_TRUE;
327 TPL_INFO("[TDM_CLIENT_INIT]",
328 "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
329 wl_vk_display, tdm_client, tdm_display_fd);
331 return TPL_ERROR_NONE;
/* wl_registry global listener: binds zwp_linux_explicit_synchronization_v1
 * (version 1) and enables use_explicit_sync, unless the TPL_EFS
 * environment variable is set to 0. No-op when TIZEN_FEATURE_ENABLE is 0. */
335 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
336 uint32_t name, const char *interface,
339 #if TIZEN_FEATURE_ENABLE
340 tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
342 if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
343 char *env = tpl_getenv("TPL_EFS");
344 if (env && !atoi(env)) {
345 wl_vk_display->use_explicit_sync = TPL_FALSE;
347 wl_vk_display->explicit_sync =
348 wl_registry_bind(wl_registry, name,
349 &zwp_linux_explicit_synchronization_v1_interface, 1);
350 wl_vk_display->use_explicit_sync = TPL_TRUE;
351 TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
/* wl_registry global_remove listener: intentionally does nothing. */
358 __cb_wl_resistry_global_remove_callback(void *data,
359 struct wl_registry *wl_registry,
/* Listener vtable passed to wl_registry_add_listener in
 * _thread_wl_display_init. */
364 static const struct wl_registry_listener registry_listener = {
365 __cb_wl_resistry_global_callback,
366 __cb_wl_resistry_global_remove_callback
/* Logs the errno of a failed wayland call (once per distinct errno —
 * repeats of last_error are skipped), plus protocol-error details when
 * wl_display_get_error reports EPROTO. Records errno in last_error so
 * the display gsource can detect the fatal state. */
370 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
371 const char *func_name)
375 strerror_r(errno, buf, sizeof(buf));
377 if (wl_vk_display->last_error == errno)
380 TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
382 dpy_err = wl_display_get_error(wl_vk_display->wl_display);
383 if (dpy_err == EPROTO) {
384 const struct wl_interface *err_interface;
385 uint32_t err_proxy_id, err_code;
386 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
389 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
390 err_interface->name, err_code, err_proxy_id);
393 wl_vk_display->last_error = errno;
/* One-time wayland setup on the backend thread: creates the private
 * event queue, walks the registry (via a wrapped display proxy bound to
 * a temporary queue) to bind globals, and initializes wayland-tbm.
 * Returns TPL_ERROR_NONE on success; on failure returns an error code
 * after the (elided) cleanup labels release the temporaries.
 * FIX(review): "&registry_listener" had been mangled to "®istry_listener"
 * by an HTML-entity collapse of "&reg"; restored the address-of operator. */
397 _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
399 struct wl_registry *registry = NULL;
400 struct wl_event_queue *queue = NULL;
401 struct wl_display *display_wrapper = NULL;
402 struct wl_proxy *wl_tbm = NULL;
403 struct wayland_tbm_client *wl_tbm_client = NULL;
405 tpl_result_t result = TPL_ERROR_NONE;
/* Temporary queue used only for the registry roundtrip below. */
407 queue = wl_display_create_queue(wl_vk_display->wl_display);
409 TPL_ERR("Failed to create wl_queue wl_display(%p)",
410 wl_vk_display->wl_display);
411 result = TPL_ERROR_INVALID_OPERATION;
/* Long-lived private queue for all backend-owned proxies. */
415 wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
416 if (!wl_vk_display->ev_queue) {
417 TPL_ERR("Failed to create wl_queue wl_display(%p)",
418 wl_vk_display->wl_display);
419 result = TPL_ERROR_INVALID_OPERATION;
/* Wrap the display so the registry events land on the temporary queue
 * instead of the client's default queue. */
423 display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
424 if (!display_wrapper) {
425 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
426 wl_vk_display->wl_display);
427 result = TPL_ERROR_INVALID_OPERATION;
431 wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
433 registry = wl_display_get_registry(display_wrapper);
435 TPL_ERR("Failed to create wl_registry");
436 result = TPL_ERROR_INVALID_OPERATION;
/* The wrapper is only needed to create the registry proxy. */
440 wl_proxy_wrapper_destroy(display_wrapper);
441 display_wrapper = NULL;
443 wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
444 if (!wl_tbm_client) {
445 TPL_ERR("Failed to initialize wl_tbm_client.");
446 result = TPL_ERROR_INVALID_CONNECTION;
450 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
452 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
453 result = TPL_ERROR_INVALID_CONNECTION;
/* Route wl_tbm events to the backend's private queue. */
457 wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
458 wl_vk_display->wl_tbm_client = wl_tbm_client;
460 if (wl_registry_add_listener(registry, &registry_listener,
462 TPL_ERR("Failed to wl_registry_add_listener");
463 result = TPL_ERROR_INVALID_OPERATION;
/* Roundtrip so the global callbacks run before we continue. */
467 ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
469 _wl_display_print_err(wl_vk_display, "roundtrip_queue");
470 result = TPL_ERROR_INVALID_OPERATION;
474 #if TIZEN_FEATURE_ENABLE
475 if (wl_vk_display->explicit_sync) {
/* Move the bound global off the registry queue onto the private one. */
476 wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
477 wl_vk_display->ev_queue);
478 TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
479 wl_vk_display->explicit_sync);
483 wl_vk_display->wl_initialized = TPL_TRUE;
485 TPL_INFO("[WAYLAND_INIT]",
486 "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
487 wl_vk_display, wl_vk_display->wl_display,
488 wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
489 #if TIZEN_FEATURE_ENABLE
490 TPL_INFO("[WAYLAND_INIT]",
492 wl_vk_display->explicit_sync);
/* Cleanup labels (elided): temporaries are released on every path. */
496 wl_proxy_wrapper_destroy(display_wrapper);
498 wl_registry_destroy(registry);
500 wl_event_queue_destroy(queue);
/* Tears down the wayland state created by _thread_wl_display_init:
 * cancels a pending read, flushes pending events on ev_queue, destroys
 * the explicit_sync global, deinitializes wayland-tbm and destroys the
 * private event queue. Runs on the backend thread. */
506 _thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
508 /* If wl_vk_display is in prepared state, cancel it */
509 if (wl_vk_display->prepared) {
510 wl_display_cancel_read(wl_vk_display->wl_display);
511 wl_vk_display->prepared = TPL_FALSE;
514 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
515 wl_vk_display->ev_queue) == -1) {
516 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
519 #if TIZEN_FEATURE_ENABLE
520 if (wl_vk_display->explicit_sync) {
521 TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
522 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
523 wl_vk_display, wl_vk_display->explicit_sync);
524 zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
525 wl_vk_display->explicit_sync = NULL;
529 if (wl_vk_display->wl_tbm_client) {
530 struct wl_proxy *wl_tbm = NULL;
532 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
533 wl_vk_display->wl_tbm_client);
/* Detach wl_tbm from ev_queue before the queue is destroyed below. */
535 wl_proxy_set_queue(wl_tbm, NULL);
538 TPL_INFO("[WL_TBM_DEINIT]",
539 "wl_vk_display(%p) wl_tbm_client(%p)",
540 wl_vk_display, wl_vk_display->wl_tbm_client);
541 wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
542 wl_vk_display->wl_tbm_client = NULL;
545 wl_event_queue_destroy(wl_vk_display->ev_queue);
547 wl_vk_display->wl_initialized = TPL_FALSE;
549 TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
550 wl_vk_display, wl_vk_display->wl_display);
/* Thread-entry init hook run once on the backend thread: wayland setup
 * is mandatory; tdm setup failure only disables vblank waiting. Returns
 * the wl_vk_display as the thread's init result. */
554 _thread_init(void *data)
556 tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
558 if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
559 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
560 wl_vk_display, wl_vk_display->wl_display);
563 if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
564 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
567 return wl_vk_display;
/* gsource prepare for the wl_display fd: enters wayland's prepare-read
 * protocol on ev_queue (dispatching pending events until the queue is
 * empty), sets prepared, and flushes outgoing requests. Skips straight
 * to dispatch when a fatal last_error is already recorded. */
571 __thread_func_disp_prepare(tpl_gsource *gsource)
573 tpl_wl_vk_display_t *wl_vk_display =
574 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
576 /* If this wl_vk_display is already prepared,
577 * do nothing in this function. */
578 if (wl_vk_display->prepared)
581 /* If there is a last_error, there is no need to poll,
582 * so skip directly to dispatch.
583 * prepare -> dispatch */
584 if (wl_vk_display->last_error)
587 while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
588 wl_vk_display->ev_queue) != 0) {
589 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
590 wl_vk_display->ev_queue) == -1) {
591 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
595 wl_vk_display->prepared = TPL_TRUE;
597 wl_display_flush(wl_vk_display->wl_display);
/* gsource check for the wl_display fd: completes the prepare-read
 * protocol — reads events when the fd is readable, otherwise cancels
 * the read — and clears prepared. Cancels immediately when last_error
 * is set so the next prepare/dispatch can remove the source. */
603 __thread_func_disp_check(tpl_gsource *gsource)
605 tpl_wl_vk_display_t *wl_vk_display =
606 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
607 tpl_bool_t ret = TPL_FALSE;
609 if (!wl_vk_display->prepared)
612 /* If prepared, but last_error is set,
613 * cancel_read is executed and FALSE is returned.
614 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
615 * and skipping disp_check from prepare to disp_dispatch.
616 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
617 if (wl_vk_display->prepared && wl_vk_display->last_error) {
618 wl_display_cancel_read(wl_vk_display->wl_display);
622 if (tpl_gsource_check_io_condition(gsource)) {
623 if (wl_display_read_events(wl_vk_display->wl_display) == -1)
624 _wl_display_print_err(wl_vk_display, "read_event");
627 wl_display_cancel_read(wl_vk_display->wl_display);
631 wl_vk_display->prepared = TPL_FALSE;
/* gsource dispatch for the wl_display fd: under wl_event_mutex,
 * dispatches pending ev_queue events and flushes. When last_error is
 * set the source asks to be removed (wl_vk_display is no longer valid
 * for wayland traffic). */
637 __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
639 tpl_wl_vk_display_t *wl_vk_display =
640 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
644 /* If there is last_error, SOURCE_REMOVE should be returned
645 * to remove the gsource from the main loop.
646 * This is because wl_vk_display is not valid since last_error was set.*/
647 if (wl_vk_display->last_error) {
651 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
652 if (tpl_gsource_check_io_condition(gsource)) {
653 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
654 wl_vk_display->ev_queue) == -1) {
655 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
659 wl_display_flush(wl_vk_display->wl_display);
660 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* gsource finalize for disp_source: tears down the wayland state if it
 * was initialized. Runs on the backend thread. */
666 __thread_func_disp_finalize(tpl_gsource *gsource)
668 tpl_wl_vk_display_t *wl_vk_display =
669 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
671 if (wl_vk_display->wl_initialized)
672 _thread_wl_display_fini(wl_vk_display);
674 TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)",
675 wl_vk_display, gsource);
/* gsource vtable for the wl_display fd (full prepare/check/dispatch
 * cycle, matching wayland's read-intent protocol). */
681 static tpl_gsource_functions disp_funcs = {
682 .prepare = __thread_func_disp_prepare,
683 .check = __thread_func_disp_check,
684 .dispatch = __thread_func_disp_dispatch,
685 .finalize = __thread_func_disp_finalize,
/* Backend entry point: validates the native handle is a wl_display,
 * allocates the wl_vk_display, fills defaults (buffer counts, FIFO
 * present mode, env-controlled vblank waiting), spawns the backend
 * thread and attaches the display and tdm gsources to it.
 * Returns TPL_ERROR_NONE, or an error after rolling back the thread
 * and gsources on failure. */
689 __tpl_wl_vk_display_init(tpl_display_t *display)
693 tpl_wl_vk_display_t *wl_vk_display = NULL;
695 /* Do not allow default display in wayland */
696 if (!display->native_handle) {
697 TPL_ERR("Invalid native handle for display.");
698 return TPL_ERROR_INVALID_PARAMETER;
701 if (!_check_native_handle_is_wl_display(display->native_handle)) {
702 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
703 return TPL_ERROR_INVALID_PARAMETER;
706 wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
707 sizeof(tpl_wl_vk_display_t));
708 if (!wl_vk_display) {
709 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
710 return TPL_ERROR_OUT_OF_MEMORY;
713 display->backend.data = wl_vk_display;
714 display->bufmgr_fd = -1;
716 wl_vk_display->tdm_initialized = TPL_FALSE;
717 wl_vk_display->wl_initialized = TPL_FALSE;
719 wl_vk_display->ev_queue = NULL;
720 wl_vk_display->wl_display = (struct wl_display *)display->native_handle;
721 wl_vk_display->last_error = 0;
722 wl_vk_display->use_explicit_sync = TPL_FALSE; // default disabled
723 wl_vk_display->prepared = TPL_FALSE;
725 /* Wayland Interfaces */
726 #if TIZEN_FEATURE_ENABLE
727 wl_vk_display->explicit_sync = NULL;
729 wl_vk_display->wl_tbm_client = NULL;
731 /* Vulkan specific surface capabilities */
732 wl_vk_display->min_buffer = 2;
733 wl_vk_display->max_buffer = VK_CLIENT_QUEUE_SIZE;
734 wl_vk_display->present_modes = TPL_DISPLAY_PRESENT_MODE_FIFO;
736 wl_vk_display->use_wait_vblank = TPL_TRUE; // default enabled
738 char *env = tpl_getenv("TPL_WAIT_VBLANK");
739 if (env && !atoi(env)) {
740 wl_vk_display->use_wait_vblank = TPL_FALSE;
744 tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
/* The thread runs _thread_init (wayland + tdm setup) before returning. */
747 wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
748 (tpl_gthread_func)_thread_init,
749 (void *)wl_vk_display);
750 if (!wl_vk_display->thread) {
751 TPL_ERR("Failed to create wl_vk_thread");
755 wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
756 (void *)wl_vk_display,
757 wl_display_get_fd(wl_vk_display->wl_display),
758 &disp_funcs, SOURCE_TYPE_NORMAL);
759 if (!wl_vk_display->disp_source) {
760 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
761 display->native_handle,
762 wl_vk_display->thread);
766 wl_vk_display->tdm_source = tpl_gsource_create(wl_vk_display->thread,
767 (void *)wl_vk_display,
768 wl_vk_display->tdm_display_fd,
769 &tdm_funcs, SOURCE_TYPE_NORMAL);
770 if (!wl_vk_display->tdm_source) {
771 TPL_ERR("Failed to create tdm_gsource\n");
775 TPL_INFO("[DISPLAY_INIT]",
776 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
778 wl_vk_display->thread,
779 wl_vk_display->wl_display);
781 TPL_INFO("[DISPLAY_INIT]",
782 "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
783 wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
784 wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
786 return TPL_ERROR_NONE;
/* Failure path (label elided): destroy whatever was created, in order. */
789 if (wl_vk_display->thread) {
790 if (wl_vk_display->tdm_source)
791 tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
792 if (wl_vk_display->disp_source)
793 tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
795 tpl_gthread_destroy(wl_vk_display->thread);
798 wl_vk_display->thread = NULL;
801 display->backend.data = NULL;
802 return TPL_ERROR_INVALID_OPERATION;
/* Backend display teardown: destroys tdm and display gsources (waiting
 * for their finalizers via TPL_TRUE), stops the backend thread, clears
 * the event mutex and detaches backend.data. */
806 __tpl_wl_vk_display_fini(tpl_display_t *display)
808 tpl_wl_vk_display_t *wl_vk_display;
812 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
814 TPL_INFO("[DISPLAY_FINI]",
815 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
817 wl_vk_display->thread,
818 wl_vk_display->wl_display);
820 if (wl_vk_display->tdm_source && wl_vk_display->tdm_initialized) {
821 tpl_gsource_destroy(wl_vk_display->tdm_source, TPL_TRUE);
822 wl_vk_display->tdm_source = NULL;
825 if (wl_vk_display->disp_source) {
826 tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
827 wl_vk_display->disp_source = NULL;
830 if (wl_vk_display->thread) {
831 tpl_gthread_destroy(wl_vk_display->thread);
832 wl_vk_display->thread = NULL;
835 tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);
840 display->backend.data = NULL;
/* Maps an 8/8/8 window config to a tbm format: alpha 8 -> ARGB8888,
 * alpha 0 -> XRGB8888 (depth 24 or 32). Anything else is rejected with
 * TPL_ERROR_INVALID_PARAMETER. Output pointers are optional. */
844 __tpl_wl_vk_display_query_config(tpl_display_t *display,
845 tpl_surface_type_t surface_type,
846 int red_size, int green_size,
847 int blue_size, int alpha_size,
848 int color_depth, int *native_visual_id,
853 if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
854 green_size == 8 && blue_size == 8 &&
855 (color_depth == 32 || color_depth == 24)) {
857 if (alpha_size == 8) {
858 if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
859 if (is_slow) *is_slow = TPL_FALSE;
860 return TPL_ERROR_NONE;
862 if (alpha_size == 0) {
863 if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
864 if (is_slow) *is_slow = TPL_FALSE;
865 return TPL_ERROR_NONE;
869 return TPL_ERROR_INVALID_PARAMETER;
/* No-op config filter: accepts every config unchanged. */
873 __tpl_wl_vk_display_filter_config(tpl_display_t *display,
878 TPL_IGNORE(visual_id);
879 TPL_IGNORE(alpha_size);
880 return TPL_ERROR_NONE;
/* Reports the supported swapchain buffer-count range from the values
 * fixed at display init (min_buffer/max_buffer). Output pointers are
 * optional; the window handle is unused. */
884 __tpl_wl_vk_display_query_window_supported_buffer_count(
885 tpl_display_t *display,
886 tpl_handle_t window, int *min, int *max)
888 tpl_wl_vk_display_t *wl_vk_display = NULL;
893 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
894 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
896 if (min) *min = wl_vk_display->min_buffer;
897 if (max) *max = wl_vk_display->max_buffer;
899 return TPL_ERROR_NONE;
/* Reports the supported present-mode bitmask fixed at display init
 * (FIFO by default). The window handle is unused. */
903 __tpl_wl_vk_display_query_window_supported_present_modes(
904 tpl_display_t *display,
905 tpl_handle_t window, int *present_modes)
907 tpl_wl_vk_display_t *wl_vk_display = NULL;
912 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
913 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
916 *present_modes = wl_vk_display->present_modes;
919 return TPL_ERROR_NONE;
/* Drains every tracked wl_vk_buffer of a surface: for each slot in
 * buffers[], waits (with timeout) for in-flight buffers to be signaled
 * when fence sync requires it, then releases acquired buffers back to
 * the tbm_queue and cancels dequeued ones, finally marking the buffer
 * RELEASED. Locking order: wl_event_mutex -> buffers_mutex ->
 * per-buffer mutex; wl_event_mutex is dropped while waiting on the
 * buffer's condition so the backend thread can make progress. */
923 _tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
925 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
926 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
927 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
928 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
929 tpl_bool_t need_to_release = TPL_FALSE;
930 tpl_bool_t need_to_cancel = TPL_FALSE;
931 buffer_status_t status = RELEASED;
934 while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
935 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
936 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
937 wl_vk_buffer = wl_vk_surface->buffers[idx];
/* Detach the slot before doing any waiting/releasing. */
940 wl_vk_surface->buffers[idx] = NULL;
941 wl_vk_surface->buffer_cnt--;
943 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
944 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
949 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
951 tpl_gmutex_lock(&wl_vk_buffer->mutex);
953 status = wl_vk_buffer->status;
955 TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
957 wl_vk_buffer->tbm_surface,
958 status_to_string[status]);
960 if (status >= ENQUEUED) {
961 tpl_bool_t need_to_wait = TPL_FALSE;
962 tpl_result_t wait_result = TPL_ERROR_NONE;
964 if (!wl_vk_display->use_explicit_sync &&
965 status < WAITING_VBLANK)
966 need_to_wait = TPL_TRUE;
968 if (wl_vk_display->use_explicit_sync &&
970 need_to_wait = TPL_TRUE;
/* Drop wl_event_mutex while waiting so events can still be handled. */
973 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
974 wait_result = tpl_cond_timed_wait(&wl_vk_buffer->cond,
975 &wl_vk_buffer->mutex,
977 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
/* Re-read: status may have advanced while we waited. */
979 status = wl_vk_buffer->status;
981 if (wait_result == TPL_ERROR_TIME_OUT)
982 TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
987 /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
988 /* It has been acquired but has not yet been released, so this
989 * buffer must be released. */
990 need_to_release = (status >= ACQUIRED && status <= COMMITTED);
992 /* After dequeue, it has not been enqueued yet
993 * so cancel_dequeue must be performed. */
994 need_to_cancel = (status == DEQUEUED);
996 if (swapchain && swapchain->tbm_queue) {
997 if (need_to_release) {
998 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
999 wl_vk_buffer->tbm_surface);
1000 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1001 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
1002 wl_vk_buffer->tbm_surface, tsq_err);
1005 if (need_to_cancel) {
1006 tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
1007 wl_vk_buffer->tbm_surface);
1008 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1009 TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
1010 wl_vk_buffer->tbm_surface, tsq_err);
1014 wl_vk_buffer->status = RELEASED;
1016 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
/* Drop the ref taken when the buffer entered the tracking array
 * (presumably — the ref site is elided from this view; confirm). */
1018 if (need_to_release || need_to_cancel)
1019 tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);
1021 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Creates a vblank object on the "primary" tdm output, with fake
 * vblanks enabled and sync disabled. Returns NULL on any failure. */
1027 static tdm_client_vblank*
1028 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1030 tdm_client_vblank *vblank = NULL;
1031 tdm_client_output *tdm_output = NULL;
1032 tdm_error tdm_err = TDM_ERROR_NONE;
1035 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1039 tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1040 if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1041 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1045 vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1046 if (!vblank || tdm_err != TDM_ERROR_NONE) {
1047 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
/* Fake vblank keeps the callback firing even when the output is off;
 * async (sync=0) delivery is handled via the tdm fd gsource. */
1051 tdm_client_vblank_set_enable_fake(vblank, 1);
1052 tdm_client_vblank_set_sync(vblank, 0);
/* Per-surface init on the backend thread: creates the vblank object
 * and, when available and enabled, the per-surface explicit-sync
 * object; also allocates the FIFO vblank-waiting list. A surface_sync
 * failure downgrades the whole display to non-explicit-sync. */
1058 _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
1060 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1062 /* tbm_surface_queue will be created at swapchain_create */
1064 wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
1065 wl_vk_display->tdm_client);
1066 if (wl_vk_surface->vblank) {
1067 TPL_INFO("[VBLANK_INIT]",
1068 "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
1069 wl_vk_surface, wl_vk_display->tdm_client,
1070 wl_vk_surface->vblank);
1073 #if TIZEN_FEATURE_ENABLE
1074 if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
1075 wl_vk_surface->surface_sync =
1076 zwp_linux_explicit_synchronization_v1_get_synchronization(
1077 wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
1078 if (wl_vk_surface->surface_sync) {
1079 TPL_INFO("[EXPLICIT_SYNC_INIT]",
1080 "wl_vk_surface(%p) surface_sync(%p)",
1081 wl_vk_surface, wl_vk_surface->surface_sync);
1083 TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
1085 wl_vk_display->use_explicit_sync = TPL_FALSE;
1089 wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
/* Per-surface teardown on the backend thread (under surf_mutex):
 * frees the vblank-waiting list and destroys the surface_sync and
 * vblank objects created in _thread_wl_vk_surface_init. */
1093 _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
1095 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1097 TPL_INFO("[SURFACE_FINI]",
1098 "wl_vk_surface(%p) wl_surface(%p)",
1099 wl_vk_surface, wl_vk_surface->wl_surface);
1101 if (wl_vk_surface->vblank_waiting_buffers) {
1102 __tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
1103 wl_vk_surface->vblank_waiting_buffers = NULL;
1106 #if TIZEN_FEATURE_ENABLE
1107 if (wl_vk_surface->surface_sync) {
1108 TPL_INFO("[SURFACE_SYNC_DESTROY]",
1109 "wl_vk_surface(%p) surface_sync(%p)",
1110 wl_vk_surface, wl_vk_surface->surface_sync);
1111 zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
1112 wl_vk_surface->surface_sync = NULL;
1116 if (wl_vk_surface->vblank) {
1117 TPL_INFO("[VBLANK_DESTROY]",
1118 "wl_vk_surface(%p) vblank(%p)",
1119 wl_vk_surface, wl_vk_surface->vblank);
1120 tdm_client_vblank_destroy(wl_vk_surface->vblank);
1121 wl_vk_surface->vblank = NULL;
1124 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* Message pump for a surface's gsource: executes the requested
 * operation on the backend thread and signals surf_cond so the client
 * thread blocked in tpl_gcond_wait can continue. Runs under
 * surf_mutex; sent_message is reset to NONE_MESSAGE afterwards. */
1128 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1130 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1132 wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1134 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1135 if (message == INIT_SURFACE) { /* Initialize surface */
1136 TPL_DEBUG("wl_vk_surface(%p) initialize message received!",
1138 _thread_wl_vk_surface_init(wl_vk_surface);
1139 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1140 } else if (message == CREATE_QUEUE) { /* Create tbm_surface_queue */
1141 TPL_DEBUG("wl_vk_surface(%p) queue creation message received!",
1143 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1144 != TPL_ERROR_NONE) {
1145 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1148 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1149 } else if (message == DESTROY_QUEUE) { /* swapchain destroy */
1150 TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!",
1152 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1153 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1154 } else if (message == ACQUIRABLE) { /* Acquirable message */
1155 TPL_DEBUG("wl_vk_surface(%p) acquirable message received!",
1157 if (_thread_surface_queue_acquire(wl_vk_surface)
1158 != TPL_ERROR_NONE) {
1159 TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1164 /* init to NONE_MESSAGE */
1165 wl_vk_surface->sent_message = NONE_MESSAGE;
1167 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* gsource finalize for surf_source: runs the per-surface teardown on
 * the backend thread. */
1173 __thread_func_surf_finalize(tpl_gsource *gsource)
1175 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1177 wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1178 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1180 _thread_wl_vk_surface_fini(wl_vk_surface);
1182 TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)",
1183 wl_vk_surface, gsource);
/* gsource vtable for surface message handling (prepare/check elided —
 * this source is message-driven, not fd-driven). */
1186 static tpl_gsource_functions surf_funcs = {
1189 .dispatch = __thread_func_surf_dispatch,
1190 .finalize = __thread_func_surf_finalize,
/* Backend surface init: allocates the wl_vk_surface, creates its
 * message gsource on the backend thread (fd -1 => message-driven),
 * fills defaults, then sends INIT_SURFACE and blocks on surf_cond
 * until the backend thread finishes _thread_wl_vk_surface_init.
 * Returns TPL_ERROR_NONE, or an error after freeing the allocation. */
1195 __tpl_wl_vk_surface_init(tpl_surface_t *surface)
1197 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1198 tpl_wl_vk_display_t *wl_vk_display = NULL;
1199 tpl_gsource *surf_source = NULL;
1201 TPL_ASSERT(surface);
1202 TPL_ASSERT(surface->display);
1203 TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1204 TPL_ASSERT(surface->native_handle);
1206 wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
1207 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1209 wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
1210 sizeof(tpl_wl_vk_surface_t));
1211 if (!wl_vk_surface) {
1212 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
1213 return TPL_ERROR_OUT_OF_MEMORY;
1216 surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
1217 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
1219 TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
1221 free(wl_vk_surface);
1222 surface->backend.data = NULL;
1223 return TPL_ERROR_INVALID_OPERATION;
1226 surface->backend.data = (void *)wl_vk_surface;
1227 surface->width = -1;
1228 surface->height = -1;
1230 wl_vk_surface->surf_source = surf_source;
1231 wl_vk_surface->swapchain = NULL;
1233 wl_vk_surface->wl_vk_display = wl_vk_display;
1234 wl_vk_surface->wl_surface = (struct wl_surface *)surface->native_handle;
1236 wl_vk_surface->reset = TPL_FALSE;
1237 wl_vk_surface->is_activated = TPL_FALSE;
1238 wl_vk_surface->vblank_done = TPL_TRUE;
1240 wl_vk_surface->render_done_cnt = 0;
1242 wl_vk_surface->vblank = NULL;
1243 #if TIZEN_FEATURE_ENABLE
1244 wl_vk_surface->surface_sync = NULL;
1247 wl_vk_surface->sent_message = NONE_MESSAGE;
1249 wl_vk_surface->post_interval = surface->post_interval;
1253 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1254 wl_vk_surface->buffers[i] = NULL;
1255 wl_vk_surface->buffer_cnt = 0;
1258 tpl_gmutex_init(&wl_vk_surface->surf_mutex);
1259 tpl_gcond_init(&wl_vk_surface->surf_cond);
1261 tpl_gmutex_init(&wl_vk_surface->buffers_mutex);
1263 /* Initialize in thread */
1264 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1265 wl_vk_surface->sent_message = INIT_SURFACE;
1266 tpl_gsource_send_message(wl_vk_surface->surf_source,
1267 wl_vk_surface->sent_message);
1268 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1269 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1271 TPL_INFO("[SURFACE_INIT]",
1272 "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
1273 surface, wl_vk_surface, wl_vk_surface->surf_source);
1275 return TPL_ERROR_NONE;
/* Backend fini entry point: destroys the surface's worker-thread gsource
 * (blocking until its finalize has run), then releases all local state and
 * frees the backend-private struct. */
1279 __tpl_wl_vk_surface_fini(tpl_surface_t *surface)
1281 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1282 tpl_wl_vk_display_t *wl_vk_display = NULL;
1284 TPL_ASSERT(surface);
1285 TPL_ASSERT(surface->display);
1287 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1288 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1290 wl_vk_display = (tpl_wl_vk_display_t *)
1291 surface->display->backend.data;
1292 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1294 TPL_INFO("[SURFACE_FINI][BEGIN]",
1295 "wl_vk_surface(%p) wl_surface(%p)",
1296 wl_vk_surface, wl_vk_surface->wl_surface);
1298 if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
1299 /* finalize swapchain */
1303 wl_vk_surface->swapchain = NULL;
/* TPL_TRUE: wait for the gsource finalize to complete on the thread. */
1305 if (wl_vk_surface->surf_source)
1306 tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
1307 wl_vk_surface->surf_source = NULL;
1309 _print_buffer_lists(wl_vk_surface);
1311 wl_vk_surface->wl_surface = NULL;
1312 wl_vk_surface->wl_vk_display = NULL;
1313 wl_vk_surface->tpl_surface = NULL;
/* Lock/unlock once to drain any thread still inside the mutex before
 * clearing the primitives. */
1315 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1316 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1317 tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
1318 tpl_gcond_clear(&wl_vk_surface->surf_cond);
1320 TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);
1322 free(wl_vk_surface);
1323 surface->backend.data = NULL;
/* Stores a new post (present) interval on the backend surface.
 * The value is only recorded here; it takes effect on later commits. */
1327 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1330 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1332 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1334 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1336 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1338 TPL_INFO("[SET_POST_INTERVAL]",
1339 "wl_vk_surface(%p) post_interval(%d -> %d)",
1340 wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1342 wl_vk_surface->post_interval = post_interval;
1344 return TPL_ERROR_NONE;
/* Returns whether the surface is still valid for rendering:
 * false once the queue-reset flag is raised (swapchain must be recreated). */
1348 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1350 TPL_ASSERT(surface);
1351 TPL_ASSERT(surface->backend.data);
1353 tpl_wl_vk_surface_t *wl_vk_surface =
1354 (tpl_wl_vk_surface_t *)surface->backend.data;
1356 return !(wl_vk_surface->reset);
/* tbm_surface_queue reset callback.
 * Fired when the server resets the queue (resize or activate/deactivate).
 * Raises wl_vk_surface->reset so the next dequeue picks up the new size or
 * activation state, and forwards the event to the frontend via reset_cb. */
1360 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1363 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1364 tpl_wl_vk_display_t *wl_vk_display = NULL;
1365 tpl_wl_vk_swapchain_t *swapchain = NULL;
1366 tpl_surface_t *surface = NULL;
1367 tpl_bool_t is_activated = TPL_FALSE;
1370 wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1371 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1373 wl_vk_display = wl_vk_surface->wl_vk_display;
1374 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1376 surface = wl_vk_surface->tpl_surface;
1377 TPL_CHECK_ON_NULL_RETURN(surface);
1379 swapchain = wl_vk_surface->swapchain;
1380 TPL_CHECK_ON_NULL_RETURN(swapchain);
1382 /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1383 * the changed window size at the next frame. */
1384 width = tbm_surface_queue_get_width(tbm_queue);
1385 height = tbm_surface_queue_get_height(tbm_queue);
1386 if (surface->width != width || surface->height != height) {
1387 TPL_INFO("[QUEUE_RESIZE]",
1388 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1389 wl_vk_surface, tbm_queue,
1390 surface->width, surface->height, width, height);
1393 /* When queue_reset_callback is called, if is_activated is different from
1394 * its previous state change the reset flag to TPL_TRUE to get a new buffer
1395 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1396 is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
1397 swapchain->tbm_queue);
1398 if (wl_vk_surface->is_activated != is_activated) {
1400 TPL_INFO("[ACTIVATED]",
1401 "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1402 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1404 TPL_LOG_T("[DEACTIVATED]",
1405 " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1406 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1410 wl_vk_surface->reset = TPL_TRUE;
/* Notify the frontend so the client can recreate its swapchain. */
1412 if (surface->reset_cb)
1413 surface->reset_cb(surface->reset_data);
/* tbm_surface_queue acquirable callback.
 * Wakes the worker thread with an ACQUIRABLE message when an enqueued buffer
 * becomes ready to acquire. The sent_message == NONE_MESSAGE gate prevents
 * flooding the gsource with duplicate messages while one is pending. */
1417 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1420 TPL_IGNORE(tbm_queue);
1422 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1423 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1425 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1426 if (wl_vk_surface->sent_message == NONE_MESSAGE) {
1427 wl_vk_surface->sent_message = ACQUIRABLE;
1428 tpl_gsource_send_message(wl_vk_surface->surf_source,
1429 wl_vk_surface->sent_message);
1431 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* Worker-thread handler for CREATE_QUEUE.
 * Validates the requested swapchain properties against the display's
 * capabilities, reuses (and resizes) an existing tbm_queue when possible,
 * or creates a fresh wayland-tbm surface queue and wires its reset and
 * acquirable callbacks. Returns a tpl_result_t error on invalid properties
 * or queue-creation failure. */
1435 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1437 TPL_ASSERT (wl_vk_surface);
1439 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1440 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1441 tbm_surface_queue_h tbm_queue = NULL;
1442 tbm_bufmgr bufmgr = NULL;
1443 unsigned int capability;
1445 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1446 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
/* Clamp requested buffer count to the device's [min_buffer, max_buffer]. */
1448 if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
1449 TPL_ERR("buffer count(%d) must be higher than (%d)",
1450 swapchain->properties.buffer_count,
1451 wl_vk_display->min_buffer);
1452 return TPL_ERROR_INVALID_PARAMETER;
1455 if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
1456 TPL_ERR("buffer count(%d) must be lower than (%d)",
1457 swapchain->properties.buffer_count,
1458 wl_vk_display->max_buffer);
1459 return TPL_ERROR_INVALID_PARAMETER;
1462 if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
1463 TPL_ERR("Unsupported present_mode(%d)",
1464 swapchain->properties.present_mode);
1465 return TPL_ERROR_INVALID_PARAMETER;
/* Reuse path: a queue already exists from a previous swapchain. */
1468 if (swapchain->tbm_queue) {
1469 int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
1470 int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
/* Drop the old exported buffer list; it is re-fetched on demand. */
1472 if (swapchain->swapchain_buffers) {
1474 for (i = 0; i < swapchain->properties.buffer_count; i++) {
1475 if (swapchain->swapchain_buffers[i]) {
1476 TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]);
1477 tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1478 swapchain->swapchain_buffers[i] = NULL;
1482 free(swapchain->swapchain_buffers);
1483 swapchain->swapchain_buffers = NULL;
1486 if (old_width != swapchain->properties.width ||
1487 old_height != swapchain->properties.height) {
1488 tbm_surface_queue_reset(swapchain->tbm_queue,
1489 swapchain->properties.width,
1490 swapchain->properties.height,
1491 swapchain->properties.format);
1492 TPL_INFO("[RESIZE]",
1493 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
1494 wl_vk_surface, swapchain, swapchain->tbm_queue,
1495 old_width, old_height,
1496 swapchain->properties.width,
1497 swapchain->properties.height);
/* Report the queue's actual size back into the properties. */
1500 swapchain->properties.buffer_count =
1501 tbm_surface_queue_get_size(swapchain->tbm_queue);
1503 wl_vk_surface->reset = TPL_FALSE;
1505 __tpl_util_atomic_inc(&swapchain->ref_cnt);
1507 TPL_INFO("[SWAPCHAIN_REUSE]",
1508 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
1509 wl_vk_surface, swapchain, swapchain->tbm_queue,
1510 swapchain->properties.buffer_count);
1512 return TPL_ERROR_NONE;
/* Probe bufmgr capability only to choose tiled vs. linear queue. */
1515 bufmgr = tbm_bufmgr_init(-1);
1516 capability = tbm_bufmgr_get_capability(bufmgr);
1517 tbm_bufmgr_deinit(bufmgr);
1519 if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1520 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1521 wl_vk_display->wl_tbm_client,
1522 wl_vk_surface->wl_surface,
1523 swapchain->properties.buffer_count,
1524 swapchain->properties.width,
1525 swapchain->properties.height,
1526 TBM_FORMAT_ARGB8888);
1528 tbm_queue = wayland_tbm_client_create_surface_queue(
1529 wl_vk_display->wl_tbm_client,
1530 wl_vk_surface->wl_surface,
1531 swapchain->properties.buffer_count,
1532 swapchain->properties.width,
1533 swapchain->properties.height,
1534 TBM_FORMAT_ARGB8888);
1538 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1540 return TPL_ERROR_OUT_OF_MEMORY;
/* GUARANTEE_CYCLE keeps dequeue order deterministic across the queue. */
1543 if (tbm_surface_queue_set_modes(
1544 tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1545 TBM_SURFACE_QUEUE_ERROR_NONE) {
1546 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1548 tbm_surface_queue_destroy(tbm_queue);
1549 return TPL_ERROR_INVALID_OPERATION;
1552 if (tbm_surface_queue_add_reset_cb(
1554 __cb_tbm_queue_reset_callback,
1555 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1556 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1558 tbm_surface_queue_destroy(tbm_queue);
1559 return TPL_ERROR_INVALID_OPERATION;
1562 if (tbm_surface_queue_add_acquirable_cb(
1564 __cb_tbm_queue_acquirable_callback,
1565 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1566 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1568 tbm_surface_queue_destroy(tbm_queue);
1569 return TPL_ERROR_INVALID_OPERATION;
1572 swapchain->tbm_queue = tbm_queue;
1574 TPL_INFO("[TBM_QUEUE_CREATED]",
1575 "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)",
1576 wl_vk_surface, swapchain, tbm_queue);
1578 return TPL_ERROR_NONE;
/* Frontend entry point for swapchain creation.
 * Allocates the swapchain struct on first use, records the requested
 * properties, then asks the worker thread (CREATE_QUEUE) to build or reuse
 * the tbm_queue and blocks until the thread signals completion.
 * NOTE(review): on success the ref_cnt is set to 1 unconditionally; the
 * thread-side reuse path also increments it — verify the intended counting
 * against the elided lines around 1602-1615. */
1582 __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
1583 tbm_format format, int width,
1584 int height, int buffer_count, int present_mode)
1586 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1587 tpl_wl_vk_display_t *wl_vk_display = NULL;
1588 tpl_wl_vk_swapchain_t *swapchain = NULL;
1590 TPL_ASSERT(surface);
1591 TPL_ASSERT(surface->display);
1593 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1594 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1596 wl_vk_display = (tpl_wl_vk_display_t *)
1597 surface->display->backend.data;
1598 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1600 swapchain = wl_vk_surface->swapchain;
1602 if (swapchain == NULL) {
1604 (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
1605 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
1606 swapchain->tbm_queue = NULL;
1609 swapchain->properties.buffer_count = buffer_count;
1610 swapchain->properties.width = width;
1611 swapchain->properties.height = height;
1612 swapchain->properties.present_mode = present_mode;
1613 swapchain->wl_vk_surface = wl_vk_surface;
1615 wl_vk_surface->swapchain = swapchain;
1617 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1618 /* send swapchain create tbm_queue message */
1619 wl_vk_surface->sent_message = CREATE_QUEUE;
1620 tpl_gsource_send_message(wl_vk_surface->surf_source,
1621 wl_vk_surface->sent_message);
/* Block until the worker thread finishes queue creation. */
1622 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1623 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1625 TPL_CHECK_ON_FALSE_ASSERT_FAIL(
1626 swapchain->tbm_queue != NULL,
1627 "[CRITICAL FAIL] Failed to create tbm_surface_queue");
1629 wl_vk_surface->reset = TPL_FALSE;
1631 __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
1633 return TPL_ERROR_NONE;
/* Worker-thread handler for DESTROY_QUEUE: destroys the swapchain's
 * tbm_queue, if any, on the thread that owns it. */
1637 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1639 TPL_ASSERT(wl_vk_surface);
1641 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1643 TPL_CHECK_ON_NULL_RETURN(swapchain);
1645 if (swapchain->tbm_queue) {
1646 TPL_INFO("[TBM_QUEUE_DESTROY]",
1647 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1648 wl_vk_surface, swapchain, swapchain->tbm_queue);
1649 tbm_surface_queue_destroy(swapchain->tbm_queue);
1650 swapchain->tbm_queue = NULL;
/* Frontend entry point for swapchain destruction.
 * Drops one reference; when the last reference is gone, releases the
 * exported buffer list, clears tracked buffers, and asks the worker
 * thread (DESTROY_QUEUE) to tear down the tbm_queue, blocking until done. */
1655 __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
1657 tpl_wl_vk_swapchain_t *swapchain = NULL;
1658 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1659 tpl_wl_vk_display_t *wl_vk_display = NULL;
1661 TPL_ASSERT(surface);
1662 TPL_ASSERT(surface->display);
1664 wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
1665 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1667 wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
1668 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1670 swapchain = wl_vk_surface->swapchain;
1672 TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
1674 return TPL_ERROR_INVALID_OPERATION;
/* Another holder still references the swapchain: keep it alive. */
1677 if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
1678 TPL_INFO("[DESTROY_SWAPCHAIN]",
1679 "wl_vk_surface(%p) swapchain(%p) still valid.",
1680 wl_vk_surface, swapchain);
1681 return TPL_ERROR_NONE;
1684 TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
1685 "wl_vk_surface(%p) swapchain(%p)",
1686 wl_vk_surface, wl_vk_surface->swapchain);
1688 if (swapchain->swapchain_buffers) {
1689 for (int i = 0; i < swapchain->properties.buffer_count; i++) {
1690 if (swapchain->swapchain_buffers[i]) {
1691 TPL_DEBUG("Stop tracking tbm_surface(%p)",
1692 swapchain->swapchain_buffers[i]);
1693 tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1694 swapchain->swapchain_buffers[i] = NULL;
1698 free(swapchain->swapchain_buffers);
1699 swapchain->swapchain_buffers = NULL;
1702 _tpl_wl_vk_surface_buffer_clear(wl_vk_surface);
/* Queue teardown must run on the worker thread that owns it. */
1704 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1705 wl_vk_surface->sent_message = DESTROY_QUEUE;
1706 tpl_gsource_send_message(wl_vk_surface->surf_source,
1707 wl_vk_surface->sent_message);
1708 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1709 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1711 _print_buffer_lists(wl_vk_surface);
1714 wl_vk_surface->swapchain = NULL;
1716 return TPL_ERROR_NONE;
/* Exports the swapchain's tbm_surfaces to the caller.
 * Fills *buffer_count with the queue size and, on the full path, allocates
 * swapchain->swapchain_buffers, fetches the surfaces from wayland-tbm and
 * takes an internal ref on each (released at swapchain destroy/reuse).
 * The caller receives a borrowed pointer via *buffers — it must not free it. */
1720 __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
1721 tbm_surface_h **buffers,
1724 TPL_ASSERT(surface);
1725 TPL_ASSERT(surface->backend.data);
1726 TPL_ASSERT(surface->display);
1727 TPL_ASSERT(surface->display->backend.data);
1729 tpl_wl_vk_surface_t *wl_vk_surface =
1730 (tpl_wl_vk_surface_t *)surface->backend.data;
1731 tpl_wl_vk_display_t *wl_vk_display =
1732 (tpl_wl_vk_display_t *)surface->display->backend.data;
1733 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1734 tpl_result_t ret = TPL_ERROR_NONE;
1737 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1738 TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
/* wl_event_mutex guards against concurrent wayland event dispatch. */
1740 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
/* Early-return path (condition elided): only the count was requested. */
1743 *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
1744 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1745 return TPL_ERROR_NONE;
1748 swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
1750 sizeof(tbm_surface_h));
1751 if (!swapchain->swapchain_buffers) {
1752 TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
1754 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1755 return TPL_ERROR_OUT_OF_MEMORY;
1758 ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
1759 swapchain->tbm_queue,
1760 swapchain->swapchain_buffers,
1763 TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
1764 wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
1765 free(swapchain->swapchain_buffers);
1766 swapchain->swapchain_buffers = NULL;
1767 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1768 return TPL_ERROR_INVALID_OPERATION;
/* Pin each exported surface so it outlives queue resets. */
1771 for (i = 0; i < *buffer_count; i++) {
1772 if (swapchain->swapchain_buffers[i]) {
1773 TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
1774 i, swapchain->swapchain_buffers[i],
1775 _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1776 tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
1780 *buffers = swapchain->swapchain_buffers;
1782 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1784 return TPL_ERROR_NONE;
/* tbm user-data free callback for a wl_vk_buffer.
 * Runs when the tbm_surface drops its user data: unlinks the buffer from
 * the surface's tracking array and releases the wl_buffer proxy, explicit
 * sync release object, fence fd and damage rects it owns. */
1788 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
1790 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
1791 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1793 TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
1794 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);
/* Unhook from the surface's fixed-size tracking slots. */
1796 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
1797 if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
1798 wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
1799 wl_vk_surface->buffer_cnt--;
1801 wl_vk_buffer->idx = -1;
1803 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
/* Flush so the compositor sees any pending destroy requests promptly. */
1805 wl_display_flush(wl_vk_display->wl_display);
1807 if (wl_vk_buffer->wl_buffer) {
1808 wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
1809 wl_vk_buffer->wl_buffer);
1810 wl_vk_buffer->wl_buffer = NULL;
1813 #if TIZEN_FEATURE_ENABLE
1814 if (wl_vk_buffer->buffer_release) {
1815 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
1816 wl_vk_buffer->buffer_release = NULL;
/* -1 means "no fence": only close a real fd. */
1820 if (wl_vk_buffer->release_fence_fd != -1) {
1821 close(wl_vk_buffer->release_fence_fd);
1822 wl_vk_buffer->release_fence_fd = -1;
1825 if (wl_vk_buffer->rects) {
1826 free(wl_vk_buffer->rects);
1827 wl_vk_buffer->rects = NULL;
1828 wl_vk_buffer->num_rects = 0;
1831 wl_vk_buffer->tbm_surface = NULL;
1832 wl_vk_buffer->bo_name = -1;
/* Looks up the wl_vk_buffer stored as tbm user data under KEY_WL_VK_BUFFER.
 * Returns NULL if none has been attached yet. */
1837 static tpl_wl_vk_buffer_t *
1838 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1840 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1841 tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1842 (void **)&wl_vk_buffer);
1843 return wl_vk_buffer;
/* Returns the wl_vk_buffer tracking struct for a tbm_surface, creating and
 * registering one (as tbm user data with __cb_wl_vk_buffer_free as its
 * destructor) on first sight. Also records the buffer in the surface's
 * fixed-size tracking array, evicting slot 0 when the array is full. */
1846 static tpl_wl_vk_buffer_t *
1847 _wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
1848 tbm_surface_h tbm_surface)
1850 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1852 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
1854 if (!wl_vk_buffer) {
1855 wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
1856 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);
/* Attach to the tbm_surface so its lifetime drives ours. */
1858 tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1859 (tbm_data_free)__cb_wl_vk_buffer_free);
1860 tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1863 wl_vk_buffer->wl_buffer = NULL;
1864 wl_vk_buffer->tbm_surface = tbm_surface;
1865 wl_vk_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
1866 wl_vk_buffer->wl_vk_surface = wl_vk_surface;
1868 wl_vk_buffer->status = RELEASED;
/* -1 == no fence attached yet. */
1870 wl_vk_buffer->acquire_fence_fd = -1;
1871 wl_vk_buffer->release_fence_fd = -1;
1873 wl_vk_buffer->dx = 0;
1874 wl_vk_buffer->dy = 0;
1875 wl_vk_buffer->width = tbm_surface_get_width(tbm_surface);
1876 wl_vk_buffer->height = tbm_surface_get_height(tbm_surface);
1878 wl_vk_buffer->rects = NULL;
1879 wl_vk_buffer->num_rects = 0;
1881 wl_vk_buffer->need_to_commit = TPL_FALSE;
1882 #if TIZEN_FEATURE_ENABLE
1883 wl_vk_buffer->buffer_release = NULL;
1885 tpl_gmutex_init(&wl_vk_buffer->mutex);
1886 tpl_gcond_init(&wl_vk_buffer->cond);
1888 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
/* Find the first free slot in the tracking array. */
1891 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1892 if (wl_vk_surface->buffers[i] == NULL) break;
1894 /* If this exception is reached,
1895 * it may be a critical memory leak problem. */
1896 if (i == BUFFER_ARRAY_SIZE) {
1897 tpl_wl_vk_buffer_t *evicted_buffer = NULL;
1898 int evicted_idx = 0; /* evict the frontmost buffer */
1900 evicted_buffer = wl_vk_surface->buffers[evicted_idx];
1902 TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
1904 TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
1905 evicted_buffer, evicted_buffer->tbm_surface,
1906 status_to_string[evicted_buffer->status]);
1908 /* [TODO] need to think about whether there will be
1909 * better modifications */
/* NOTE(review): the evicted wl_vk_buffer itself is only unlinked, not
 * freed here — its tbm user-data destructor frees it later. */
1910 wl_vk_surface->buffer_cnt--;
1911 wl_vk_surface->buffers[evicted_idx] = NULL;
1916 wl_vk_surface->buffer_cnt++;
1917 wl_vk_surface->buffers[i] = wl_vk_buffer;
1918 wl_vk_buffer->idx = i;
1920 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1922 TPL_INFO("[WL_VK_BUFFER_CREATE]",
1923 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
1924 wl_vk_surface, wl_vk_buffer, tbm_surface,
1925 wl_vk_buffer->bo_name);
1928 return wl_vk_buffer;
/* Dequeues the next renderable tbm_surface for the client.
 * Waits (optionally with timeout_ns) until the queue is dequeueable,
 * re-checks the reset flag under wl_event_mutex, dequeues, refs the
 * surface, and marks its wl_vk_buffer DEQUEUED. When explicit sync is in
 * use, ownership of the buffer's release fence fd is transferred to the
 * caller via *release_fence; otherwise *release_fence is set to -1.
 * Returns NULL on timeout, queue error, or pending reset. */
1931 static tbm_surface_h
1932 __tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
1933 uint64_t timeout_ns,
1934 int32_t *release_fence)
1936 TPL_ASSERT(surface);
1937 TPL_ASSERT(surface->backend.data);
1938 TPL_ASSERT(surface->display);
1939 TPL_ASSERT(surface->display->backend.data);
1940 TPL_OBJECT_CHECK_RETURN(surface, NULL);
1942 tpl_wl_vk_surface_t *wl_vk_surface =
1943 (tpl_wl_vk_surface_t *)surface->backend.data;
1944 tpl_wl_vk_display_t *wl_vk_display =
1945 (tpl_wl_vk_display_t *)surface->display->backend.data;
1946 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1947 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1949 tbm_surface_h tbm_surface = NULL;
1950 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1952 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
1953 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);
/* Drop the TPL object lock while blocking so other threads can proceed. */
1955 TPL_OBJECT_UNLOCK(surface);
1956 TRACE_BEGIN("WAIT_DEQUEUEABLE");
1957 if (timeout_ns != UINT64_MAX) {
/* timeout is taken in ms by the tbm API; ns/1000 here — presumably the
 * intended unit conversion; confirm against tbm_surface_queue docs. */
1958 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
1959 swapchain->tbm_queue, timeout_ns/1000);
1961 tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
1964 TPL_OBJECT_LOCK(surface);
1966 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
1967 TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
1970 } else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1971 TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
1972 wl_vk_surface, swapchain->tbm_queue, tsq_err);
1976 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
/* A reset may have arrived while we were waiting: caller must recreate. */
1978 if (wl_vk_surface->reset) {
1979 TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
1980 swapchain, swapchain->tbm_queue);
1981 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1985 tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
1988 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
1989 swapchain->tbm_queue, wl_vk_surface, tsq_err);
1990 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Keep the surface alive while the client renders into it. */
1994 tbm_surface_internal_ref(tbm_surface);
1996 wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
1997 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");
1999 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2000 wl_vk_buffer->status = DEQUEUED;
2002 if (release_fence) {
2003 #if TIZEN_FEATURE_ENABLE
2004 if (wl_vk_surface->surface_sync) {
/* Hand the fd to the caller; we no longer own it (hence the -1 below). */
2005 *release_fence = wl_vk_buffer->release_fence_fd;
2006 TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
2007 wl_vk_surface, wl_vk_buffer, *release_fence);
2008 wl_vk_buffer->release_fence_fd = -1;
2012 *release_fence = -1;
2016 wl_vk_surface->reset = TPL_FALSE;
2018 TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2019 wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
2020 release_fence ? *release_fence : -1);
2022 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2023 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Returns a previously-dequeued buffer to the queue without presenting it.
 * Marks the wl_vk_buffer RELEASED, drops the dequeue-time internal ref and
 * cancels the dequeue on the tbm_queue. */
2029 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
2030 tbm_surface_h tbm_surface)
2032 TPL_ASSERT(surface);
2033 TPL_ASSERT(surface->backend.data);
2035 tpl_wl_vk_surface_t *wl_vk_surface =
2036 (tpl_wl_vk_surface_t *)surface->backend.data;
2037 tpl_wl_vk_swapchain_t *swapchain = NULL;
2038 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2039 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2041 TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2042 TPL_ERROR_INVALID_PARAMETER);
2044 swapchain = wl_vk_surface->swapchain;
2045 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2046 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2047 TPL_ERROR_INVALID_PARAMETER);
2049 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
/* NOTE(review): no visible NULL check on wl_vk_buffer before the lock —
 * a guard may exist on an elided line; verify against the full file. */
2051 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2052 wl_vk_buffer->status = RELEASED;
2053 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
/* Balances the ref taken at dequeue time. */
2056 tbm_surface_internal_unref(tbm_surface);
2058 TPL_INFO("[CANCEL BUFFER]",
2059 "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2060 wl_vk_surface, swapchain, tbm_surface,
2061 _get_tbm_surface_bo_name(tbm_surface));
2063 tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2065 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2066 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2067 return TPL_ERROR_INVALID_OPERATION;
2070 return TPL_ERROR_NONE;
/* Enqueues a rendered buffer for presentation.
 * Stores the damage rects (4 ints per rect) and takes ownership of
 * acquire_fence on the wl_vk_buffer, marks it ENQUEUED, pushes it into the
 * tbm_queue (which triggers the acquirable callback on the worker thread),
 * then drops the dequeue-time internal ref. */
2074 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2075 tbm_surface_h tbm_surface,
2076 int num_rects, const int *rects,
2077 int32_t acquire_fence)
2079 TPL_ASSERT(surface);
2080 TPL_ASSERT(surface->display);
2081 TPL_ASSERT(surface->backend.data);
2082 TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2084 tpl_wl_vk_surface_t *wl_vk_surface =
2085 (tpl_wl_vk_surface_t *) surface->backend.data;
2086 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2087 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2088 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2091 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2092 TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2093 TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2094 TPL_ERROR_INVALID_PARAMETER);
2096 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2097 if (!wl_vk_buffer) {
2098 TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
2099 return TPL_ERROR_INVALID_PARAMETER;
2102 bo_name = wl_vk_buffer->bo_name;
2104 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2106 /* If there are received region information, save it to wl_vk_buffer */
2107 if (num_rects && rects) {
/* Replace any rects left over from a previous enqueue. */
2108 if (wl_vk_buffer->rects != NULL) {
2109 free(wl_vk_buffer->rects);
2110 wl_vk_buffer->rects = NULL;
2111 wl_vk_buffer->num_rects = 0;
2114 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2115 wl_vk_buffer->num_rects = num_rects;
2117 if (wl_vk_buffer->rects) {
2118 memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2119 sizeof(int) * 4 * num_rects);
2121 TPL_ERR("Failed to allocate memory for rects info.");
/* Close any fence from a prior enqueue before taking the new one. */
2125 if (wl_vk_buffer->acquire_fence_fd != -1)
2126 close(wl_vk_buffer->acquire_fence_fd);
2128 wl_vk_buffer->acquire_fence_fd = acquire_fence;
2130 wl_vk_buffer->status = ENQUEUED;
2132 "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2133 wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2135 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2137 tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2139 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2140 tbm_surface_internal_unref(tbm_surface);
2141 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2142 tbm_surface, wl_vk_surface, tsq_err);
2143 return TPL_ERROR_INVALID_OPERATION;
/* Balances the ref taken at dequeue time. */
2146 tbm_surface_internal_unref(tbm_surface);
2148 return TPL_ERROR_NONE;
/* Plain wl_buffer.release listener, used when explicit sync is unavailable.
 * The cast adapts __cb_wl_buffer_release's wl_proxy* signature to the
 * listener's expected wl_buffer* callback type. */
2151 static const struct wl_buffer_listener wl_buffer_release_listener = {
2152 (void *)__cb_wl_buffer_release,
/* Worker-thread handler for ACQUIRABLE.
 * Drains every acquirable buffer from the tbm_queue: refs it, creates the
 * wl_buffer proxy on first use, and either commits it immediately or parks
 * it on the vblank waiting list when vblank pacing is active. */
2156 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
2158 tbm_surface_h tbm_surface = NULL;
2159 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2160 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2161 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2162 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2163 tpl_bool_t ready_to_commit = TPL_TRUE;
2165 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2167 while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
2168 tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
2170 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2171 TPL_ERR("Failed to acquire from tbm_queue(%p)",
2172 swapchain->tbm_queue);
2173 return TPL_ERROR_INVALID_OPERATION;
/* Pin the surface until the compositor releases it. */
2176 tbm_surface_internal_ref(tbm_surface);
2178 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2179 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2180 "wl_vk_buffer sould be not NULL");
2182 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2184 wl_vk_buffer->status = ACQUIRED;
2186 TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2187 wl_vk_buffer, tbm_surface,
2188 _get_tbm_surface_bo_name(tbm_surface));
/* Lazily create the wl_buffer proxy the first time this tbm_surface
 * is presented. */
2190 if (wl_vk_buffer->wl_buffer == NULL) {
2191 wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2192 wl_vk_display->wl_tbm_client, tbm_surface);
2194 if (!wl_vk_buffer->wl_buffer) {
2195 TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
2196 wl_vk_display->wl_tbm_client, tbm_surface);
/* Only listen for plain wl_buffer.release when explicit sync will not
 * deliver fenced/immediate release events instead. */
2198 if (wl_vk_buffer->acquire_fence_fd == -1 ||
2199 wl_vk_display->use_explicit_sync == TPL_FALSE) {
2200 wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2201 &wl_buffer_release_listener, wl_vk_buffer);
2205 "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
2206 wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
2210 if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done)
2211 ready_to_commit = TPL_TRUE;
2213 wl_vk_buffer->status = WAITING_VBLANK;
2214 __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
2215 ready_to_commit = TPL_FALSE;
2218 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2220 if (ready_to_commit)
2221 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2224 return TPL_ERROR_NONE;
#if TIZEN_FEATURE_ENABLE
/* zwp_linux_buffer_release_v1.fenced_release handler.
 * The compositor released the buffer and supplied a fence fd; store the
 * fence for the next dequeuer, mark the buffer RELEASED, return it to the
 * tbm_queue and drop the commit-time ref. */
2229 __cb_buffer_fenced_release(void *data,
2230 struct zwp_linux_buffer_release_v1 *release,
2233 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2234 tbm_surface_h tbm_surface = NULL;
2236 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2238 tbm_surface = wl_vk_buffer->tbm_surface;
2240 if (tbm_surface_internal_is_valid(tbm_surface)) {
2241 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2242 tpl_wl_vk_swapchain_t *swapchain = NULL;
2244 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2245 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2246 tbm_surface_internal_unref(tbm_surface);
2250 swapchain = wl_vk_surface->swapchain;
2252 tpl_gmutex_lock(&wl_vk_buffer->mutex);
/* Only act if the buffer is still in the COMMITTED state; otherwise the
 * release event is stale. */
2253 if (wl_vk_buffer->status == COMMITTED) {
2254 tbm_surface_queue_error_e tsq_err;
2256 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2257 wl_vk_buffer->buffer_release = NULL;
/* Ownership of the fence fd passes to this wl_vk_buffer. */
2259 wl_vk_buffer->release_fence_fd = fence;
2260 wl_vk_buffer->status = RELEASED;
2262 TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2263 wl_vk_buffer->bo_name,
2265 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2266 wl_vk_buffer->bo_name);
2269 "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2270 wl_vk_buffer, tbm_surface,
2271 wl_vk_buffer->bo_name,
2274 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2276 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2277 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2279 tbm_surface_internal_unref(tbm_surface);
2282 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2285 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* zwp_linux_buffer_release_v1.immediate_release handler.
 * Same flow as the fenced variant, but the compositor guarantees the buffer
 * is already free, so release_fence_fd is set to -1 (no fence to wait on). */
2290 __cb_buffer_immediate_release(void *data,
2291 struct zwp_linux_buffer_release_v1 *release)
2293 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2294 tbm_surface_h tbm_surface = NULL;
2296 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2298 tbm_surface = wl_vk_buffer->tbm_surface;
2300 if (tbm_surface_internal_is_valid(tbm_surface)) {
2301 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2302 tpl_wl_vk_swapchain_t *swapchain = NULL;
2304 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2305 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2306 tbm_surface_internal_unref(tbm_surface);
2310 swapchain = wl_vk_surface->swapchain;
2312 tpl_gmutex_lock(&wl_vk_buffer->mutex);
/* Ignore stale release events for buffers no longer COMMITTED. */
2313 if (wl_vk_buffer->status == COMMITTED) {
2314 tbm_surface_queue_error_e tsq_err;
2316 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2317 wl_vk_buffer->buffer_release = NULL;
2319 wl_vk_buffer->release_fence_fd = -1;
2320 wl_vk_buffer->status = RELEASED;
2322 TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
2323 _get_tbm_surface_bo_name(tbm_surface));
2324 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2325 _get_tbm_surface_bo_name(tbm_surface));
2328 "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2329 wl_vk_buffer, tbm_surface,
2330 _get_tbm_surface_bo_name(tbm_surface));
2332 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2334 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2335 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
/* Drop the commit-time ref now that the compositor is done with it. */
2337 tbm_surface_internal_unref(tbm_surface);
2340 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2343 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* Listener for zwp_linux_buffer_release_v1 events (explicit-sync path).
 * Member order follows the protocol: fenced_release, then immediate_release.
 * NOTE(review): "listner" looks like a typo for "listener"; it is referenced
 * elsewhere in this file, so renaming must be done in one coordinated change. */
2347 static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
2348 __cb_buffer_fenced_release,
2349 __cb_buffer_immediate_release,
/* Plain wl_buffer "release" event handler — the non-explicit-sync release
 * path (registered when explicit sync is disabled or no acquire fence exists).
 * Returns the released tbm_surface to the swapchain's tbm_queue and marks
 * the buffer RELEASED. */
2354 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
2356 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2357 tbm_surface_h tbm_surface = NULL;
2359 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)
2361 tbm_surface = wl_vk_buffer->tbm_surface;
2363 if (tbm_surface_internal_is_valid(tbm_surface)) {
2364 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2365 tpl_wl_vk_swapchain_t *swapchain = NULL;
2366 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
2368 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2369 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
/* Drop our reference before bailing out. */
2370 tbm_surface_internal_unref(tbm_surface);
2374 swapchain = wl_vk_surface->swapchain;
2376 tpl_gmutex_lock(&wl_vk_buffer->mutex);
/* Only a COMMITTED buffer can be released; ignore stale events. */
2378 if (wl_vk_buffer->status == COMMITTED) {
/* Return the surface to the tbm queue so it can be dequeued again. */
2380 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2382 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2383 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2385 wl_vk_buffer->status = RELEASED;
2387 TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
2388 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2389 wl_vk_buffer->bo_name);
2391 TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2392 wl_vk_buffer->wl_buffer, tbm_surface,
2393 wl_vk_buffer->bo_name);
2395 tbm_surface_internal_unref(tbm_surface);
2398 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
/* tbm_surface handle failed validation — nothing to release. */
2400 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* tdm_client vblank event handler, paired with _thread_surface_vblank_wait.
 * Marks the surface's vblank as done and commits the oldest buffer that was
 * parked in vblank_waiting_buffers, if any. */
2405 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2406 unsigned int sequence, unsigned int tv_sec,
2407 unsigned int tv_usec, void *user_data)
2409 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
2410 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2412 TRACE_ASYNC_END((int)wl_vk_surface, "WAIT_VBLANK");
2413 TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface);
/* A vblank timeout is treated as non-fatal: warn and keep committing. */
2415 if (error == TDM_ERROR_TIMEOUT)
2416 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
2419 wl_vk_surface->vblank_done = TPL_TRUE;
/* surf_mutex guards vblank_waiting_buffers against the enqueue path. */
2421 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
2422 wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
2423 wl_vk_surface->vblank_waiting_buffers,
/* Commit the popped buffer (elided lines presumably NULL-check it first
 * — confirm in the full file). */
2426 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2427 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* Requests a one-shot vblank notification for this surface.
 * Lazily creates the tdm_client_vblank object on first use, then issues
 * tdm_client_vblank_wait with the surface's post_interval;
 * __cb_tdm_client_vblank fires when the vblank arrives.
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_OUT_OF_MEMORY if the vblank
 * object cannot be created, TPL_ERROR_INVALID_OPERATION if the wait fails. */
2431 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2433 tdm_error tdm_err = TDM_ERROR_NONE;
2434 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
/* Lazy init: the vblank object is created once and reused afterwards. */
2436 if (wl_vk_surface->vblank == NULL) {
2437 wl_vk_surface->vblank =
2438 _thread_create_tdm_client_vblank(wl_vk_display->tdm_client);
2439 if (!wl_vk_surface->vblank) {
2440 TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2442 return TPL_ERROR_OUT_OF_MEMORY;
2446 tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2447 wl_vk_surface->post_interval,
2448 __cb_tdm_client_vblank,
2449 (void *)wl_vk_surface);
2451 if (tdm_err == TDM_ERROR_NONE) {
/* vblank_done stays FALSE until the callback runs; commits in between
 * are queued on vblank_waiting_buffers. */
2452 wl_vk_surface->vblank_done = TPL_FALSE;
2453 TRACE_ASYNC_BEGIN((int)wl_vk_surface, "WAIT_VBLANK");
2455 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2456 return TPL_ERROR_INVALID_OPERATION;
2459 return TPL_ERROR_NONE;
/* Commits one buffer to the compositor on the backend thread:
 *  1. lazily creates the wl_buffer for the tbm_surface (attaching the plain
 *     release listener only when explicit sync is unused or no fence exists),
 *  2. attaches and damages the wl_surface,
 *  3. on the explicit-sync path, passes the acquire fence and registers a
 *     zwp_linux_buffer_release_v1 listener,
 *  4. commits, flushes, marks the buffer COMMITTED, and optionally arms the
 *     next vblank wait. */
2463 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
2464 tpl_wl_vk_buffer_t *wl_vk_buffer)
2466 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2467 struct wl_surface *wl_surface = wl_vk_surface->wl_surface;
2470 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2471 "wl_vk_buffer sould be not NULL");
/* Lazily create the wl_buffer on first commit of this tbm_surface. */
2473 if (wl_vk_buffer->wl_buffer == NULL) {
2474 wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2475 wl_vk_display->wl_tbm_client,
2476 wl_vk_buffer->tbm_surface);
/* Use the plain wl_buffer.release listener only when the explicit-sync
 * release path will NOT be used (no acquire fence, or explicit sync off);
 * otherwise releases arrive via zwp_linux_buffer_release_v1 below. */
2477 if (wl_vk_buffer->wl_buffer &&
2478 (wl_vk_buffer->acquire_fence_fd == -1 ||
2479 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
2480 wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2481 &wl_buffer_release_listener, wl_vk_buffer);
2484 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
2485 "[FATAL] Failed to create wl_buffer");
/* Proxy version decides between wl_surface_damage and damage_buffer
 * (elided branches presumably test it — confirm in the full file). */
2487 version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
2489 wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
2490 wl_vk_buffer->dx, wl_vk_buffer->dy);
/* No damage rects provided: damage the whole buffer area. */
2492 if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
2494 wl_surface_damage(wl_surface,
2495 wl_vk_buffer->dx, wl_vk_buffer->dy,
2496 wl_vk_buffer->width, wl_vk_buffer->height);
2498 wl_surface_damage_buffer(wl_surface,
2500 wl_vk_buffer->width, wl_vk_buffer->height);
/* Damage each caller rect; rects are packed as [x, y, w, h] quads.
 * The y coordinate is flipped (height - (y + h)) to convert from the
 * client's bottom-left origin to Wayland's top-left origin. */
2504 for (i = 0; i < wl_vk_buffer->num_rects; i++) {
2506 wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
2507 wl_vk_buffer->rects[i * 4 + 3]);
2509 wl_surface_damage(wl_surface,
2510 wl_vk_buffer->rects[i * 4 + 0],
2512 wl_vk_buffer->rects[i * 4 + 2],
2513 wl_vk_buffer->rects[i * 4 + 3]);
2515 wl_surface_damage_buffer(wl_surface,
2516 wl_vk_buffer->rects[i * 4 + 0],
2518 wl_vk_buffer->rects[i * 4 + 2],
2519 wl_vk_buffer->rects[i * 4 + 3]);
2524 #if TIZEN_FEATURE_ENABLE
/* Explicit sync: hand the acquire fence to the compositor, then request
 * a per-commit release object so the release comes back with a fence. */
2525 if (wl_vk_display->use_explicit_sync &&
2526 wl_vk_surface->surface_sync &&
2527 wl_vk_buffer->acquire_fence_fd != -1) {
2529 zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
2530 wl_vk_buffer->acquire_fence_fd);
2531 TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
2532 wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
/* set_acquire_fence dup'd the fd on the wire; close our copy. */
2533 close(wl_vk_buffer->acquire_fence_fd);
2534 wl_vk_buffer->acquire_fence_fd = -1;
2536 wl_vk_buffer->buffer_release =
2537 zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
2538 if (!wl_vk_buffer->buffer_release) {
2539 TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
2541 zwp_linux_buffer_release_v1_add_listener(
2542 wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
2543 TPL_DEBUG("add explicit_sync_release_listener.");
2548 wl_surface_commit(wl_surface);
2550 wl_display_flush(wl_vk_display->wl_display);
2552 TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2553 wl_vk_buffer->bo_name);
2555 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2557 wl_vk_buffer->need_to_commit = TPL_FALSE;
2558 wl_vk_buffer->status = COMMITTED;
/* Wake any thread blocked waiting for this buffer to reach COMMITTED. */
2560 tpl_gcond_signal(&wl_vk_buffer->cond);
2562 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2565 "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
2566 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
2567 wl_vk_buffer->bo_name;
/* Pace subsequent commits to vblank when the display requests it. */
2569 if (wl_vk_display->use_wait_vblank &&
2570 _thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
2571 TPL_ERR("Failed to set wait vblank.");
/* Backend selector: reports whether this backend can drive the given
 * native display handle (i.e. whether it is a wl_display). */
2575 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2577 if (!native_dpy) return TPL_FALSE;
2579 if (_check_native_handle_is_wl_display(native_dpy))
/* Populates the display backend vtable with this file's threaded
 * Wayland-Vulkan implementations. Called by the TPL core at backend setup. */
2586 __tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
2588 TPL_ASSERT(backend);
2590 backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2591 backend->data = NULL;
2593 backend->init = __tpl_wl_vk_display_init;
2594 backend->fini = __tpl_wl_vk_display_fini;
2595 backend->query_config = __tpl_wl_vk_display_query_config;
2596 backend->filter_config = __tpl_wl_vk_display_filter_config;
2597 backend->query_window_supported_buffer_count =
2598 __tpl_wl_vk_display_query_window_supported_buffer_count;
2599 backend->query_window_supported_present_modes =
2600 __tpl_wl_vk_display_query_window_supported_present_modes;
/* Populates the surface backend vtable with this file's threaded
 * Wayland-Vulkan implementations (swapchain / buffer lifecycle entry points). */
2604 __tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
2606 TPL_ASSERT(backend);
2608 backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2609 backend->data = NULL;
2611 backend->init = __tpl_wl_vk_surface_init;
2612 backend->fini = __tpl_wl_vk_surface_fini;
2613 backend->validate = __tpl_wl_vk_surface_validate;
2614 backend->cancel_dequeued_buffer =
2615 __tpl_wl_vk_surface_cancel_buffer;
2616 backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
2617 backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
2618 backend->get_swapchain_buffers =
2619 __tpl_wl_vk_surface_get_swapchain_buffers;
2620 backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
2621 backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
2622 backend->set_post_interval =
2623 __tpl_wl_vk_surface_set_post_interval;
/* Returns the global export name of the surface's first (index 0) bo.
 * Used throughout this file purely for logging and tracing identifiers. */
2627 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2629 return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
2633 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2637 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2638 TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2639 wl_vk_surface, wl_vk_surface->buffer_cnt);
2640 for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2641 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2644 "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2645 idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2646 wl_vk_buffer->bo_name,
2647 status_to_string[wl_vk_buffer->status]);
2650 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);