1 #define inline __inline__
4 #include "tpl_internal.h"
9 #include <sys/eventfd.h>
11 #include <tbm_bufmgr.h>
12 #include <tbm_surface.h>
13 #include <tbm_surface_internal.h>
14 #include <tbm_surface_queue.h>
16 #include <wayland-client.h>
17 #include <wayland-tbm-server.h>
18 #include <wayland-tbm-client.h>
20 #include <tdm_client.h>
22 #ifndef TIZEN_FEATURE_ENABLE
23 #define TIZEN_FEATURE_ENABLE 1
26 #if TIZEN_FEATURE_ENABLE
27 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
30 #include "tpl_utils_gthread.h"
/* File-scope constants and opaque type names for the wl_vk backend.
 * NOTE(review): each line carries an original-file line number from the
 * extraction; gaps in that numbering mean lines are missing from view. */
32 #define BUFFER_ARRAY_SIZE 10          /* capacity of per-surface buffer trace array */
33 #define VK_CLIENT_QUEUE_SIZE 3        /* default max buffer count exposed to VK clients */
/* Address of this dummy int is used as a unique tbm user-data key. */
35 static int wl_vk_buffer_key;
36 #define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
/* Forward typedefs so the structs can reference each other. */
38 typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t;
39 typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t;
40 typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t;
41 typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t;
/* Per-display backend state: wayland connection, tbm client, tdm client,
 * and the dedicated event thread's gsources.
 * NOTE(review): later code accesses wl_vk_display->tdm.tdm_client etc., so
 * the tdm-related members below presumably live inside a nested
 * `struct { ... } tdm;` whose wrapper lines are missing from this view —
 * which would also explain the apparent duplicate `gsource_finalized`
 * member (one per sub-struct). TODO confirm against the full file. */
43 struct _tpl_wl_vk_display {
44 tpl_gsource *disp_source;
46 tpl_gmutex wl_event_mutex;
48 struct wl_display *wl_display;
49 struct wl_event_queue *ev_queue;      /* private queue for thread-side dispatch */
50 struct wayland_tbm_client *wl_tbm_client;
51 int last_error; /* errno of the last wl_display error*/
53 tpl_bool_t wl_initialized;
56 tdm_client *tdm_client;
57 tpl_gsource *tdm_source;
59 tpl_bool_t tdm_initialized;
60 /* To make sure that tpl_gsource has been successfully finalized. */
61 tpl_bool_t gsource_finalized;
66 tpl_bool_t use_wait_vblank;
67 tpl_bool_t use_explicit_sync;
70 /* To make sure that tpl_gsource has been successfully finalized. */
71 tpl_bool_t gsource_finalized;
72 tpl_gmutex disp_mutex;
75 /* device surface capabilities */
79 #if TIZEN_FEATURE_ENABLE
80 struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
/* Vulkan swapchain wrapper: owns the tbm_surface_queue and the exported
 * buffer array; refcounted (see ref_cnt). Some members missing from view. */
84 struct _tpl_wl_vk_swapchain {
85 tpl_wl_vk_surface_t *wl_vk_surface;   /* back-pointer to owning surface */
87 tbm_surface_queue_h tbm_queue;
90 tpl_bool_t create_done;
100 tbm_surface_h *swapchain_buffers;     /* array handed out to the VK client */
102 tpl_util_atomic_uint ref_cnt;
/* Messages posted to the surface's gsource; enumerators (NONE_MESSAGE,
 * INIT_SURFACE, CREATE_QUEUE, DESTROY_QUEUE, ACQUIRABLE — inferred from
 * __thread_func_surf_dispatch below) are missing from this view. */
105 typedef enum surf_message {
/* Per-surface backend state, driven by the display's event thread. */
113 struct _tpl_wl_vk_surface {
114 tpl_gsource *surf_source;
116 tpl_wl_vk_swapchain_t *swapchain;
118 struct wl_surface *wl_surface;
119 #if TIZEN_FEATURE_ENABLE
120 struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
122 tdm_client_vblank *vblank;
124 /* surface information */
127 tpl_wl_vk_display_t *wl_vk_display;
128 tpl_surface_t *tpl_surface;
130 /* wl_vk_buffer array for buffer tracing */
131 tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE];
132 int buffer_cnt; /* the number of using wl_vk_buffers */
133 tpl_gmutex buffers_mutex;             /* guards buffers[] and buffer_cnt */
135 tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
137 tpl_gmutex surf_mutex;
140 /* for waiting draw done */
141 tpl_bool_t is_activated;
142 tpl_bool_t reset; /* TRUE if queue reseted by external */
143 tpl_bool_t vblank_done;
144 tpl_bool_t initialized_in_thread;
146 /* To make sure that tpl_gsource has been successfully finalized. */
147 tpl_bool_t gsource_finalized;
149 surf_message sent_message;
/* Buffer lifecycle states; most enumerators (RELEASED, DEQUEUED, ENQUEUED,
 * ACQUIRED, WAITING_VBLANK, COMMITTED — inferred from usages below) are
 * missing from this view. The string table must stay index-aligned with
 * the enum. */
154 typedef enum buffer_status {
159 WAITING_SIGNALED, // 4
164 static const char *status_to_string[7] = {
169 "WAITING_SIGNALED", // 4
170 "WAITING_VBLANK", // 5
/* Per-buffer tracking record attached to a tbm_surface via KEY_WL_VK_BUFFER.
 * Holds the wl_buffer proxy, commit geometry, status, and fence fds. */
174 struct _tpl_wl_vk_buffer {
175 tbm_surface_h tbm_surface;
178 struct wl_buffer *wl_buffer;
179 int dx, dy; /* position to attach to wl_surface */
180 int width, height; /* size to attach to wl_surface */
182 buffer_status_t status; /* for tracing buffer status */
183 int idx; /* position index in buffers array of wl_vk_surface */
185 /* for damage region */
189 /* for checking need_to_commit (frontbuffer mode) */
190 tpl_bool_t need_to_commit;
192 #if TIZEN_FEATURE_ENABLE
193 /* to get release event via zwp_linux_buffer_release_v1 */
194 struct zwp_linux_buffer_release_v1 *buffer_release;
197 /* each buffers own its release_fence_fd, until it passes ownership
199 int32_t release_fence_fd;
201 /* each buffers own its acquire_fence_fd.
202 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
203 * will be passed to display server
204 * Otherwise it will be used as a fence waiting for render done
206 int32_t acquire_fence_fd;
211 tpl_wl_vk_surface_t *wl_vk_surface;   /* back-pointer to owning surface */
/* Forward declarations of thread-side helpers defined later in the file.
 * NOTE(review): the return-type lines preceding each prototype are missing
 * from this view. */
215 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
217 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
219 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
221 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer);
223 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
225 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
227 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
229 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
230 tpl_wl_vk_buffer_t *wl_vk_buffer);
/* Returns whether native_dpy is actually a wl_display handle, by comparing
 * the dereferenced first pointer against wl_display_interface (identity
 * first, then by interface name). Return statements missing from view. */
233 _check_native_handle_is_wl_display(tpl_handle_t native_dpy)
235 struct wl_interface *wl_vk_native_dpy = *(void **) native_dpy;
237 if (!wl_vk_native_dpy) {
238 TPL_ERR("Invalid parameter. native_display(%p)", wl_vk_native_dpy);
242 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
243 is a memory address pointing the structure of wl_display_interface. */
244 if (wl_vk_native_dpy == &wl_display_interface)
/* Fallback: name comparison covers copies of the interface struct in
 * other modules. */
247 if (strncmp(wl_vk_native_dpy->name, wl_display_interface.name,
248 strlen(wl_display_interface.name)) == 0) {
/* gsource dispatch for the tdm fd: pumps tdm client events; on unrecoverable
 * error destroys this gsource so the thread drops it. */
256 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
258 tpl_wl_vk_display_t *wl_vk_display = NULL;
259 tdm_error tdm_err = TDM_ERROR_NONE;
263 wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
264 if (!wl_vk_display) {
265 TPL_ERR("Failed to get wl_vk_display from gsource(%p)", gsource);
266 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
270 tdm_err = tdm_client_handle_events(wl_vk_display->tdm.tdm_client);
272 /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
273 * When tdm_source is no longer available due to an unexpected situation,
274 * wl_vk_thread must remove it from the thread and destroy it.
275 * In that case, tdm_vblank can no longer be used for surfaces and displays
276 * that used this tdm_source. */
277 if (tdm_err != TDM_ERROR_NONE) {
278 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
280 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
/* destroy_in_thread == TPL_FALSE: we are already on the worker thread. */
282 tpl_gsource_destroy(gsource, TPL_FALSE);
284 wl_vk_display->tdm.tdm_source = NULL;
/* gsource finalize for the tdm fd: destroys the tdm_client under tdm_mutex,
 * marks gsource_finalized and signals tdm_cond so waiters in
 * display_init/fini can proceed. */
293 __thread_func_tdm_finalize(tpl_gsource *gsource)
295 tpl_wl_vk_display_t *wl_vk_display = NULL;
297 wl_vk_display = (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
299 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
301 TPL_INFO("[TDM_CLIENT_FINI]",
302 "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
303 wl_vk_display, wl_vk_display->tdm.tdm_client,
304 wl_vk_display->tdm.tdm_display_fd);
306 if (wl_vk_display->tdm.tdm_client) {
307 tdm_client_destroy(wl_vk_display->tdm.tdm_client);
308 wl_vk_display->tdm.tdm_client = NULL;
309 wl_vk_display->tdm.tdm_display_fd = -1;
312 wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
313 wl_vk_display->tdm.gsource_finalized = TPL_TRUE;
/* Wake any thread blocked in tpl_gcond_wait() on tdm_cond. */
315 tpl_gcond_signal(&wl_vk_display->tdm.tdm_cond);
316 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
/* gsource vtable for the tdm fd (prepare/check members not visible here;
 * presumably NULL — TODO confirm). */
319 static tpl_gsource_functions tdm_funcs = {
322 .dispatch = __thread_func_tdm_dispatch,
323 .finalize = __thread_func_tdm_finalize,
/* Runs on the worker thread: creates the tdm_client, fetches its poll fd,
 * and stores both in wl_vk_display->tdm. Returns TPL_ERROR_NONE on success,
 * TPL_ERROR_INVALID_OPERATION on any tdm failure (client destroyed). */
327 _thread_tdm_init(tpl_wl_vk_display_t *wl_vk_display)
329 tdm_client *tdm_client = NULL;
330 int tdm_display_fd = -1;
331 tdm_error tdm_err = TDM_ERROR_NONE;
333 tdm_client = tdm_client_create(&tdm_err);
334 if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
335 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
336 return TPL_ERROR_INVALID_OPERATION;
339 tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
340 if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
341 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
342 tdm_client_destroy(tdm_client);
343 return TPL_ERROR_INVALID_OPERATION;
346 wl_vk_display->tdm.tdm_display_fd = tdm_display_fd;
347 wl_vk_display->tdm.tdm_client = tdm_client;
348 wl_vk_display->tdm.tdm_source = NULL;  /* gsource attached later by caller */
349 wl_vk_display->tdm.tdm_initialized = TPL_TRUE;
351 TPL_INFO("[TDM_CLIENT_INIT]",
352 "wl_vk_display(%p) tdm_client(%p) tdm_display_fd(%d)",
353 wl_vk_display, tdm_client, tdm_display_fd);
355 return TPL_ERROR_NONE;
/* wl_registry global listener: binds zwp_linux_explicit_synchronization_v1
 * when advertised, unless disabled via TPL_EFS=0. (sic: "resistry" typo is
 * preserved — renaming would break the listener table below.) */
359 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
360 uint32_t name, const char *interface,
363 #if TIZEN_FEATURE_ENABLE
364 tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
366 if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
367 char *env = tpl_getenv("TPL_EFS");
368 if (env && !atoi(env)) {
/* TPL_EFS=0: explicitly opt out of explicit sync. */
369 wl_vk_display->use_explicit_sync = TPL_FALSE;
371 wl_vk_display->explicit_sync =
372 wl_registry_bind(wl_registry, name,
373 &zwp_linux_explicit_synchronization_v1_interface, 1);
374 wl_vk_display->use_explicit_sync = TPL_TRUE;
375 TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
/* wl_registry global_remove listener: intentionally a no-op. */
382 __cb_wl_resistry_global_remove_callback(void *data,
383 struct wl_registry *wl_registry,
/* Listener table passed to wl_registry_add_listener(). */
388 static const struct wl_registry_listener registry_listener = {
389 __cb_wl_resistry_global_callback,
390 __cb_wl_resistry_global_remove_callback
/* Logs the current errno (and, for EPROTO, the protocol error details) for
 * a failed wayland call, then caches errno in last_error so the same error
 * is only reported once. */
394 _wl_display_print_err(tpl_wl_vk_display_t *wl_vk_display,
395 const char *func_name)
399 strerror_r(errno, buf, sizeof(buf));
/* Suppress duplicate reports of the same errno. */
401 if (wl_vk_display->last_error == errno)
404 TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
406 dpy_err = wl_display_get_error(wl_vk_display->wl_display);
407 if (dpy_err == EPROTO) {
408 const struct wl_interface *err_interface;
409 uint32_t err_proxy_id, err_code;
410 err_code = wl_display_get_protocol_error(wl_vk_display->wl_display,
413 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
414 err_interface->name, err_code, err_proxy_id);
417 wl_vk_display->last_error = errno;
/* Runs on the worker thread: sets up the per-thread wayland machinery —
 * a temporary registry queue, the persistent ev_queue, the wl_tbm client,
 * the registry listener — then roundtrips to collect globals and rebinds
 * explicit_sync onto ev_queue. Error paths presumably jump to the cleanup
 * at the tail (goto labels not visible in this view). */
421 _thread_wl_display_init(tpl_wl_vk_display_t *wl_vk_display)
423 struct wl_registry *registry = NULL;
424 struct wl_event_queue *queue = NULL;
425 struct wl_display *display_wrapper = NULL;
426 struct wl_proxy *wl_tbm = NULL;
427 struct wayland_tbm_client *wl_tbm_client = NULL;
429 tpl_result_t result = TPL_ERROR_NONE;
/* `queue` is temporary, used only for the registry roundtrip below. */
431 queue = wl_display_create_queue(wl_vk_display->wl_display);
433 TPL_ERR("Failed to create wl_queue wl_display(%p)",
434 wl_vk_display->wl_display);
435 result = TPL_ERROR_INVALID_OPERATION;
/* ev_queue persists for the life of the display thread. */
439 wl_vk_display->ev_queue = wl_display_create_queue(wl_vk_display->wl_display);
440 if (!wl_vk_display->ev_queue) {
441 TPL_ERR("Failed to create wl_queue wl_display(%p)",
442 wl_vk_display->wl_display);
443 result = TPL_ERROR_INVALID_OPERATION;
447 display_wrapper = wl_proxy_create_wrapper(wl_vk_display->wl_display);
448 if (!display_wrapper) {
449 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
450 wl_vk_display->wl_display);
451 result = TPL_ERROR_INVALID_OPERATION;
455 wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
457 registry = wl_display_get_registry(display_wrapper);
459 TPL_ERR("Failed to create wl_registry");
460 result = TPL_ERROR_INVALID_OPERATION;
464 wl_proxy_wrapper_destroy(display_wrapper);
465 display_wrapper = NULL;
467 wl_tbm_client = wayland_tbm_client_init(wl_vk_display->wl_display);
468 if (!wl_tbm_client) {
469 TPL_ERR("Failed to initialize wl_tbm_client.");
470 result = TPL_ERROR_INVALID_CONNECTION;
474 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
476 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
477 result = TPL_ERROR_INVALID_CONNECTION;
/* Route wl_tbm events to the thread's private queue. */
481 wl_proxy_set_queue(wl_tbm, wl_vk_display->ev_queue);
482 wl_vk_display->wl_tbm_client = wl_tbm_client;
/* NOTE(review): "®istry_listener" is mojibake for "&registry_listener"
 * (the "&reg" sequence was mis-decoded) — fix the encoding at source. */
484 if (wl_registry_add_listener(registry, ®istry_listener,
486 TPL_ERR("Failed to wl_registry_add_listener");
487 result = TPL_ERROR_INVALID_OPERATION;
/* Roundtrip so global callbacks (explicit_sync bind) run before use. */
491 ret = wl_display_roundtrip_queue(wl_vk_display->wl_display, queue);
493 _wl_display_print_err(wl_vk_display, "roundtrip_queue");
494 result = TPL_ERROR_INVALID_OPERATION;
498 #if TIZEN_FEATURE_ENABLE
499 if (wl_vk_display->explicit_sync) {
/* Move explicit_sync off the registry queue onto the persistent one. */
500 wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
501 wl_vk_display->ev_queue);
502 TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
503 wl_vk_display->explicit_sync);
507 wl_vk_display->wl_initialized = TPL_TRUE;
509 TPL_INFO("[WAYLAND_INIT]",
510 "wl_vk_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
511 wl_vk_display, wl_vk_display->wl_display,
512 wl_vk_display->wl_tbm_client, wl_vk_display->ev_queue);
513 #if TIZEN_FEATURE_ENABLE
514 TPL_INFO("[WAYLAND_INIT]",
516 wl_vk_display->explicit_sync);
/* Cleanup tail (reached via gotos not visible here): temporaries only. */
520 wl_proxy_wrapper_destroy(display_wrapper);
522 wl_registry_destroy(registry);
524 wl_event_queue_destroy(queue);
/* Runs on the worker thread: tears down wayland state — cancels a pending
 * prepared read, drains pending events, destroys explicit_sync, deinits
 * the tbm client, and destroys ev_queue. */
530 _thread_wl_display_fini(tpl_wl_vk_display_t *wl_vk_display)
532 /* If wl_vk_display is in prepared state, cancel it */
533 if (wl_vk_display->prepared) {
534 wl_display_cancel_read(wl_vk_display->wl_display);
535 wl_vk_display->prepared = TPL_FALSE;
538 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
539 wl_vk_display->ev_queue) == -1) {
540 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
543 #if TIZEN_FEATURE_ENABLE
544 if (wl_vk_display->explicit_sync) {
545 TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
546 "wl_vk_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
547 wl_vk_display, wl_vk_display->explicit_sync);
548 zwp_linux_explicit_synchronization_v1_destroy(wl_vk_display->explicit_sync);
549 wl_vk_display->explicit_sync = NULL;
553 if (wl_vk_display->wl_tbm_client) {
554 struct wl_proxy *wl_tbm = NULL;
556 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
557 wl_vk_display->wl_tbm_client);
/* Detach wl_tbm from ev_queue before the queue is destroyed below. */
559 wl_proxy_set_queue(wl_tbm, NULL);
562 TPL_INFO("[WL_TBM_DEINIT]",
563 "wl_vk_display(%p) wl_tbm_client(%p)",
564 wl_vk_display, wl_vk_display->wl_tbm_client);
565 wayland_tbm_client_deinit(wl_vk_display->wl_tbm_client);
566 wl_vk_display->wl_tbm_client = NULL;
569 wl_event_queue_destroy(wl_vk_display->ev_queue);
571 wl_vk_display->wl_initialized = TPL_FALSE;
573 TPL_INFO("[DISPLAY_FINI]", "wl_vk_display(%p) wl_display(%p)",
574 wl_vk_display, wl_vk_display->wl_display);
/* Worker-thread entry: initializes wayland state (fatal on failure — early
 * return not visible here) and tdm (non-fatal; vblank waiting disabled). */
578 _thread_init(void *data)
580 tpl_wl_vk_display_t *wl_vk_display = (tpl_wl_vk_display_t *)data;
582 if (_thread_wl_display_init(wl_vk_display) != TPL_ERROR_NONE) {
583 TPL_ERR("Failed to initialize wl_vk_display(%p) with wl_display(%p)",
584 wl_vk_display, wl_vk_display->wl_display);
587 if (_thread_tdm_init(wl_vk_display) != TPL_ERROR_NONE) {
588 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
591 return wl_vk_display;
/* gsource prepare for the wayland fd: standard wl_display_prepare_read_queue
 * loop (dispatch pending until the queue is empty), then flush. Sets
 * `prepared` so check/finalize know a read is outstanding. */
595 __thread_func_disp_prepare(tpl_gsource *gsource)
597 tpl_wl_vk_display_t *wl_vk_display =
598 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
600 /* If this wl_vk_display is already prepared,
601 * do nothing in this function. */
602 if (wl_vk_display->prepared)
605 /* If there is a last_error, there is no need to poll,
606 * so skip directly to dispatch.
607 * prepare -> dispatch */
608 if (wl_vk_display->last_error)
611 while (wl_display_prepare_read_queue(wl_vk_display->wl_display,
612 wl_vk_display->ev_queue) != 0) {
613 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
614 wl_vk_display->ev_queue) == -1) {
615 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
619 wl_vk_display->prepared = TPL_TRUE;
621 wl_display_flush(wl_vk_display->wl_display);
/* gsource check for the wayland fd: reads events if the fd is readable,
 * otherwise cancels the prepared read. Always clears `prepared`. */
627 __thread_func_disp_check(tpl_gsource *gsource)
629 tpl_wl_vk_display_t *wl_vk_display =
630 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
631 tpl_bool_t ret = TPL_FALSE;
633 if (!wl_vk_display->prepared)
636 /* If prepared, but last_error is set,
637 * cancel_read is executed and FALSE is returned.
638 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
639 * and skipping disp_check from prepare to disp_dispatch.
640 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
641 if (wl_vk_display->prepared && wl_vk_display->last_error) {
642 wl_display_cancel_read(wl_vk_display->wl_display);
646 if (tpl_gsource_check_io_condition(gsource)) {
647 if (wl_display_read_events(wl_vk_display->wl_display) == -1)
648 _wl_display_print_err(wl_vk_display, "read_event");
/* fd not readable: release the read slot taken in prepare. */
651 wl_display_cancel_read(wl_vk_display->wl_display);
655 wl_vk_display->prepared = TPL_FALSE;
/* gsource dispatch for the wayland fd: dispatches pending events on
 * ev_queue under wl_event_mutex and flushes. Returns SOURCE_REMOVE when
 * last_error is set (removal path — return lines not visible here). */
661 __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
663 tpl_wl_vk_display_t *wl_vk_display =
664 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
668 /* If there is last_error, SOURCE_REMOVE should be returned
669 * to remove the gsource from the main loop.
670 * This is because wl_vk_display is not valid since last_error was set.*/
671 if (wl_vk_display->last_error) {
675 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
676 if (tpl_gsource_check_io_condition(gsource)) {
677 if (wl_display_dispatch_queue_pending(wl_vk_display->wl_display,
678 wl_vk_display->ev_queue) == -1) {
679 _wl_display_print_err(wl_vk_display, "dispatch_queue_pending");
683 wl_display_flush(wl_vk_display->wl_display);
684 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* gsource finalize for the wayland fd: tears down wayland state, marks
 * gsource_finalized under disp_mutex and signals disp_cond so waiters
 * in display init/fini can proceed. */
690 __thread_func_disp_finalize(tpl_gsource *gsource)
692 tpl_wl_vk_display_t *wl_vk_display =
693 (tpl_wl_vk_display_t *)tpl_gsource_get_data(gsource);
695 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
696 TPL_DEBUG("[FINALIZE] wl_vk_display(%p) tpl_gsource(%p)",
697 wl_vk_display, gsource);
699 if (wl_vk_display->wl_initialized)
700 _thread_wl_display_fini(wl_vk_display);
702 wl_vk_display->gsource_finalized = TPL_TRUE;
704 tpl_gcond_signal(&wl_vk_display->disp_cond);
705 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
/* gsource vtable for the wayland display fd. */
711 static tpl_gsource_functions disp_funcs = {
712 .prepare = __thread_func_disp_prepare,
713 .check = __thread_func_disp_check,
714 .dispatch = __thread_func_disp_dispatch,
715 .finalize = __thread_func_disp_finalize,
/* Backend entry: validates the native handle is a wl_display, allocates and
 * default-initializes tpl_wl_vk_display_t, spawns the worker thread, and
 * attaches the display and tdm gsources. On any failure the tail section
 * (reached via gotos not visible in this view) destroys whatever was
 * created, in gsource -> thread -> mutex/cond order, and returns
 * TPL_ERROR_INVALID_OPERATION. */
719 __tpl_wl_vk_display_init(tpl_display_t *display)
723 tpl_wl_vk_display_t *wl_vk_display = NULL;
725 /* Do not allow default display in wayland */
726 if (!display->native_handle) {
727 TPL_ERR("Invalid native handle for display.");
728 return TPL_ERROR_INVALID_PARAMETER;
731 if (!_check_native_handle_is_wl_display(display->native_handle)) {
732 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
733 return TPL_ERROR_INVALID_PARAMETER;
736 wl_vk_display = (tpl_wl_vk_display_t *) calloc(1,
737 sizeof(tpl_wl_vk_display_t));
738 if (!wl_vk_display) {
739 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
740 return TPL_ERROR_OUT_OF_MEMORY;
743 display->backend.data = wl_vk_display;
744 display->bufmgr_fd = -1;
746 wl_vk_display->tdm.tdm_initialized = TPL_FALSE;
747 wl_vk_display->wl_initialized = TPL_FALSE;
749 wl_vk_display->ev_queue = NULL;
750 wl_vk_display->wl_display = (struct wl_display *)display->native_handle;
751 wl_vk_display->last_error = 0;
752 wl_vk_display->use_explicit_sync = TPL_FALSE; // default disabled
753 wl_vk_display->prepared = TPL_FALSE;
755 /* Wayland Interfaces */
756 #if TIZEN_FEATURE_ENABLE
757 wl_vk_display->explicit_sync = NULL;
759 wl_vk_display->wl_tbm_client = NULL;
761 /* Vulkan specific surface capabilities */
762 wl_vk_display->min_buffer = 2;
763 wl_vk_display->max_buffer = VK_CLIENT_QUEUE_SIZE;
764 wl_vk_display->present_modes = TPL_DISPLAY_PRESENT_MODE_FIFO;
766 wl_vk_display->use_wait_vblank = TPL_TRUE; // default enabled
/* TPL_WAIT_VBLANK=0 disables tdm vblank waiting. */
768 char *env = tpl_getenv("TPL_WAIT_VBLANK");
769 if (env && !atoi(env)) {
770 wl_vk_display->use_wait_vblank = TPL_FALSE;
774 tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
776 tpl_gmutex_init(&wl_vk_display->disp_mutex);
777 tpl_gcond_init(&wl_vk_display->disp_cond);
/* Worker thread runs _thread_init() before the loop starts. */
780 wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
781 (tpl_gthread_func)_thread_init,
782 (void *)wl_vk_display);
783 if (!wl_vk_display->thread) {
784 TPL_ERR("Failed to create wl_vk_thread");
788 wl_vk_display->disp_source = tpl_gsource_create(wl_vk_display->thread,
789 (void *)wl_vk_display,
790 wl_display_get_fd(wl_vk_display->wl_display),
791 &disp_funcs, SOURCE_TYPE_NORMAL);
792 if (!wl_vk_display->disp_source) {
793 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
794 display->native_handle,
795 wl_vk_display->thread);
799 tpl_gmutex_init(&wl_vk_display->tdm.tdm_mutex);
800 tpl_gcond_init(&wl_vk_display->tdm.tdm_cond);
802 wl_vk_display->tdm.tdm_source = tpl_gsource_create(wl_vk_display->thread,
803 (void *)wl_vk_display,
804 wl_vk_display->tdm.tdm_display_fd,
805 &tdm_funcs, SOURCE_TYPE_NORMAL);
806 if (!wl_vk_display->tdm.tdm_source) {
807 TPL_ERR("Failed to create tdm_gsource\n");
811 TPL_INFO("[DISPLAY_INIT]",
812 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
814 wl_vk_display->thread,
815 wl_vk_display->wl_display);
817 TPL_INFO("[DISPLAY_INIT]",
818 "USE_WAIT_VBLANK(%s) USE_EXPLICIT_SYNC(%s)",
819 wl_vk_display->use_wait_vblank ? "TRUE" : "FALSE",
820 wl_vk_display->use_explicit_sync ? "TRUE" : "FALSE");
822 return TPL_ERROR_NONE;
/* ---- failure cleanup tail (goto targets not visible above) ---- */
/* Destroy gsources in-thread and wait on the finalized flag in a loop —
 * protects against spurious wakeups of tpl_gcond_wait. */
825 if (wl_vk_display->tdm.tdm_source) {
826 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
827 while (!wl_vk_display->tdm.gsource_finalized) {
828 tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
829 tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
831 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
834 if (wl_vk_display->disp_source) {
835 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
836 while (!wl_vk_display->gsource_finalized) {
837 tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
838 tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
840 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
843 if (wl_vk_display->thread) {
844 tpl_gthread_destroy(wl_vk_display->thread);
847 tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
848 tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
849 tpl_gcond_clear(&wl_vk_display->disp_cond);
850 tpl_gmutex_clear(&wl_vk_display->disp_mutex);
852 wl_vk_display->thread = NULL;
855 display->backend.data = NULL;
856 return TPL_ERROR_INVALID_OPERATION;
/* Backend teardown: destroys tdm and display gsources in-thread (waiting on
 * the finalized flags in loops to survive spurious cond wakeups), then the
 * thread, mutexes/conds, and clears backend.data. */
860 __tpl_wl_vk_display_fini(tpl_display_t *display)
862 tpl_wl_vk_display_t *wl_vk_display;
866 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
868 TPL_INFO("[DISPLAY_FINI]",
869 "wl_vk_display(%p) tpl_gthread(%p) wl_display(%p)",
871 wl_vk_display->thread,
872 wl_vk_display->wl_display);
874 if (wl_vk_display->tdm.tdm_source && wl_vk_display->tdm.tdm_initialized) {
875 /* This is a protection to prevent problems that arise in unexpected situations
876 * that g_cond_wait cannot work normally.
877 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
878 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
880 tpl_gmutex_lock(&wl_vk_display->tdm.tdm_mutex);
881 while (!wl_vk_display->tdm.gsource_finalized) {
882 tpl_gsource_destroy(wl_vk_display->tdm.tdm_source, TPL_TRUE);
883 tpl_gcond_wait(&wl_vk_display->tdm.tdm_cond, &wl_vk_display->tdm.tdm_mutex);
885 wl_vk_display->tdm.tdm_source = NULL;
886 tpl_gmutex_unlock(&wl_vk_display->tdm.tdm_mutex);
889 /* This is a protection to prevent problems that arise in unexpected situations
890 * that g_cond_wait cannot work normally.
891 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
892 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
894 tpl_gmutex_lock(&wl_vk_display->disp_mutex);
895 while (wl_vk_display->disp_source && !wl_vk_display->gsource_finalized) {
896 tpl_gsource_destroy(wl_vk_display->disp_source, TPL_TRUE);
897 tpl_gcond_wait(&wl_vk_display->disp_cond, &wl_vk_display->disp_mutex);
899 wl_vk_display->disp_source = NULL;
900 tpl_gmutex_unlock(&wl_vk_display->disp_mutex);
902 if (wl_vk_display->thread) {
903 tpl_gthread_destroy(wl_vk_display->thread);
904 wl_vk_display->thread = NULL;
907 tpl_gcond_clear(&wl_vk_display->tdm.tdm_cond);
908 tpl_gmutex_clear(&wl_vk_display->tdm.tdm_mutex);
909 tpl_gcond_clear(&wl_vk_display->disp_cond);
910 tpl_gmutex_clear(&wl_vk_display->disp_mutex);
912 tpl_gmutex_clear(&wl_vk_display->wl_event_mutex);
/* free(wl_vk_display) presumably happens in a line not visible here. */
917 display->backend.data = NULL;
/* Maps an 8:8:8(:8) window config onto a TBM format: alpha 8 -> ARGB8888,
 * alpha 0 -> XRGB8888; anything else is rejected. */
921 __tpl_wl_vk_display_query_config(tpl_display_t *display,
922 tpl_surface_type_t surface_type,
923 int red_size, int green_size,
924 int blue_size, int alpha_size,
925 int color_depth, int *native_visual_id,
930 if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
931 green_size == 8 && blue_size == 8 &&
932 (color_depth == 32 || color_depth == 24)) {
934 if (alpha_size == 8) {
935 if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
936 if (is_slow) *is_slow = TPL_FALSE;
937 return TPL_ERROR_NONE;
939 if (alpha_size == 0) {
940 if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
941 if (is_slow) *is_slow = TPL_FALSE;
942 return TPL_ERROR_NONE;
946 return TPL_ERROR_INVALID_PARAMETER;
/* No-op config filter: accepts every config unchanged. */
950 __tpl_wl_vk_display_filter_config(tpl_display_t *display,
955 TPL_IGNORE(visual_id);
956 TPL_IGNORE(alpha_size);
957 return TPL_ERROR_NONE;
/* Reports the backend's supported swapchain buffer count range
 * (min_buffer/max_buffer set at display init). min/max may be NULL. */
961 __tpl_wl_vk_display_query_window_supported_buffer_count(
962 tpl_display_t *display,
963 tpl_handle_t window, int *min, int *max)
965 tpl_wl_vk_display_t *wl_vk_display = NULL;
970 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
971 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
973 if (min) *min = wl_vk_display->min_buffer;
974 if (max) *max = wl_vk_display->max_buffer;
976 return TPL_ERROR_NONE;
/* Reports the supported VK present-mode bitmask (set at display init). */
980 __tpl_wl_vk_display_query_window_supported_present_modes(
981 tpl_display_t *display,
982 tpl_handle_t window, int *present_modes)
984 tpl_wl_vk_display_t *wl_vk_display = NULL;
989 wl_vk_display = (tpl_wl_vk_display_t *)display->backend.data;
990 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
993 *present_modes = wl_vk_display->present_modes;
996 return TPL_ERROR_NONE;
/* Drains the surface's buffer trace array: for each tracked wl_vk_buffer,
 * optionally waits (timed) for in-flight buffers to be signaled, then
 * releases acquired buffers back to the tbm_queue or cancels dequeued ones,
 * dropping the internal tbm ref. Locking order throughout:
 * wl_event_mutex -> buffers_mutex (and separately wl_vk_buffer->mutex). */
1000 _tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
1002 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1003 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1004 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1005 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1006 tpl_bool_t need_to_release = TPL_FALSE;
1007 tpl_bool_t need_to_cancel = TPL_FALSE;
1008 buffer_status_t status = RELEASED;
1011 while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
1012 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1013 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
1014 wl_vk_buffer = wl_vk_surface->buffers[idx];
/* Slot occupied: detach it from the trace array. */
1017 wl_vk_surface->buffers[idx] = NULL;
1018 wl_vk_surface->buffer_cnt--;
1020 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1021 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1026 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
1028 tpl_gmutex_lock(&wl_vk_buffer->mutex);
1030 status = wl_vk_buffer->status;
1032 TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
1034 wl_vk_buffer->tbm_surface,
1035 status_to_string[status]);
1037 if (status >= ENQUEUED) {
1038 tpl_bool_t need_to_wait = TPL_FALSE;
1039 tpl_result_t wait_result = TPL_ERROR_NONE;
/* Without explicit sync we must wait until the buffer passed vblank;
 * with explicit sync, until its release condition (missing line). */
1041 if (!wl_vk_display->use_explicit_sync &&
1042 status < WAITING_VBLANK)
1043 need_to_wait = TPL_TRUE;
1045 if (wl_vk_display->use_explicit_sync &&
1047 need_to_wait = TPL_TRUE;
/* Drop wl_event_mutex while waiting so the event thread can make
 * progress and signal wl_vk_buffer->cond. */
1050 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1051 wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond,
1052 &wl_vk_buffer->mutex,
1054 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1056 status = wl_vk_buffer->status;
1058 if (wait_result == TPL_ERROR_TIME_OUT)
1059 TPL_WARN("timeout occured waiting signaled. wl_vk_buffer(%p)",
1064 /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
1065 /* It has been acquired but has not yet been released, so this
1066 * buffer must be released. */
1067 need_to_release = (status >= ACQUIRED && status <= COMMITTED);
1069 /* After dequeue, it has not been enqueued yet
1070 * so cancel_dequeue must be performed. */
1071 need_to_cancel = (status == DEQUEUED);
1073 if (swapchain && swapchain->tbm_queue) {
1074 if (need_to_release) {
1075 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
1076 wl_vk_buffer->tbm_surface);
1077 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1078 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
1079 wl_vk_buffer->tbm_surface, tsq_err);
1082 if (need_to_cancel) {
1083 tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
1084 wl_vk_buffer->tbm_surface);
1085 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1086 TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
1087 wl_vk_buffer->tbm_surface, tsq_err);
1091 wl_vk_buffer->status = RELEASED;
1093 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
/* Drop the internal ref taken when the buffer entered the queue. */
1095 if (need_to_release || need_to_cancel)
1096 tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);
1098 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Creates a vblank object on the "primary" tdm output, with fake vblank
 * enabled (events still delivered when display is off) and sync disabled.
 * Returns NULL on any tdm failure. */
1104 static tdm_client_vblank*
1105 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1107 tdm_client_vblank *vblank = NULL;
1108 tdm_client_output *tdm_output = NULL;
1109 tdm_error tdm_err = TDM_ERROR_NONE;
1112 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1116 tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1117 if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1118 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1122 vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1123 if (!vblank || tdm_err != TDM_ERROR_NONE) {
1124 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1128 tdm_client_vblank_set_enable_fake(vblank, 1);
1129 tdm_client_vblank_set_sync(vblank, 0);
/* Runs on the worker thread: per-surface init — creates the tdm vblank
 * object, the per-surface explicit-sync object (falling back to disabling
 * explicit sync on failure), and the vblank-waiting buffer list. */
1135 _thread_wl_vk_surface_init(tpl_wl_vk_surface_t *wl_vk_surface)
1137 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1139 /* tbm_surface_queue will be created at swapchain_create */
1141 wl_vk_surface->vblank = _thread_create_tdm_client_vblank(
1142 wl_vk_display->tdm.tdm_client);
1143 if (wl_vk_surface->vblank) {
1144 TPL_INFO("[VBLANK_INIT]",
1145 "wl_vk_surface(%p) tdm_client(%p) vblank(%p)",
1146 wl_vk_surface, wl_vk_display->tdm.tdm_client,
1147 wl_vk_surface->vblank);
1150 #if TIZEN_FEATURE_ENABLE
1151 if (wl_vk_display->explicit_sync && wl_vk_display->use_explicit_sync) {
1152 wl_vk_surface->surface_sync =
1153 zwp_linux_explicit_synchronization_v1_get_synchronization(
1154 wl_vk_display->explicit_sync, wl_vk_surface->wl_surface);
1155 if (wl_vk_surface->surface_sync) {
1156 TPL_INFO("[EXPLICIT_SYNC_INIT]",
1157 "wl_vk_surface(%p) surface_sync(%p)",
1158 wl_vk_surface, wl_vk_surface->surface_sync);
1160 TPL_WARN("Failed to create surface_sync. | wl_vk_surface(%p)",
/* Degrade gracefully: disable explicit sync display-wide. */
1162 wl_vk_display->use_explicit_sync = TPL_FALSE;
1166 wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc();
/* Runs on the worker thread: per-surface teardown — frees the
 * vblank-waiting list, destroys surface_sync and the vblank object. */
1170 _thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface)
1172 TPL_INFO("[SURFACE_FINI]",
1173 "wl_vk_surface(%p) wl_surface(%p)",
1174 wl_vk_surface, wl_vk_surface->wl_surface);
1176 if (wl_vk_surface->vblank_waiting_buffers) {
1177 __tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL);
1178 wl_vk_surface->vblank_waiting_buffers = NULL;
1181 #if TIZEN_FEATURE_ENABLE
1182 if (wl_vk_surface->surface_sync) {
1183 TPL_INFO("[SURFACE_SYNC_DESTROY]",
1184 "wl_vk_surface(%p) surface_sync(%p)",
1185 wl_vk_surface, wl_vk_surface->surface_sync);
1186 zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync);
1187 wl_vk_surface->surface_sync = NULL;
1191 if (wl_vk_surface->vblank) {
1192 TPL_INFO("[VBLANK_DESTROY]",
1193 "wl_vk_surface(%p) vblank(%p)",
1194 wl_vk_surface, wl_vk_surface->vblank);
1195 tdm_client_vblank_destroy(wl_vk_surface->vblank);
1196 wl_vk_surface->vblank = NULL;
/* Surface gsource dispatch: handles messages posted from the API thread
 * (INIT_SURFACE / CREATE_QUEUE / DESTROY_QUEUE / ACQUIRABLE) under
 * surf_mutex, signalling surf_cond for the synchronous ones. */
1201 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1203 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1205 wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1207 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1208 if (message == INIT_SURFACE) { /* Initialize surface */
1209 TPL_DEBUG("wl_vk_surface(%p) initialize message received!",
1211 _thread_wl_vk_surface_init(wl_vk_surface);
1212 wl_vk_surface->initialized_in_thread = TPL_TRUE;
1213 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1214 } else if (message == CREATE_QUEUE) { /* Create tbm_surface_queue */
1215 TPL_DEBUG("wl_vk_surface(%p) queue creation message received!",
1217 if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
1218 != TPL_ERROR_NONE) {
1219 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1222 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1223 } else if (message == DESTROY_QUEUE) { /* swapchain destroy */
1224 TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!",
1226 _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
1227 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1228 } else if (message == ACQUIRABLE) { /* Acquirable message */
1229 TPL_DEBUG("wl_vk_surface(%p) acquirable message received!",
/* Fire-and-forget: no cond signal for ACQUIRABLE. */
1231 if (_thread_surface_queue_acquire(wl_vk_surface)
1232 != TPL_ERROR_NONE) {
1233 TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
1238 /* init to NONE_MESSAGE */
1239 wl_vk_surface->sent_message = NONE_MESSAGE;
1241 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* gsource finalize handler: tears down thread-side surface resources and
 * sets gsource_finalized under surf_mutex, then signals surf_cond so the
 * loop in __tpl_wl_vk_surface_fini (wait-with-flag pattern) can exit. */
1247 __thread_func_surf_finalize(tpl_gsource *gsource)
1249 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1251 wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource);
1252 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1254 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1255 TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)",
1256 wl_vk_surface, gsource);
1258 _thread_wl_vk_surface_fini(wl_vk_surface);
/* Flag must be set before the signal; the waiter re-checks it in a loop. */
1260 wl_vk_surface->gsource_finalized = TPL_TRUE;
1262 tpl_gcond_signal(&wl_vk_surface->surf_cond);
1263 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* gsource vtable for surface sources (see tpl_gsource_create in
 * __tpl_wl_vk_surface_init). */
1266 static tpl_gsource_functions surf_funcs = {
1269 .dispatch = __thread_func_surf_dispatch,
1270 .finalize = __thread_func_surf_finalize,
/* Backend surface init entry point.
 * Allocates the tpl_wl_vk_surface_t, attaches it to surface->backend.data,
 * creates a gsource on the display's worker thread, then posts INIT_SURFACE
 * and blocks on surf_cond until the thread sets initialized_in_thread.
 * Returns TPL_ERROR_NONE on success; OUT_OF_MEMORY / INVALID_OPERATION on
 * allocation or gsource-creation failure. */
1275 __tpl_wl_vk_surface_init(tpl_surface_t *surface)
1277 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1278 tpl_wl_vk_display_t *wl_vk_display = NULL;
1279 tpl_gsource *surf_source = NULL;
1281 TPL_ASSERT(surface);
1282 TPL_ASSERT(surface->display);
1283 TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1284 TPL_ASSERT(surface->native_handle);
1286 wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
1287 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1289 wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
1290 sizeof(tpl_wl_vk_surface_t));
1291 if (!wl_vk_surface) {
1292 TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
1293 return TPL_ERROR_OUT_OF_MEMORY;
1296 surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
1297 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
1299 TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
1301 free(wl_vk_surface);
1302 surface->backend.data = NULL;
1303 return TPL_ERROR_INVALID_OPERATION;
1306 surface->backend.data = (void *)wl_vk_surface;
/* -1 = unknown until the first queue reset reports real dimensions. */
1307 surface->width = -1;
1308 surface->height = -1;
1310 wl_vk_surface->surf_source = surf_source;
1311 wl_vk_surface->swapchain = NULL;
1313 wl_vk_surface->wl_vk_display = wl_vk_display;
1314 wl_vk_surface->wl_surface = (struct wl_surface *)surface->native_handle;
1315 wl_vk_surface->tpl_surface = surface;
1317 wl_vk_surface->reset = TPL_FALSE;
1318 wl_vk_surface->is_activated = TPL_FALSE;
1319 wl_vk_surface->vblank_done = TPL_TRUE;
1320 wl_vk_surface->initialized_in_thread = TPL_FALSE;
1322 wl_vk_surface->render_done_cnt = 0;
1324 wl_vk_surface->vblank = NULL;
1325 #if TIZEN_FEATURE_ENABLE
1326 wl_vk_surface->surface_sync = NULL;
1329 wl_vk_surface->sent_message = NONE_MESSAGE;
1331 wl_vk_surface->post_interval = surface->post_interval;
1335 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1336 wl_vk_surface->buffers[i] = NULL;
1337 wl_vk_surface->buffer_cnt = 0;
1340 tpl_gmutex_init(&wl_vk_surface->surf_mutex);
1341 tpl_gcond_init(&wl_vk_surface->surf_cond);
1343 tpl_gmutex_init(&wl_vk_surface->buffers_mutex);
1345 /* Initialize in thread */
1346 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1347 wl_vk_surface->sent_message = INIT_SURFACE;
1348 tpl_gsource_send_message(wl_vk_surface->surf_source,
1349 wl_vk_surface->sent_message);
/* Loop guards against spurious wakeups of tpl_gcond_wait. */
1350 while (!wl_vk_surface->initialized_in_thread)
1351 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1352 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1354 TPL_INFO("[SURFACE_INIT]",
1355 "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
1356 surface, wl_vk_surface, wl_vk_surface->surf_source);
1358 return TPL_ERROR_NONE;
/* Backend surface fini entry point.
 * Destroys the surface's gsource (looping on surf_cond until the worker
 * thread reports gsource_finalized), clears back-pointers, tears down the
 * surface mutex/cond and frees the backend data. */
1362 __tpl_wl_vk_surface_fini(tpl_surface_t *surface)
1364 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1365 tpl_wl_vk_display_t *wl_vk_display = NULL;
1367 TPL_ASSERT(surface);
1368 TPL_ASSERT(surface->display);
1370 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1371 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1373 wl_vk_display = (tpl_wl_vk_display_t *)
1374 surface->display->backend.data;
1375 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1377 TPL_INFO("[SURFACE_FINI][BEGIN]",
1378 "wl_vk_surface(%p) wl_surface(%p)",
1379 wl_vk_surface, wl_vk_surface->wl_surface);
1381 if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
1382 /* finalize swapchain */
1386 wl_vk_surface->swapchain = NULL;
1388 /* This is a protection to prevent problems that arise in unexpected situations
1389 * that g_cond_wait cannot work normally.
1390 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
1391 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
1393 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1394 while (wl_vk_surface->surf_source && !wl_vk_surface->gsource_finalized) {
1395 tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
1396 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1398 wl_vk_surface->surf_source = NULL;
1399 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1401 _print_buffer_lists(wl_vk_surface);
1403 wl_vk_surface->wl_surface = NULL;
1404 wl_vk_surface->wl_vk_display = NULL;
1405 wl_vk_surface->tpl_surface = NULL;
/* Lock/unlock pair drains any late holder before the mutex is cleared. */
1407 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1408 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1409 tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
1410 tpl_gcond_clear(&wl_vk_surface->surf_cond);
1412 TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);
1414 free(wl_vk_surface);
1415 surface->backend.data = NULL;
/* Stores the requested post (present) interval on the backend surface.
 * The value is consumed by the commit path — it is not validated here. */
1419 __tpl_wl_vk_surface_set_post_interval(tpl_surface_t *surface,
1422 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1424 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
1426 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1428 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1430 TPL_INFO("[SET_POST_INTERVAL]",
1431 "wl_vk_surface(%p) post_interval(%d -> %d)",
1432 wl_vk_surface, wl_vk_surface->post_interval, post_interval);
1434 wl_vk_surface->post_interval = post_interval;
1436 return TPL_ERROR_NONE;
/* Returns false (invalid) while the reset flag is raised, i.e. after a
 * queue reset/resize/activation change, until the next dequeue clears it. */
1440 __tpl_wl_vk_surface_validate(tpl_surface_t *surface)
1442 TPL_ASSERT(surface);
1443 TPL_ASSERT(surface->backend.data);
1445 tpl_wl_vk_surface_t *wl_vk_surface =
1446 (tpl_wl_vk_surface_t *)surface->backend.data;
1448 return !(wl_vk_surface->reset);
/* tbm_surface_queue reset callback.
 * Raises wl_vk_surface->reset so the next frame re-fetches a buffer after a
 * size change or an ACTIVATED/DEACTIVATED transition, then invokes the
 * user's reset_cb if registered.
 * NOTE(review): registered from the worker thread in
 * _thread_swapchain_create_tbm_queue — confirm this runs thread-side. */
1452 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1455 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1456 tpl_wl_vk_display_t *wl_vk_display = NULL;
1457 tpl_wl_vk_swapchain_t *swapchain = NULL;
1458 tpl_surface_t *surface = NULL;
1459 tpl_bool_t is_activated = TPL_FALSE;
1462 wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1463 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1465 wl_vk_display = wl_vk_surface->wl_vk_display;
1466 TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
1468 surface = wl_vk_surface->tpl_surface;
1469 TPL_CHECK_ON_NULL_RETURN(surface);
1471 swapchain = wl_vk_surface->swapchain;
1472 TPL_CHECK_ON_NULL_RETURN(swapchain);
1474 /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1475 * the changed window size at the next frame. */
1476 width = tbm_surface_queue_get_width(tbm_queue);
1477 height = tbm_surface_queue_get_height(tbm_queue);
1478 if (surface->width != width || surface->height != height) {
1479 TPL_INFO("[QUEUE_RESIZE]",
1480 "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1481 wl_vk_surface, tbm_queue,
1482 surface->width, surface->height, width, height);
1485 /* When queue_reset_callback is called, if is_activated is different from
1486 * its previous state change the reset flag to TPL_TRUE to get a new buffer
1487 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1488 is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
1489 swapchain->tbm_queue);
1490 if (wl_vk_surface->is_activated != is_activated) {
1492 TPL_INFO("[ACTIVATED]",
1493 "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1494 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1496 TPL_LOG_T("[DEACTIVATED]",
1497 " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
1498 wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
1502 wl_vk_surface->reset = TPL_TRUE;
1504 if (surface->reset_cb)
1505 surface->reset_cb(surface->reset_data);
/* tbm_surface_queue acquirable callback: forwards an ACQUIRABLE message to
 * the worker thread so it can acquire and commit the enqueued buffer.
 * The NONE_MESSAGE guard coalesces notifications while one is in flight. */
1509 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1512 TPL_IGNORE(tbm_queue);
1514 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
1515 TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
1517 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1518 if (wl_vk_surface->sent_message == NONE_MESSAGE) {
1519 wl_vk_surface->sent_message = ACQUIRABLE;
1520 tpl_gsource_send_message(wl_vk_surface->surf_source,
1521 wl_vk_surface->sent_message);
1523 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* Worker-thread creation (or reuse) of the swapchain's tbm_surface_queue.
 * Validates the requested buffer_count against the display's min/max and
 * the present_mode against supported modes; on any validation failure both
 * swapchain->result and the return value carry the error so the waiting
 * caller in __tpl_wl_vk_surface_create_swapchain can observe it.
 * If a tbm_queue already exists, it is reused: stale swapchain_buffers refs
 * are dropped, the queue is reset when dimensions changed, and the swapchain
 * refcount is bumped. Otherwise a new queue is created (tiled variant when
 * the bufmgr advertises TBM_BUFMGR_CAPABILITY_TILED_MEMORY) and the reset /
 * acquirable callbacks are registered. */
1527 _thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1529 TPL_ASSERT (wl_vk_surface);
1531 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1532 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1533 tbm_surface_queue_h tbm_queue = NULL;
1534 tbm_bufmgr bufmgr = NULL;
1535 unsigned int capability;
1537 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1538 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1540 if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
1541 TPL_ERR("buffer count(%d) must be higher than (%d)",
1542 swapchain->properties.buffer_count,
1543 wl_vk_display->min_buffer);
1544 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1545 return TPL_ERROR_INVALID_PARAMETER;
1548 if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
1549 TPL_ERR("buffer count(%d) must be lower than (%d)",
1550 swapchain->properties.buffer_count,
1551 wl_vk_display->max_buffer);
1552 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1553 return TPL_ERROR_INVALID_PARAMETER;
1556 if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
1557 TPL_ERR("Unsupported present_mode(%d)",
1558 swapchain->properties.present_mode);
1559 swapchain->result = TPL_ERROR_INVALID_PARAMETER;
1560 return TPL_ERROR_INVALID_PARAMETER;
/* --- reuse path: a queue from a previous swapchain still exists --- */
1563 if (swapchain->tbm_queue) {
1564 int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
1565 int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
1567 if (swapchain->swapchain_buffers) {
1569 for (i = 0; i < swapchain->properties.buffer_count; i++) {
1570 if (swapchain->swapchain_buffers[i]) {
1571 TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]);
1572 tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1573 swapchain->swapchain_buffers[i] = NULL;
1577 free(swapchain->swapchain_buffers);
1578 swapchain->swapchain_buffers = NULL;
1581 if (old_width != swapchain->properties.width ||
1582 old_height != swapchain->properties.height) {
1583 tbm_surface_queue_reset(swapchain->tbm_queue,
1584 swapchain->properties.width,
1585 swapchain->properties.height,
1586 TBM_FORMAT_ARGB8888);
1587 TPL_INFO("[RESIZE]",
1588 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
1589 wl_vk_surface, swapchain, swapchain->tbm_queue,
1590 old_width, old_height,
1591 swapchain->properties.width,
1592 swapchain->properties.height);
/* Report the queue's actual size back to the caller's properties. */
1595 swapchain->properties.buffer_count =
1596 tbm_surface_queue_get_size(swapchain->tbm_queue);
1598 wl_vk_surface->reset = TPL_FALSE;
1600 __tpl_util_atomic_inc(&swapchain->ref_cnt);
1601 swapchain->create_done = TPL_TRUE;
1603 TPL_INFO("[SWAPCHAIN_REUSE]",
1604 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
1605 wl_vk_surface, swapchain, swapchain->tbm_queue,
1606 swapchain->properties.buffer_count);
1608 return TPL_ERROR_NONE;
/* --- fresh-creation path --- */
/* Temporary bufmgr init solely to query the tiled-memory capability. */
1611 bufmgr = tbm_bufmgr_init(-1);
1612 capability = tbm_bufmgr_get_capability(bufmgr);
1613 tbm_bufmgr_deinit(bufmgr);
1615 if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1616 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1617 wl_vk_display->wl_tbm_client,
1618 wl_vk_surface->wl_surface,
1619 swapchain->properties.buffer_count,
1620 swapchain->properties.width,
1621 swapchain->properties.height,
1622 TBM_FORMAT_ARGB8888);
1624 tbm_queue = wayland_tbm_client_create_surface_queue(
1625 wl_vk_display->wl_tbm_client,
1626 wl_vk_surface->wl_surface,
1627 swapchain->properties.buffer_count,
1628 swapchain->properties.width,
1629 swapchain->properties.height,
1630 TBM_FORMAT_ARGB8888);
1634 TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
1636 swapchain->result = TPL_ERROR_OUT_OF_MEMORY;
1637 return TPL_ERROR_OUT_OF_MEMORY;
/* GUARANTEE_CYCLE keeps dequeue order matching Vulkan swapchain indices. */
1640 if (tbm_surface_queue_set_modes(
1641 tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1642 TBM_SURFACE_QUEUE_ERROR_NONE) {
1643 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1645 tbm_surface_queue_destroy(tbm_queue);
1646 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1647 return TPL_ERROR_INVALID_OPERATION;
1650 if (tbm_surface_queue_add_reset_cb(
1652 __cb_tbm_queue_reset_callback,
1653 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1654 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1656 tbm_surface_queue_destroy(tbm_queue);
1657 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1658 return TPL_ERROR_INVALID_OPERATION;
1661 if (tbm_surface_queue_add_acquirable_cb(
1663 __cb_tbm_queue_acquirable_callback,
1664 (void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1665 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1667 tbm_surface_queue_destroy(tbm_queue);
1668 swapchain->result = TPL_ERROR_INVALID_OPERATION;
1669 return TPL_ERROR_INVALID_OPERATION;
1672 swapchain->tbm_queue = tbm_queue;
1673 swapchain->create_done = TPL_TRUE;
1675 TPL_INFO("[TBM_QUEUE_CREATED]",
1676 "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)",
1677 wl_vk_surface, swapchain, tbm_queue);
1679 return TPL_ERROR_NONE;
/* Backend swapchain creation entry point.
 * Allocates (or revisits) the swapchain object, fills the requested
 * properties, then posts CREATE_QUEUE to the worker thread and blocks until
 * create_done is set or swapchain->result reports an error.
 * NOTE(review): on thread-side failure this falls through to the
 * tbm_queue != NULL assert — error propagation past that point is not
 * visible in this chunk; confirm against the full file. */
1683 __tpl_wl_vk_surface_create_swapchain(tpl_surface_t *surface,
1684 tbm_format format, int width,
1685 int height, int buffer_count, int present_mode)
1687 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1688 tpl_wl_vk_display_t *wl_vk_display = NULL;
1689 tpl_wl_vk_swapchain_t *swapchain = NULL;
1691 TPL_ASSERT(surface);
1692 TPL_ASSERT(surface->display);
1694 wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
1695 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1697 wl_vk_display = (tpl_wl_vk_display_t *)
1698 surface->display->backend.data;
1699 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1701 swapchain = wl_vk_surface->swapchain;
1703 if (swapchain == NULL) {
1705 (tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
1706 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
1707 swapchain->tbm_queue = NULL;
1710 swapchain->properties.buffer_count = buffer_count;
1711 swapchain->properties.width = width;
1712 swapchain->properties.height = height;
1713 swapchain->properties.present_mode = present_mode;
1714 swapchain->wl_vk_surface = wl_vk_surface;
1715 swapchain->properties.format = format;
1717 swapchain->result = TPL_ERROR_NONE;
1718 swapchain->create_done = TPL_FALSE;
1720 wl_vk_surface->swapchain = swapchain;
1722 __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
1724 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1725 /* send swapchain create tbm_queue message */
1726 wl_vk_surface->sent_message = CREATE_QUEUE;
1727 tpl_gsource_send_message(wl_vk_surface->surf_source,
1728 wl_vk_surface->sent_message);
/* Wake on either completion or an error recorded by the worker thread. */
1729 while (!swapchain->create_done && swapchain->result == TPL_ERROR_NONE)
1730 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1731 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1733 TPL_CHECK_ON_FALSE_ASSERT_FAIL(
1734 swapchain->tbm_queue != NULL,
1735 "[CRITICAL FAIL] Failed to create tbm_surface_queue");
1737 wl_vk_surface->reset = TPL_FALSE;
1739 return TPL_ERROR_NONE;
/* Worker-thread destruction of the swapchain's tbm_surface_queue.
 * Setting tbm_queue = NULL is the completion signal observed by the waiter
 * loop in __tpl_wl_vk_surface_destroy_swapchain. */
1743 _thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
1745 TPL_ASSERT(wl_vk_surface);
1747 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1749 TPL_CHECK_ON_NULL_RETURN(swapchain);
1751 if (swapchain->tbm_queue) {
1752 TPL_INFO("[TBM_QUEUE_DESTROY]",
1753 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
1754 wl_vk_surface, swapchain, swapchain->tbm_queue);
1755 tbm_surface_queue_destroy(swapchain->tbm_queue);
1756 swapchain->tbm_queue = NULL;
/* Backend swapchain destruction entry point.
 * Drops one swapchain reference; only the final reference performs real
 * teardown: unrefs the cached swapchain buffers, clears tracked buffers,
 * then posts DESTROY_QUEUE to the worker thread and waits until the queue
 * pointer goes NULL. */
1761 __tpl_wl_vk_surface_destroy_swapchain(tpl_surface_t *surface)
1763 tpl_wl_vk_swapchain_t *swapchain = NULL;
1764 tpl_wl_vk_surface_t *wl_vk_surface = NULL;
1765 tpl_wl_vk_display_t *wl_vk_display = NULL;
1767 TPL_ASSERT(surface);
1768 TPL_ASSERT(surface->display);
1770 wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
1771 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
1773 wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
1774 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
1776 swapchain = wl_vk_surface->swapchain;
1778 TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
1780 return TPL_ERROR_INVALID_OPERATION;
1783 if (!swapchain->tbm_queue) {
1784 TPL_ERR("wl_vk_surface(%p)->swapchain(%p)->tbm_queue is NULL.",
1785 wl_vk_surface, wl_vk_surface->swapchain);
1786 return TPL_ERROR_INVALID_OPERATION;
/* Still referenced elsewhere (e.g. queue reuse) — defer real teardown. */
1789 if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
1790 TPL_INFO("[DESTROY_SWAPCHAIN]",
1791 "wl_vk_surface(%p) swapchain(%p) still valid.",
1792 wl_vk_surface, swapchain);
1793 return TPL_ERROR_NONE;
1796 TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
1797 "wl_vk_surface(%p) swapchain(%p)",
1798 wl_vk_surface, wl_vk_surface->swapchain);
1800 if (swapchain->swapchain_buffers) {
1801 for (int i = 0; i < swapchain->properties.buffer_count; i++) {
1802 if (swapchain->swapchain_buffers[i]) {
1803 TPL_DEBUG("Stop tracking tbm_surface(%p)",
1804 swapchain->swapchain_buffers[i]);
1805 tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
1806 swapchain->swapchain_buffers[i] = NULL;
1810 free(swapchain->swapchain_buffers);
1811 swapchain->swapchain_buffers = NULL;
1814 _tpl_wl_vk_surface_buffer_clear(wl_vk_surface);
1816 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
1817 wl_vk_surface->sent_message = DESTROY_QUEUE;
1818 tpl_gsource_send_message(wl_vk_surface->surf_source,
1819 wl_vk_surface->sent_message);
/* tbm_queue going NULL is the worker thread's completion signal. */
1820 while (swapchain->tbm_queue)
1821 tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
1822 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
1824 _print_buffer_lists(wl_vk_surface);
1827 wl_vk_surface->swapchain = NULL;
1829 return TPL_ERROR_NONE;
/* Returns the swapchain's backing tbm_surfaces to the caller.
 * If buffers == NULL (not visible here — confirm elided branch) only the
 * count is reported. Otherwise the surfaces are fetched from wayland-tbm,
 * each one internally ref'd (caller unrefs on swapchain destroy), cached in
 * swapchain->swapchain_buffers, and returned via *buffers.
 * The swapchain_buffers array remains owned by the swapchain. */
1833 __tpl_wl_vk_surface_get_swapchain_buffers(tpl_surface_t *surface,
1834 tbm_surface_h **buffers,
1837 TPL_ASSERT(surface);
1838 TPL_ASSERT(surface->backend.data);
1839 TPL_ASSERT(surface->display);
1840 TPL_ASSERT(surface->display->backend.data);
1842 tpl_wl_vk_surface_t *wl_vk_surface =
1843 (tpl_wl_vk_surface_t *)surface->backend.data;
1844 tpl_wl_vk_display_t *wl_vk_display =
1845 (tpl_wl_vk_display_t *)surface->display->backend.data;
1846 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
1847 tpl_result_t ret = TPL_ERROR_NONE;
1850 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
1851 TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
/* wl_event_mutex guards against concurrent wayland event processing. */
1853 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
1856 *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
1857 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1858 return TPL_ERROR_NONE;
1861 swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
1863 sizeof(tbm_surface_h));
1864 if (!swapchain->swapchain_buffers) {
1865 TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
1867 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1868 return TPL_ERROR_OUT_OF_MEMORY;
1871 ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
1872 swapchain->tbm_queue,
1873 swapchain->swapchain_buffers,
1876 TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
1877 wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
1878 free(swapchain->swapchain_buffers);
1879 swapchain->swapchain_buffers = NULL;
1880 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1881 return TPL_ERROR_INVALID_OPERATION;
/* Keep each surface alive for the swapchain's lifetime. */
1884 for (i = 0; i < *buffer_count; i++) {
1885 if (swapchain->swapchain_buffers[i]) {
1886 TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
1887 i, swapchain->swapchain_buffers[i],
1888 _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
1889 tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
1893 *buffers = swapchain->swapchain_buffers;
1895 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
1897 return TPL_ERROR_NONE;
/* tbm user-data free callback (registered in _wl_vk_buffer_create).
 * Runs when the tbm_surface's user data is dropped: unlinks the buffer
 * from the surface's tracking array, destroys the wl_buffer proxy, the
 * explicit-sync buffer_release object, the release fence fd, and the
 * damage-rect array. The wl_vk_buffer struct itself is freed elsewhere
 * (not visible in this chunk — confirm). */
1901 __cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
1903 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
1904 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
1906 TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
1907 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);
1909 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
1910 if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
1911 wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
1912 wl_vk_surface->buffer_cnt--;
1914 wl_vk_buffer->idx = -1;
1916 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
/* Flush so the compositor sees pending requests before proxy teardown. */
1918 wl_display_flush(wl_vk_display->wl_display);
1920 if (wl_vk_buffer->wl_buffer) {
1921 wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
1922 wl_vk_buffer->wl_buffer);
1923 wl_vk_buffer->wl_buffer = NULL;
1926 #if TIZEN_FEATURE_ENABLE
1927 if (wl_vk_buffer->buffer_release) {
1928 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
1929 wl_vk_buffer->buffer_release = NULL;
1933 if (wl_vk_buffer->release_fence_fd != -1) {
1934 close(wl_vk_buffer->release_fence_fd);
1935 wl_vk_buffer->release_fence_fd = -1;
1938 if (wl_vk_buffer->rects) {
1939 free(wl_vk_buffer->rects);
1940 wl_vk_buffer->rects = NULL;
1941 wl_vk_buffer->num_rects = 0;
1944 wl_vk_buffer->tbm_surface = NULL;
1945 wl_vk_buffer->bo_name = -1;
/* Looks up the tpl_wl_vk_buffer_t stored as user data on a tbm_surface.
 * Returns NULL when no wl_vk_buffer has been attached yet. */
1950 static tpl_wl_vk_buffer_t *
1951 _get_wl_vk_buffer(tbm_surface_h tbm_surface)
1953 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1954 tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1955 (void **)&wl_vk_buffer);
1956 return wl_vk_buffer;
/* Returns the wl_vk_buffer tracking a tbm_surface, creating one on first
 * dequeue: allocates the struct, registers it as tbm user data with
 * __cb_wl_vk_buffer_free as destructor, initializes fences/rects/mutex,
 * and slots it into the surface's fixed-size buffers[] array.
 * When the array is full the frontmost slot is evicted (logged as a likely
 * leak); the evicted struct itself is only unlinked here, not freed —
 * its tbm user-data destructor handles that. Returns NULL on OOM. */
1959 static tpl_wl_vk_buffer_t *
1960 _wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
1961 tbm_surface_h tbm_surface)
1963 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
1965 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
1967 if (!wl_vk_buffer) {
1968 wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
1969 TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);
1971 tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1972 (tbm_data_free)__cb_wl_vk_buffer_free);
1973 tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
1976 wl_vk_buffer->wl_buffer = NULL;
1977 wl_vk_buffer->tbm_surface = tbm_surface;
1978 wl_vk_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
1979 wl_vk_buffer->wl_vk_surface = wl_vk_surface;
1981 wl_vk_buffer->status = RELEASED;
/* -1 sentinel == "no fence". */
1983 wl_vk_buffer->acquire_fence_fd = -1;
1984 wl_vk_buffer->release_fence_fd = -1;
1986 wl_vk_buffer->dx = 0;
1987 wl_vk_buffer->dy = 0;
1988 wl_vk_buffer->width = tbm_surface_get_width(tbm_surface);
1989 wl_vk_buffer->height = tbm_surface_get_height(tbm_surface);
1991 wl_vk_buffer->rects = NULL;
1992 wl_vk_buffer->num_rects = 0;
1994 wl_vk_buffer->need_to_commit = TPL_FALSE;
1995 #if TIZEN_FEATURE_ENABLE
1996 wl_vk_buffer->buffer_release = NULL;
1998 tpl_gmutex_init(&wl_vk_buffer->mutex);
1999 tpl_gcond_init(&wl_vk_buffer->cond);
2001 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
/* Find the first free slot in the tracking array. */
2004 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
2005 if (wl_vk_surface->buffers[i] == NULL) break;
2007 /* If this exception is reached,
2008 * it may be a critical memory leak problem. */
2009 if (i == BUFFER_ARRAY_SIZE) {
2010 tpl_wl_vk_buffer_t *evicted_buffer = NULL;
2011 int evicted_idx = 0; /* evict the frontmost buffer */
2013 evicted_buffer = wl_vk_surface->buffers[evicted_idx];
2015 TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
2017 TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
2018 evicted_buffer, evicted_buffer->tbm_surface,
2019 status_to_string[evicted_buffer->status]);
2021 /* [TODO] need to think about whether there will be
2022 * better modifications */
2023 wl_vk_surface->buffer_cnt--;
2024 wl_vk_surface->buffers[evicted_idx] = NULL;
2029 wl_vk_surface->buffer_cnt++;
2030 wl_vk_surface->buffers[i] = wl_vk_buffer;
2031 wl_vk_buffer->idx = i;
2033 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
2035 TPL_INFO("[WL_VK_BUFFER_CREATE]",
2036 "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2037 wl_vk_surface, wl_vk_buffer, tbm_surface,
2038 wl_vk_buffer->bo_name);
2041 return wl_vk_buffer;
/* Backend dequeue entry point.
 * Waits (optionally with timeout_ns; UINT64_MAX means wait forever) until
 * the tbm_queue is dequeueable, dequeues a tbm_surface, refs it for the
 * duration of client ownership, marks its wl_vk_buffer DEQUEUED, and hands
 * out the stored release fence fd (ownership transfers to the caller;
 * the field is reset to -1). Returns NULL on timeout, queue error, or
 * while the reset flag requires swapchain re-creation.
 * Note: TPL_OBJECT lock is dropped around the blocking can-dequeue wait. */
2044 static tbm_surface_h
2045 __tpl_wl_vk_surface_dequeue_buffer(tpl_surface_t *surface,
2046 uint64_t timeout_ns,
2047 int32_t *release_fence)
2049 TPL_ASSERT(surface);
2050 TPL_ASSERT(surface->backend.data);
2051 TPL_ASSERT(surface->display);
2052 TPL_ASSERT(surface->display->backend.data);
2053 TPL_OBJECT_CHECK_RETURN(surface, NULL);
2055 tpl_wl_vk_surface_t *wl_vk_surface =
2056 (tpl_wl_vk_surface_t *)surface->backend.data;
2057 tpl_wl_vk_display_t *wl_vk_display =
2058 (tpl_wl_vk_display_t *)surface->display->backend.data;
2059 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2060 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2062 tbm_surface_h tbm_surface = NULL;
2063 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2065 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
2066 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);
2068 TPL_OBJECT_UNLOCK(surface);
2069 TRACE_BEGIN("WAIT_DEQUEUEABLE");
/* timeout is ns; tbm API takes ms. */
2070 if (timeout_ns != UINT64_MAX) {
2071 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
2072 swapchain->tbm_queue, timeout_ns/1000);
2074 tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 1);
2077 TPL_OBJECT_LOCK(surface);
2079 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
2080 TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
2083 } else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2084 TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p) tsq_err(%d)",
2085 wl_vk_surface, swapchain->tbm_queue, tsq_err);
2089 tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
2091 if (wl_vk_surface->reset) {
2092 TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
2093 swapchain, swapchain->tbm_queue);
2094 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
2098 tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
2101 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p)| tsq_err = %d",
2102 swapchain->tbm_queue, wl_vk_surface, tsq_err);
2103 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Ref held while the client owns the buffer; dropped on enqueue/cancel. */
2107 tbm_surface_internal_ref(tbm_surface);
2109 wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
2110 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");
2112 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2113 wl_vk_buffer->status = DEQUEUED;
2115 if (release_fence) {
2116 #if TIZEN_FEATURE_ENABLE
2117 if (wl_vk_surface->surface_sync) {
/* Hand fence ownership to the caller; reset field to "no fence". */
2118 *release_fence = wl_vk_buffer->release_fence_fd;
2119 TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
2120 wl_vk_surface, wl_vk_buffer, *release_fence);
2121 wl_vk_buffer->release_fence_fd = -1;
2125 *release_fence = -1;
2129 wl_vk_surface->reset = TPL_FALSE;
2131 TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2132 wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
2133 release_fence ? *release_fence : -1);
2135 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2136 tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
/* Returns a previously dequeued buffer to the queue without presenting it.
 * Marks the wl_vk_buffer RELEASED, drops the dequeue-time internal ref,
 * and cancels the dequeue on the tbm_queue.
 * NOTE(review): no NULL check on _get_wl_vk_buffer() is visible before the
 * lock at line 2164 — a guard may exist on an elided line (2163); confirm
 * against the full file, since dequeue_buffer's path does check for NULL. */
2142 __tpl_wl_vk_surface_cancel_buffer(tpl_surface_t *surface,
2143 tbm_surface_h tbm_surface)
2145 TPL_ASSERT(surface);
2146 TPL_ASSERT(surface->backend.data);
2148 tpl_wl_vk_surface_t *wl_vk_surface =
2149 (tpl_wl_vk_surface_t *)surface->backend.data;
2150 tpl_wl_vk_swapchain_t *swapchain = NULL;
2151 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2152 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2154 TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2155 TPL_ERROR_INVALID_PARAMETER);
2157 swapchain = wl_vk_surface->swapchain;
2158 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2159 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
2160 TPL_ERROR_INVALID_PARAMETER);
2162 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2164 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2165 wl_vk_buffer->status = RELEASED;
2166 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
/* Balances the tbm_surface_internal_ref taken at dequeue time. */
2169 tbm_surface_internal_unref(tbm_surface);
2171 TPL_INFO("[CANCEL BUFFER]",
2172 "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
2173 wl_vk_surface, swapchain, tbm_surface,
2174 _get_tbm_surface_bo_name(tbm_surface));
2176 tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
2178 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2179 TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
2180 return TPL_ERROR_INVALID_OPERATION;
2183 return TPL_ERROR_NONE;
/* Backend enqueue (present) entry point.
 * Stores the damage rects and acquire fence on the wl_vk_buffer (taking
 * ownership of acquire_fence; any previous fence fd is closed), marks it
 * ENQUEUED, then enqueues the tbm_surface. The internal ref taken at
 * dequeue time is dropped on both success and failure paths.
 * On rect-copy OOM the enqueue still proceeds without damage info. */
2187 __tpl_wl_vk_surface_enqueue_buffer(tpl_surface_t *surface,
2188 tbm_surface_h tbm_surface,
2189 int num_rects, const int *rects,
2190 int32_t acquire_fence)
2192 TPL_ASSERT(surface);
2193 TPL_ASSERT(surface->display);
2194 TPL_ASSERT(surface->backend.data);
2195 TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2197 tpl_wl_vk_surface_t *wl_vk_surface =
2198 (tpl_wl_vk_surface_t *) surface->backend.data;
2199 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2200 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2201 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2204 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
2205 TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
2206 TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
2207 TPL_ERROR_INVALID_PARAMETER);
2209 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2210 if (!wl_vk_buffer) {
2211 TPL_ERR("Failed to get wl_vk_buffer from tbm_surface(%p)", tbm_surface);
2212 return TPL_ERROR_INVALID_PARAMETER;
2215 bo_name = wl_vk_buffer->bo_name;
2217 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2219 /* If there are received region information, save it to wl_vk_buffer */
2220 if (num_rects && rects) {
/* Replace any rects left over from an earlier frame. */
2221 if (wl_vk_buffer->rects != NULL) {
2222 free(wl_vk_buffer->rects);
2223 wl_vk_buffer->rects = NULL;
2224 wl_vk_buffer->num_rects = 0;
/* Each rect is 4 ints (x, y, w, h — assumed; TODO confirm layout). */
2227 wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2228 wl_vk_buffer->num_rects = num_rects;
2230 if (wl_vk_buffer->rects) {
2231 memcpy((char *)wl_vk_buffer->rects, (char *)rects,
2232 sizeof(int) * 4 * num_rects);
2234 TPL_ERR("Failed to allocate memory for rects info.");
/* Take ownership of the new acquire fence; close a stale one first. */
2238 if (wl_vk_buffer->acquire_fence_fd != -1)
2239 close(wl_vk_buffer->acquire_fence_fd);
2241 wl_vk_buffer->acquire_fence_fd = acquire_fence;
2243 wl_vk_buffer->status = ENQUEUED;
2245 "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
2246 wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
2248 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2250 tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
2252 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2253 tbm_surface_internal_unref(tbm_surface);
2254 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
2255 tbm_surface, wl_vk_surface, tsq_err);
2256 return TPL_ERROR_INVALID_OPERATION;
/* Balances the tbm_surface_internal_ref taken at dequeue time. */
2259 tbm_surface_internal_unref(tbm_surface);
2261 return TPL_ERROR_NONE;
/* Listener for plain wl_buffer.release events (the non-explicit-sync path).
 * The cast is needed because __cb_wl_buffer_release is declared with a
 * struct wl_proxy * parameter rather than struct wl_buffer *. */
2264 static const struct wl_buffer_listener wl_buffer_release_listener = {
2265 (void *)__cb_wl_buffer_release,
/* Worker-thread drain loop: acquires every ready buffer from the swapchain's
 * tbm_surface_queue, lazily creates its wl_buffer, and either commits it
 * immediately or parks it until the next vblank.
 *
 * Returns TPL_ERROR_NONE when the queue is drained,
 * TPL_ERROR_INVALID_PARAMETER if there is no swapchain,
 * TPL_ERROR_INVALID_OPERATION on an acquire failure.
 *
 * NOTE(review): extraction dropped some lines (closing braces / else arms)
 * in this block; comments describe the visible logic only.
 */
2269 _thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
2271 tbm_surface_h tbm_surface = NULL;
2272 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2273 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2274 tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
2275 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2276 tpl_bool_t ready_to_commit = TPL_TRUE;
2278 TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
/* Non-blocking check (timeout 0): loop while a buffer is available. */
2280 while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
2281 tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
2283 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2284 TPL_ERR("Failed to acquire from tbm_queue(%p)",
2285 swapchain->tbm_queue);
2286 return TPL_ERROR_INVALID_OPERATION;
/* Hold a reference until the compositor releases the buffer. */
2289 tbm_surface_internal_ref(tbm_surface);
2291 wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
2292 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2293 "wl_vk_buffer sould be not NULL");
2295 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2297 wl_vk_buffer->status = ACQUIRED;
2299 TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2300 wl_vk_buffer, tbm_surface,
2301 _get_tbm_surface_bo_name(tbm_surface));
/* Create the wl_buffer on first acquire of this tbm_surface. */
2303 if (wl_vk_buffer->wl_buffer == NULL) {
2304 wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2305 wl_vk_display->wl_tbm_client, tbm_surface);
2307 if (!wl_vk_buffer->wl_buffer) {
2308 TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
2309 wl_vk_display->wl_tbm_client, tbm_surface);
/* Only listen for wl_buffer.release when explicit sync will NOT deliver
 * the release via zwp_linux_buffer_release_v1 — otherwise the buffer
 * would be released twice. */
2311 if (wl_vk_buffer->acquire_fence_fd == -1 ||
2312 wl_vk_display->use_explicit_sync == TPL_FALSE) {
2313 wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2314 &wl_buffer_release_listener, wl_vk_buffer);
2318 "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
2319 wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
/* Commit now if vblank throttling is off or this frame's vblank already
 * fired; otherwise queue the buffer for the vblank callback.
 * NOTE(review): the else line between these two arms appears to be
 * missing from this extraction. */
2323 if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done)
2324 ready_to_commit = TPL_TRUE;
2326 wl_vk_buffer->status = WAITING_VBLANK;
2327 __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
2328 ready_to_commit = TPL_FALSE;
2331 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2333 if (ready_to_commit)
2334 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2337 return TPL_ERROR_NONE;
2340 #if TIZEN_FEATURE_ENABLE
/* zwp_linux_buffer_release_v1 "fenced_release" handler (explicit sync path).
 * The compositor is done with the buffer once the given fence fd signals.
 * Takes ownership of 'fence', stores it as release_fence_fd, releases the
 * buffer back into the swapchain's tbm queue, and drops the commit-time
 * tbm_surface reference. Only acts on buffers still in COMMITTED state. */
2342 __cb_buffer_fenced_release(void *data,
2343 struct zwp_linux_buffer_release_v1 *release,
2346 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2347 tbm_surface_h tbm_surface = NULL;
2349 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2351 tbm_surface = wl_vk_buffer->tbm_surface;
2353 if (tbm_surface_internal_is_valid(tbm_surface)) {
2354 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2355 tpl_wl_vk_swapchain_t *swapchain = NULL;
/* The surface/swapchain may already be gone (teardown race); just drop
 * the reference and bail. */
2357 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2358 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2359 tbm_surface_internal_unref(tbm_surface);
2363 swapchain = wl_vk_surface->swapchain;
2365 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2366 if (wl_vk_buffer->status == COMMITTED) {
2367 tbm_surface_queue_error_e tsq_err;
/* One-shot protocol object: destroy after the event is delivered. */
2369 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2370 wl_vk_buffer->buffer_release = NULL;
/* fd ownership transfers here; the consumer of release_fence_fd is
 * responsible for closing it. */
2372 wl_vk_buffer->release_fence_fd = fence;
2373 wl_vk_buffer->status = RELEASED;
2375 TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2376 wl_vk_buffer->bo_name,
2378 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2379 wl_vk_buffer->bo_name);
2382 "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2383 wl_vk_buffer, tbm_surface,
2384 wl_vk_buffer->bo_name,
/* Make the buffer dequeueable again. */
2387 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2389 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2390 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
/* Balance the reference taken when the buffer was acquired/committed. */
2392 tbm_surface_internal_unref(tbm_surface);
2395 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2398 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* zwp_linux_buffer_release_v1 "immediate_release" handler (explicit sync
 * path). Like __cb_buffer_fenced_release but without a fence: the buffer is
 * reusable right away, so release_fence_fd is set to -1. Releases the buffer
 * back into the swapchain's tbm queue and drops the commit-time reference. */
2403 __cb_buffer_immediate_release(void *data,
2404 struct zwp_linux_buffer_release_v1 *release)
2406 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2407 tbm_surface_h tbm_surface = NULL;
2409 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
2411 tbm_surface = wl_vk_buffer->tbm_surface;
2413 if (tbm_surface_internal_is_valid(tbm_surface)) {
2414 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2415 tpl_wl_vk_swapchain_t *swapchain = NULL;
/* Surface/swapchain may already be torn down; drop the ref and bail. */
2417 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2418 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2419 tbm_surface_internal_unref(tbm_surface);
2423 swapchain = wl_vk_surface->swapchain;
2425 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2426 if (wl_vk_buffer->status == COMMITTED) {
2427 tbm_surface_queue_error_e tsq_err;
/* One-shot protocol object: destroy after the event is delivered. */
2429 zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
2430 wl_vk_buffer->buffer_release = NULL;
/* No fence to wait on for an immediate release. */
2432 wl_vk_buffer->release_fence_fd = -1;
2433 wl_vk_buffer->status = RELEASED;
2435 TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
2436 _get_tbm_surface_bo_name(tbm_surface));
2437 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2438 _get_tbm_surface_bo_name(tbm_surface));
2441 "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
2442 wl_vk_buffer, tbm_surface,
2443 _get_tbm_surface_bo_name(tbm_surface));
/* Make the buffer dequeueable again. */
2445 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2447 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2448 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
/* Balance the reference taken when the buffer was acquired/committed. */
2450 tbm_surface_internal_unref(tbm_surface);
2453 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2456 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* Listener for per-commit zwp_linux_buffer_release_v1 objects (explicit
 * sync): fenced_release delivers a release fence fd, immediate_release
 * means the buffer may be reused right away.
 * NOTE(review): "listner" is a pre-existing typo kept for compatibility
 * with references elsewhere in the file. */
2460 static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
2461 __cb_buffer_fenced_release,
2462 __cb_buffer_immediate_release,
/* wl_buffer.release handler (non-explicit-sync path): the compositor no
 * longer scans out this buffer. Releases it back into the swapchain's tbm
 * queue and drops the commit-time tbm_surface reference. Only acts on
 * buffers still in COMMITTED state. The wl_proxy* parameter matches the
 * cast used when this was installed in wl_buffer_release_listener. */
2467 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
2469 tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
2470 tbm_surface_h tbm_surface = NULL;
2472 TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)
2474 tbm_surface = wl_vk_buffer->tbm_surface;
2476 if (tbm_surface_internal_is_valid(tbm_surface)) {
2477 tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
2478 tpl_wl_vk_swapchain_t *swapchain = NULL;
2479 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
/* Surface/swapchain may already be torn down; drop the ref and bail. */
2481 if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
2482 TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
2483 tbm_surface_internal_unref(tbm_surface);
2487 swapchain = wl_vk_surface->swapchain;
2489 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2491 if (wl_vk_buffer->status == COMMITTED) {
/* Make the buffer dequeueable again. */
2493 tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
2495 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2496 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2498 wl_vk_buffer->status = RELEASED;
2500 TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
2501 TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2502 wl_vk_buffer->bo_name);
2504 TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2505 wl_vk_buffer->wl_buffer, tbm_surface,
2506 wl_vk_buffer->bo_name);
/* Balance the reference taken when the buffer was acquired/committed. */
2508 tbm_surface_internal_unref(tbm_surface);
2511 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2513 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* tdm_client vblank-done callback: marks the surface's vblank as done and,
 * if a buffer was queued while waiting (see _thread_surface_queue_acquire),
 * pops the oldest one and commits it now. A TDM timeout is logged but
 * otherwise treated as a normal vblank so rendering does not stall. */
2518 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2519 unsigned int sequence, unsigned int tv_sec,
2520 unsigned int tv_usec, void *user_data)
2522 tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
2523 tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
2525 TRACE_ASYNC_END((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2526 TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface);
2528 if (error == TDM_ERROR_TIMEOUT)
2529 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
2532 wl_vk_surface->vblank_done = TPL_TRUE;
/* Commit the oldest vblank-waiting buffer (FIFO order), if any. */
2534 tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
2535 wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
2536 wl_vk_surface->vblank_waiting_buffers,
2539 _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
2540 tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
/* Requests a one-shot vblank wait from TDM for this surface, lazily creating
 * the tdm_client_vblank object on first use. On success clears vblank_done
 * so commits are throttled until __cb_tdm_client_vblank fires.
 *
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_OUT_OF_MEMORY if the vblank
 * object cannot be created, TPL_ERROR_INVALID_OPERATION if the wait request
 * itself fails. */
2544 _thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
2546 tdm_error tdm_err = TDM_ERROR_NONE;
2547 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2549 if (wl_vk_surface->vblank == NULL) {
2550 wl_vk_surface->vblank =
2551 _thread_create_tdm_client_vblank(wl_vk_display->tdm.tdm_client);
2552 if (!wl_vk_surface->vblank) {
2553 TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
2555 return TPL_ERROR_OUT_OF_MEMORY;
/* post_interval controls how many vblanks to wait (swap interval). */
2559 tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
2560 wl_vk_surface->post_interval,
2561 __cb_tdm_client_vblank,
2562 (void *)wl_vk_surface);
2564 if (tdm_err == TDM_ERROR_NONE) {
2565 wl_vk_surface->vblank_done = TPL_FALSE;
2566 TRACE_ASYNC_BEGIN((intptr_t)wl_vk_surface, "WAIT_VBLANK");
2568 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2569 return TPL_ERROR_INVALID_OPERATION;
2572 return TPL_ERROR_NONE;
/* Worker-thread commit: attaches wl_vk_buffer's wl_buffer to the surface,
 * posts damage, wires up explicit sync (acquire fence + release object) when
 * enabled, then commits and flushes. Marks the buffer COMMITTED and signals
 * its condition variable for any thread blocked on this buffer's state.
 * Finally schedules the next vblank wait when vblank throttling is on.
 *
 * NOTE(review): this chunk is missing physical lines (else arms, some
 * arguments); comments describe the visible logic only.
 */
2576 _thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
2577 tpl_wl_vk_buffer_t *wl_vk_buffer)
2579 tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
2580 struct wl_surface *wl_surface = wl_vk_surface->wl_surface;
2583 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
2584 "wl_vk_buffer sould be not NULL");
/* Late wl_buffer creation fallback, mirroring _thread_surface_queue_acquire:
 * only attach the plain release listener when explicit sync will not deliver
 * the release event. */
2586 if (wl_vk_buffer->wl_buffer == NULL) {
2587 wl_vk_buffer->wl_buffer = wayland_tbm_client_create_buffer(
2588 wl_vk_display->wl_tbm_client,
2589 wl_vk_buffer->tbm_surface);
2590 if (wl_vk_buffer->wl_buffer &&
2591 (wl_vk_buffer->acquire_fence_fd == -1 ||
2592 wl_vk_display->use_explicit_sync == TPL_FALSE)) {
2593 wl_buffer_add_listener(wl_vk_buffer->wl_buffer,
2594 &wl_buffer_release_listener, wl_vk_buffer);
/* Commit cannot proceed without a wl_buffer. */
2597 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
2598 "[FATAL] Failed to create wl_buffer");
/* wl_surface version decides which damage request is available —
 * presumably wl_surface_damage_buffer needs version >= 4; the version
 * check itself is on lines not visible here. TODO confirm. */
2600 version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
2602 wl_surface_attach(wl_surface, wl_vk_buffer->wl_buffer,
2603 wl_vk_buffer->dx, wl_vk_buffer->dy);
/* No damage rects supplied: damage the whole buffer. */
2605 if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
2607 wl_surface_damage(wl_surface,
2608 wl_vk_buffer->dx, wl_vk_buffer->dy,
2609 wl_vk_buffer->width, wl_vk_buffer->height);
2611 wl_surface_damage_buffer(wl_surface,
2613 wl_vk_buffer->width, wl_vk_buffer->height);
/* Per-rect damage; rects are packed as (x, y, w, h) quadruples. */
2617 for (i = 0; i < wl_vk_buffer->num_rects; i++) {
/* Flip Y: incoming rects use a bottom-left origin, wl_surface_damage
 * expects top-left, so y' = height - (y + h). */
2619 wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
2620 wl_vk_buffer->rects[i * 4 + 3]);
2622 wl_surface_damage(wl_surface,
2623 wl_vk_buffer->rects[i * 4 + 0],
2625 wl_vk_buffer->rects[i * 4 + 2],
2626 wl_vk_buffer->rects[i * 4 + 3]);
2628 wl_surface_damage_buffer(wl_surface,
2629 wl_vk_buffer->rects[i * 4 + 0],
2631 wl_vk_buffer->rects[i * 4 + 2],
2632 wl_vk_buffer->rects[i * 4 + 3]);
2637 #if TIZEN_FEATURE_ENABLE
/* Explicit sync: pass the acquire fence to the compositor, then request a
 * per-commit release object so the compositor can hand back a release
 * fence (see zwp_release_listner callbacks). */
2638 if (wl_vk_display->use_explicit_sync &&
2639 wl_vk_surface->surface_sync &&
2640 wl_vk_buffer->acquire_fence_fd != -1) {
2642 zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
2643 wl_vk_buffer->acquire_fence_fd);
2644 TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
2645 wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
/* set_acquire_fence dup()s the fd on the wire; our copy can be closed. */
2646 close(wl_vk_buffer->acquire_fence_fd);
2647 wl_vk_buffer->acquire_fence_fd = -1;
2649 wl_vk_buffer->buffer_release =
2650 zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
2651 if (!wl_vk_buffer->buffer_release) {
2652 TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
2654 zwp_linux_buffer_release_v1_add_listener(
2655 wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
2656 TPL_DEBUG("add explicit_sync_release_listener.");
2661 wl_surface_commit(wl_surface);
/* Flush so the commit reaches the compositor without waiting for the
 * event-loop's next dispatch. */
2663 wl_display_flush(wl_vk_display->wl_display);
2665 TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
2666 wl_vk_buffer->bo_name);
2668 tpl_gmutex_lock(&wl_vk_buffer->mutex);
2670 wl_vk_buffer->need_to_commit = TPL_FALSE;
2671 wl_vk_buffer->status = COMMITTED;
/* Wake any thread waiting for this buffer to reach COMMITTED. */
2673 tpl_gcond_signal(&wl_vk_buffer->cond);
2675 tpl_gmutex_unlock(&wl_vk_buffer->mutex);
2678 "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
2679 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
2680 wl_vk_buffer->bo_name);
/* Throttle the next commit to the display's vblank when enabled. */
2682 if (wl_vk_display->use_wait_vblank &&
2683 _thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
2684 TPL_ERR("Failed to set wait vblank.");
/* Backend-selection probe: returns TPL_TRUE only when the native display
 * handle is a wl_display (checked via _check_native_handle_is_wl_display).
 * NOTE(review): the TRUE-return line is on a line not visible in this
 * extraction. */
2688 __tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
2690 if (!native_dpy) return TPL_FALSE;
2692 if (_check_native_handle_is_wl_display(native_dpy))
/* Populates the display backend vtable with the wl_vk threaded
 * implementation. backend->data stays NULL until __tpl_wl_vk_display_init
 * allocates the per-display state. */
2699 __tpl_display_init_backend_wl_vk_thread(tpl_display_backend_t *backend)
2701 TPL_ASSERT(backend);
2703 backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2704 backend->data = NULL;
2706 backend->init = __tpl_wl_vk_display_init;
2707 backend->fini = __tpl_wl_vk_display_fini;
2708 backend->query_config = __tpl_wl_vk_display_query_config;
2709 backend->filter_config = __tpl_wl_vk_display_filter_config;
2710 backend->query_window_supported_buffer_count =
2711 __tpl_wl_vk_display_query_window_supported_buffer_count;
2712 backend->query_window_supported_present_modes =
2713 __tpl_wl_vk_display_query_window_supported_present_modes;
/* Populates the surface backend vtable with the wl_vk threaded
 * implementation (dequeue/enqueue/swapchain entry points defined in this
 * file). backend->data stays NULL until __tpl_wl_vk_surface_init runs. */
2717 __tpl_surface_init_backend_wl_vk_thread(tpl_surface_backend_t *backend)
2719 TPL_ASSERT(backend);
2721 backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD;
2722 backend->data = NULL;
2724 backend->init = __tpl_wl_vk_surface_init;
2725 backend->fini = __tpl_wl_vk_surface_fini;
2726 backend->validate = __tpl_wl_vk_surface_validate;
2727 backend->cancel_dequeued_buffer =
2728 __tpl_wl_vk_surface_cancel_buffer;
2729 backend->dequeue_buffer = __tpl_wl_vk_surface_dequeue_buffer;
2730 backend->enqueue_buffer = __tpl_wl_vk_surface_enqueue_buffer;
2731 backend->get_swapchain_buffers =
2732 __tpl_wl_vk_surface_get_swapchain_buffers;
2733 backend->create_swapchain = __tpl_wl_vk_surface_create_swapchain;
2734 backend->destroy_swapchain = __tpl_wl_vk_surface_destroy_swapchain;
2735 backend->set_post_interval =
2736 __tpl_wl_vk_surface_set_post_interval;
/* Returns the exported (global) key of the tbm_surface's first bo — used
 * throughout this file as a stable per-buffer id in logs and traces. */
2740 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
2742 return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
/* Debug helper: dumps every slot of the surface's fixed-size buffer array
 * (BUFFER_ARRAY_SIZE entries) with its status string, under buffers_mutex.
 * NOTE(review): the per-slot NULL check appears to be on a line not visible
 * in this extraction — wl_vk_buffer is dereferenced in the log below. */
2746 _print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
2750 tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
2751 TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
2752 wl_vk_surface, wl_vk_surface->buffer_cnt);
2753 for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
2754 tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
2757 "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
2758 idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
2759 wl_vk_buffer->bo_name,
2760 status_to_string[wl_vk_buffer->status]);
2763 tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);