2 #include "tpl_internal.h"
7 #include <sys/eventfd.h>
9 #include <tbm_bufmgr.h>
10 #include <tbm_surface.h>
11 #include <tbm_surface_internal.h>
12 #include <tbm_surface_queue.h>
14 #include <wayland-client.h>
15 #include <wayland-tbm-server.h>
16 #include <wayland-tbm-client.h>
17 #include <wayland-egl-backend.h>
19 #include <tdm_client.h>
21 #include "wayland-egl-tizen/wayland-egl-tizen.h"
22 #include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
24 #ifndef TIZEN_FEATURE_ENABLE
25 #define TIZEN_FEATURE_ENABLE 1
28 #if TIZEN_FEATURE_ENABLE
29 #include <tizen-surface-client-protocol.h>
30 #include <presentation-time-client-protocol.h>
31 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
34 #include "tpl_utils_gthread.h"
36 static int wl_egl_buffer_key;
37 #define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key)
39 /* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
40 #define BUFFER_ARRAY_SIZE 9
42 typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
43 typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
44 typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t;
45 typedef struct _surface_vblank tpl_surface_vblank_t;
47 struct _tpl_wl_egl_display {
48 tpl_gsource *disp_source;
50 tpl_gmutex wl_event_mutex;
52 struct wl_display *wl_display;
53 struct wl_event_queue *ev_queue;
54 struct wayland_tbm_client *wl_tbm_client;
55 int last_error; /* errno of the last wl_display error*/
57 tpl_bool_t wl_initialized;
59 tpl_bool_t use_wait_vblank;
60 tpl_bool_t use_explicit_sync;
63 /* To make sure that tpl_gsource has been successfully finalized. */
64 tpl_bool_t gsource_finalized;
65 tpl_gmutex disp_mutex;
68 tdm_client *tdm_client;
69 tpl_gsource *tdm_source;
71 tpl_bool_t tdm_initialized;
72 tpl_list_t *surface_vblanks;
74 /* To make sure that tpl_gsource has been successfully finalized. */
75 tpl_bool_t gsource_finalized;
80 #if TIZEN_FEATURE_ENABLE
81 struct tizen_surface_shm *tss; /* used for surface buffer_flush */
82 struct wp_presentation *presentation; /* for presentation feedback */
83 struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
/* Message kinds posted to a surface's gsource to request work on the
 * wl_egl_thread (see wl_egl_surface->sent_message).
 * NOTE(review): enumerator list is missing from this view. */
typedef enum surf_message {
/* Per-window backend state; owns the tbm_surface_queue and tracks every
 * in-flight tpl_wl_egl_buffer_t for the window.
 * NOTE(review): some members, the #endif for TIZEN_FEATURE_ENABLE and the
 * closing brace are missing from this view -- comments only added here. */
struct _tpl_wl_egl_surface {
	tpl_gsource *surf_source;                 /* gsource driving this surface on wl_egl_thread */
	tbm_surface_queue_h tbm_queue;            /* render buffer queue shared with the GPU driver */
	struct wl_egl_window *wl_egl_window;      /* native window handle from the app */
	struct wl_surface *wl_surface;
#if TIZEN_FEATURE_ENABLE
	struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
	struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */
	tpl_surface_vblank_t *vblank;             /* per-surface tdm vblank helper */
	/* surface information */
	int latest_transform;
	tpl_wl_egl_display_t *wl_egl_display;     /* back-pointer to owning display */
	tpl_surface_t *tpl_surface;
	/* wl_egl_buffer array for buffer tracing */
	tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE];
	int buffer_cnt; /* the number of using wl_egl_buffers */
	tpl_gmutex buffers_mutex;                 /* guards buffers[] / buffer_cnt */
	tbm_surface_h last_enq_buffer;
	tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
	tpl_gmutex surf_mutex;                    /* guards thread<->caller handshakes */
	surf_message sent_message;                /* last request posted to surf_source */
	/* for waiting draw done */
	tpl_bool_t use_render_done_fence;
	tpl_bool_t is_activated;
	tpl_bool_t reset; /* TRUE if queue reseted by external */
	tpl_bool_t need_to_enqueue;
	tpl_bool_t prerotation_capability;
	tpl_bool_t vblank_done;
	tpl_bool_t set_serial_is_used;
	tpl_bool_t initialized_in_thread;
	/* To make sure that tpl_gsource has been successfully finalized. */
	tpl_bool_t gsource_finalized;
/* Per-surface vblank tracking; nodes of wl_egl_display->tdm.surface_vblanks.
 * NOTE(review): closing brace missing from this view. */
struct _surface_vblank {
	tdm_client_vblank *tdm_vblank;            /* tdm handle used to request vblank events */
	tpl_wl_egl_surface_t *wl_egl_surface;     /* owning surface */
	tpl_list_t *waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
/* Lifecycle states of a tpl_wl_egl_buffer_t, in pipeline order
 * (cf. status_to_string below).
 * NOTE(review): most enumerators are missing from this view. */
typedef enum buffer_status {
	WAITING_SIGNALED, // 4
/* Human-readable names indexed by buffer_status, used by trace/log helpers
 * such as _print_buffer_lists().
 * NOTE(review): entries 0-3 and 6 plus the closing brace are missing from
 * this view; indices must stay in sync with buffer_status. */
static const char *status_to_string[7] = {
	"WAITING_SIGNALED", // 4
	"WAITING_VBLANK", // 5
/* Per-buffer tracking record attached to a tbm_surface via
 * KEY_WL_EGL_BUFFER; one exists for every buffer the surface has dequeued.
 * NOTE(review): some members, #endif and the closing brace are missing
 * from this view -- comments only added here. */
struct _tpl_wl_egl_buffer {
	tbm_surface_h tbm_surface;                /* backing tbm buffer */
	struct wl_proxy *wl_buffer;               /* wl_buffer exported via wayland-tbm */
	int dx, dy; /* position to attach to wl_surface */
	int width, height; /* size to attach to wl_surface */
	buffer_status_t status; /* for tracing buffer status */
	int idx; /* position index in buffers array of wl_egl_surface */
	/* for damage region */
	/* for wayland_tbm_client_set_buffer_transform */
	tpl_bool_t w_rotated;
	/* for wl_surface_set_buffer_transform */
	/* for wayland_tbm_client_set_buffer_serial */
	/* for checking need_to_commit (frontbuffer mode) */
	tpl_bool_t need_to_commit;
	/* for checking draw done */
	tpl_bool_t draw_done;
#if TIZEN_FEATURE_ENABLE
	/* to get release event via zwp_linux_buffer_release_v1 */
	struct zwp_linux_buffer_release_v1 *buffer_release;
	/* each buffers own its release_fence_fd, until it passes ownership
	 */
	int32_t release_fence_fd;
	/* each buffers own its acquire_fence_fd.
	 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
	 * will be passed to display server
	 * Otherwise it will be used as a fence waiting for render done
	 */
	int32_t acquire_fence_fd;
	/* Fd to send a signal when wl_surface_commit with this buffer */
	int32_t commit_sync_fd;
	/* Fd to send a siganl when receive the
	 * presentation feedback from display server */
	int32_t presentation_sync_fd;
	tpl_gsource *waiting_source;              /* fd-watch gsource while waiting draw done */
	tpl_wl_egl_surface_t *wl_egl_surface;     /* back-pointer to owning surface */
#if TIZEN_FEATURE_ENABLE
/* One outstanding wp_presentation feedback request; nodes of
 * wl_egl_surface->presentation_feedbacks.
 * NOTE(review): some members, closing brace and #endif missing from this view. */
struct pst_feedback {
	/* to get presentation feedback from display server */
	struct wp_presentation_feedback *presentation_feedback;
	tpl_wl_egl_surface_t *wl_egl_surface;     /* surface the feedback belongs to */
261 static const struct wl_buffer_listener wl_buffer_release_listener;
264 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
266 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
268 _check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface);
270 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
271 static tpl_wl_egl_buffer_t *
272 _get_wl_egl_buffer(tbm_surface_h tbm_surface);
274 _write_to_eventfd(int eventfd);
276 _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface);
278 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
280 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
281 tpl_wl_egl_buffer_t *wl_egl_buffer);
283 __cb_surface_vblank_free(void *data);
285 static struct tizen_private *
286 tizen_private_create()
288 struct tizen_private *private = NULL;
289 private = (struct tizen_private *)calloc(1, sizeof(struct tizen_private));
291 private->magic = WL_EGL_TIZEN_MAGIC;
292 private->rotation = 0;
293 private->frontbuffer_mode = 0;
294 private->transform = 0;
295 private->window_transform = 0;
298 private->data = NULL;
299 private->rotate_callback = NULL;
300 private->get_rotation_capability = NULL;
301 private->set_window_serial_callback = NULL;
302 private->set_frontbuffer_callback = NULL;
303 private->create_commit_sync_fd = NULL;
304 private->create_presentation_sync_fd = NULL;
305 private->merge_sync_fds = NULL;
312 _check_native_handle_is_wl_display(tpl_handle_t display)
314 struct wl_interface *wl_egl_native_dpy = *(void **) display;
316 if (!wl_egl_native_dpy) {
317 TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy);
321 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
322 is a memory address pointing the structure of wl_display_interface. */
323 if (wl_egl_native_dpy == &wl_display_interface)
326 if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
327 strlen(wl_display_interface.name)) == 0) {
/* gsource dispatch for the tdm fd: lets tdm_client drain pending vblank
 * events on the wl_egl_thread. On unrecoverable errors the tdm gsource
 * is torn down so the thread no longer polls a dead fd.
 * NOTE(review): return type/values and closing braces are missing from
 * this view -- code kept as-is, comments only. */
__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
	tpl_wl_egl_display_t *wl_egl_display = NULL;
	tdm_error tdm_err = TDM_ERROR_NONE;

	wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
	if (!wl_egl_display) {
		TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource);
		TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);

	tdm_err = tdm_client_handle_events(wl_egl_display->tdm.tdm_client);

	/* If an error occurs in tdm_client_handle_events, it cannot be recovered.
	 * When tdm_source is no longer available due to an unexpected situation,
	 * wl_egl_thread must remove it from the thread and destroy it.
	 * In that case, tdm_vblank can no longer be used for surfaces and displays
	 * that used this tdm_source. */
	if (tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
		TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);

		/* destroy in-place (TPL_FALSE): we are already on the thread */
		tpl_gsource_destroy(gsource, TPL_FALSE);

		wl_egl_display->tdm.tdm_source = NULL;
/* gsource finalize for the tdm fd, run on the wl_egl_thread.
 * Frees the per-surface vblank list, destroys the tdm_client, then sets
 * tdm.gsource_finalized and signals tdm_cond so the caller blocked in
 * tpl_gcond_wait() (see display init/fini paths) can proceed.
 * NOTE(review): some closing braces are missing from this view -- code
 * kept as-is, comments only. */
__thread_func_tdm_finalize(tpl_gsource *gsource)
	tpl_wl_egl_display_t *wl_egl_display = NULL;

	wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);

	tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);

	TPL_INFO("[TDM_CLIENT_FINI]",
			 "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
			 wl_egl_display, wl_egl_display->tdm.tdm_client,
			 wl_egl_display->tdm.tdm_display_fd);

	if (wl_egl_display->tdm.tdm_client) {
		if (wl_egl_display->tdm.surface_vblanks) {
			__tpl_list_free(wl_egl_display->tdm.surface_vblanks,
							__cb_surface_vblank_free);
			wl_egl_display->tdm.surface_vblanks = NULL;

		tdm_client_destroy(wl_egl_display->tdm.tdm_client);
		wl_egl_display->tdm.tdm_client = NULL;
		wl_egl_display->tdm.tdm_display_fd = -1;
		wl_egl_display->tdm.tdm_source = NULL;

	wl_egl_display->use_wait_vblank = TPL_FALSE;
	wl_egl_display->tdm.tdm_initialized = TPL_FALSE;
	wl_egl_display->tdm.gsource_finalized = TPL_TRUE;

	/* wake whoever is waiting for this finalize in tpl_gcond_wait() */
	tpl_gcond_signal(&wl_egl_display->tdm.tdm_cond);
	tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
407 static tpl_gsource_functions tdm_funcs = {
410 .dispatch = __thread_func_tdm_dispatch,
411 .finalize = __thread_func_tdm_finalize,
415 _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display)
417 tdm_client *tdm_client = NULL;
418 int tdm_display_fd = -1;
419 tdm_error tdm_err = TDM_ERROR_NONE;
421 tdm_client = tdm_client_create(&tdm_err);
422 if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
423 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
424 return TPL_ERROR_INVALID_OPERATION;
427 tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
428 if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
429 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
430 tdm_client_destroy(tdm_client);
431 return TPL_ERROR_INVALID_OPERATION;
434 wl_egl_display->tdm.tdm_display_fd = tdm_display_fd;
435 wl_egl_display->tdm.tdm_client = tdm_client;
436 wl_egl_display->tdm.tdm_source = NULL;
437 wl_egl_display->tdm.tdm_initialized = TPL_TRUE;
438 wl_egl_display->tdm.surface_vblanks = __tpl_list_alloc();
440 TPL_INFO("[TDM_CLIENT_INIT]",
441 "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
442 wl_egl_display, tdm_client, tdm_display_fd);
444 return TPL_ERROR_NONE;
447 #define IMPL_TIZEN_SURFACE_SHM_VERSION 2
451 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
452 uint32_t name, const char *interface,
455 #if TIZEN_FEATURE_ENABLE
456 tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
458 if (!strcmp(interface, "tizen_surface_shm")) {
459 wl_egl_display->tss =
460 wl_registry_bind(wl_registry,
462 &tizen_surface_shm_interface,
463 ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
464 version : IMPL_TIZEN_SURFACE_SHM_VERSION));
465 wl_egl_display->use_tss = TPL_TRUE;
466 } else if (!strcmp(interface, wp_presentation_interface.name)) {
467 wl_egl_display->presentation =
468 wl_registry_bind(wl_registry,
469 name, &wp_presentation_interface, 1);
470 TPL_DEBUG("bind wp_presentation_interface");
471 } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
472 char *env = tpl_getenv("TPL_EFS");
473 if (env && !atoi(env)) {
474 wl_egl_display->use_explicit_sync = TPL_FALSE;
476 wl_egl_display->explicit_sync =
477 wl_registry_bind(wl_registry, name,
478 &zwp_linux_explicit_synchronization_v1_interface, 1);
479 wl_egl_display->use_explicit_sync = TPL_TRUE;
480 TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
487 __cb_wl_resistry_global_remove_callback(void *data,
488 struct wl_registry *wl_registry,
493 static const struct wl_registry_listener registry_listener = {
494 __cb_wl_resistry_global_callback,
495 __cb_wl_resistry_global_remove_callback
499 _wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display,
500 const char *func_name)
504 strerror_r(errno, buf, sizeof(buf));
506 if (wl_egl_display->last_error == errno)
509 TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
511 dpy_err = wl_display_get_error(wl_egl_display->wl_display);
512 if (dpy_err == EPROTO) {
513 const struct wl_interface *err_interface;
514 uint32_t err_proxy_id, err_code;
515 err_code = wl_display_get_protocol_error(wl_egl_display->wl_display,
518 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
519 err_interface->name, err_code, err_proxy_id);
522 wl_egl_display->last_error = errno;
526 _thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display)
528 struct wl_registry *registry = NULL;
529 struct wl_event_queue *queue = NULL;
530 struct wl_display *display_wrapper = NULL;
531 struct wl_proxy *wl_tbm = NULL;
532 struct wayland_tbm_client *wl_tbm_client = NULL;
534 tpl_result_t result = TPL_ERROR_NONE;
536 queue = wl_display_create_queue(wl_egl_display->wl_display);
538 TPL_ERR("Failed to create wl_queue wl_display(%p)",
539 wl_egl_display->wl_display);
540 result = TPL_ERROR_INVALID_OPERATION;
544 wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display);
545 if (!wl_egl_display->ev_queue) {
546 TPL_ERR("Failed to create wl_queue wl_display(%p)",
547 wl_egl_display->wl_display);
548 result = TPL_ERROR_INVALID_OPERATION;
552 display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display);
553 if (!display_wrapper) {
554 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
555 wl_egl_display->wl_display);
556 result = TPL_ERROR_INVALID_OPERATION;
560 wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
562 registry = wl_display_get_registry(display_wrapper);
564 TPL_ERR("Failed to create wl_registry");
565 result = TPL_ERROR_INVALID_OPERATION;
569 wl_proxy_wrapper_destroy(display_wrapper);
570 display_wrapper = NULL;
572 wl_tbm_client = wayland_tbm_client_init(wl_egl_display->wl_display);
573 if (!wl_tbm_client) {
574 TPL_ERR("Failed to initialize wl_tbm_client.");
575 result = TPL_ERROR_INVALID_CONNECTION;
579 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
581 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
582 result = TPL_ERROR_INVALID_CONNECTION;
586 wl_proxy_set_queue(wl_tbm, wl_egl_display->ev_queue);
587 wl_egl_display->wl_tbm_client = wl_tbm_client;
589 if (wl_registry_add_listener(registry, ®istry_listener,
591 TPL_ERR("Failed to wl_registry_add_listener");
592 result = TPL_ERROR_INVALID_OPERATION;
596 ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue);
598 _wl_display_print_err(wl_egl_display, "roundtrip_queue");
599 result = TPL_ERROR_INVALID_OPERATION;
603 #if TIZEN_FEATURE_ENABLE
604 /* set tizen_surface_shm's queue as client's private queue */
605 if (wl_egl_display->tss) {
606 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss,
607 wl_egl_display->ev_queue);
608 TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss);
611 if (wl_egl_display->presentation) {
612 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation,
613 wl_egl_display->ev_queue);
614 TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.",
615 wl_egl_display->presentation);
618 if (wl_egl_display->explicit_sync) {
619 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync,
620 wl_egl_display->ev_queue);
621 TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.",
622 wl_egl_display->explicit_sync);
625 wl_egl_display->wl_initialized = TPL_TRUE;
627 TPL_INFO("[WAYLAND_INIT]",
628 "wl_egl_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
629 wl_egl_display, wl_egl_display->wl_display,
630 wl_egl_display->wl_tbm_client, wl_egl_display->ev_queue);
631 #if TIZEN_FEATURE_ENABLE
632 TPL_INFO("[WAYLAND_INIT]",
633 "tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)",
634 wl_egl_display->tss, wl_egl_display->presentation,
635 wl_egl_display->explicit_sync);
639 wl_proxy_wrapper_destroy(display_wrapper);
641 wl_registry_destroy(registry);
643 wl_event_queue_destroy(queue);
649 _thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display)
651 /* If wl_egl_display is in prepared state, cancel it */
652 if (wl_egl_display->prepared) {
653 wl_display_cancel_read(wl_egl_display->wl_display);
654 wl_egl_display->prepared = TPL_FALSE;
657 if (wl_display_roundtrip_queue(wl_egl_display->wl_display,
658 wl_egl_display->ev_queue) == -1) {
659 _wl_display_print_err(wl_egl_display, "roundtrip_queue");
662 #if TIZEN_FEATURE_ENABLE
663 if (wl_egl_display->tss) {
664 TPL_INFO("[TIZEN_SURFACE_SHM_DESTROY]",
665 "wl_egl_display(%p) tizen_surface_shm(%p) fini.",
666 wl_egl_display, wl_egl_display->tss);
667 tizen_surface_shm_destroy(wl_egl_display->tss);
668 wl_egl_display->tss = NULL;
671 if (wl_egl_display->presentation) {
672 TPL_INFO("[WP_PRESENTATION_DESTROY]",
673 "wl_egl_display(%p) wp_presentation(%p) fini.",
674 wl_egl_display, wl_egl_display->presentation);
675 wp_presentation_destroy(wl_egl_display->presentation);
676 wl_egl_display->presentation = NULL;
679 if (wl_egl_display->explicit_sync) {
680 TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
681 "wl_egl_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
682 wl_egl_display, wl_egl_display->explicit_sync);
683 zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync);
684 wl_egl_display->explicit_sync = NULL;
687 if (wl_egl_display->wl_tbm_client) {
688 struct wl_proxy *wl_tbm = NULL;
690 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
691 wl_egl_display->wl_tbm_client);
693 wl_proxy_set_queue(wl_tbm, NULL);
696 TPL_INFO("[WL_TBM_DEINIT]",
697 "wl_egl_display(%p) wl_tbm_client(%p)",
698 wl_egl_display, wl_egl_display->wl_tbm_client);
699 wayland_tbm_client_deinit(wl_egl_display->wl_tbm_client);
700 wl_egl_display->wl_tbm_client = NULL;
703 wl_event_queue_destroy(wl_egl_display->ev_queue);
705 wl_egl_display->ev_queue = NULL;
706 wl_egl_display->wl_initialized = TPL_FALSE;
708 TPL_INFO("[DISPLAY_FINI]", "wl_egl_display(%p) wl_display(%p)",
709 wl_egl_display, wl_egl_display->wl_display);
713 _thread_init(void *data)
715 tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
717 if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) {
718 TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)",
719 wl_egl_display, wl_egl_display->wl_display);
722 if (wl_egl_display->use_wait_vblank &&
723 _thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
724 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
727 return wl_egl_display;
/* gsource prepare for the wl_display fd (glib-style prepare/check/dispatch
 * cycle): enters wl_display's prepare-read state, dispatching any events
 * already queued, then flushes outgoing requests before the poll.
 * NOTE(review): return type/values and some closing braces are missing
 * from this view -- code kept as-is, comments only. */
__thread_func_disp_prepare(tpl_gsource *gsource)
	tpl_wl_egl_display_t *wl_egl_display =
		(tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);

	/* If this wl_egl_display is already prepared,
	 * do nothing in this function. */
	if (wl_egl_display->prepared)

	/* If there is a last_error, there is no need to poll,
	 * so skip directly to dispatch.
	 * prepare -> dispatch */
	if (wl_egl_display->last_error)

	/* prepare_read fails while events are still queued; dispatch them
	 * until the read intention can be registered */
	while (wl_display_prepare_read_queue(wl_egl_display->wl_display,
										 wl_egl_display->ev_queue) != 0) {
		if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
											  wl_egl_display->ev_queue) == -1) {
			_wl_display_print_err(wl_egl_display, "dispatch_queue_pending");

	wl_egl_display->prepared = TPL_TRUE;

	/* push out buffered requests before blocking in poll */
	wl_display_flush(wl_egl_display->wl_display);
/* gsource check for the wl_display fd: after poll, either read the events
 * (fd readable) or cancel the read intention registered in prepare.
 * Always leaves the prepared state before returning.
 * NOTE(review): return type, return statements, else keyword and closing
 * braces are missing from this view -- code kept as-is, comments only. */
__thread_func_disp_check(tpl_gsource *gsource)
	tpl_wl_egl_display_t *wl_egl_display =
		(tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
	tpl_bool_t ret = TPL_FALSE;

	if (!wl_egl_display->prepared)

	/* If prepared, but last_error is set,
	 * cancel_read is executed and FALSE is returned.
	 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
	 * and skipping disp_check from prepare to disp_dispatch.
	 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
	if (wl_egl_display->prepared && wl_egl_display->last_error) {
		wl_display_cancel_read(wl_egl_display->wl_display);

	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_read_events(wl_egl_display->wl_display) == -1)
			_wl_display_print_err(wl_egl_display, "read_event");

		/* fd not readable: a registered read intention must be cancelled */
		wl_display_cancel_read(wl_egl_display->wl_display);

	wl_egl_display->prepared = TPL_FALSE;
/* gsource dispatch for the wl_display fd: dispatches events queued on
 * ev_queue under wl_event_mutex, then flushes outgoing requests.
 * NOTE(review): return type/values and some closing braces are missing
 * from this view -- code kept as-is, comments only. */
__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
	tpl_wl_egl_display_t *wl_egl_display =
		(tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);

	/* If there is last_error, SOURCE_REMOVE should be returned
	 * to remove the gsource from the main loop.
	 * This is because wl_egl_display is not valid since last_error was set.*/
	if (wl_egl_display->last_error) {

	/* wl_event_mutex serializes queue dispatch against API threads */
	tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
											  wl_egl_display->ev_queue) == -1) {
			_wl_display_print_err(wl_egl_display, "dispatch_queue_pending");

	wl_display_flush(wl_egl_display->wl_display);
	tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* gsource finalize for the wl_display fd, run on the wl_egl_thread.
 * Tears down the thread-side wayland state, then sets gsource_finalized
 * and signals disp_cond so the caller blocked in tpl_gcond_wait()
 * (display init error path / __tpl_wl_egl_display_fini) can proceed.
 * NOTE(review): return type/value and closing brace are missing from
 * this view -- code kept as-is, comments only. */
__thread_func_disp_finalize(tpl_gsource *gsource)
	tpl_wl_egl_display_t *wl_egl_display =
		(tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);

	tpl_gmutex_lock(&wl_egl_display->disp_mutex);
	TPL_DEBUG("[FINALIZE] wl_egl_display(%p) tpl_gsource(%p)",
			  wl_egl_display, gsource);

	if (wl_egl_display->wl_initialized)
		_thread_wl_display_fini(wl_egl_display);

	wl_egl_display->gsource_finalized = TPL_TRUE;

	/* wake whoever is waiting for this finalize in tpl_gcond_wait() */
	tpl_gcond_signal(&wl_egl_display->disp_cond);
	tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
847 static tpl_gsource_functions disp_funcs = {
848 .prepare = __thread_func_disp_prepare,
849 .check = __thread_func_disp_check,
850 .dispatch = __thread_func_disp_dispatch,
851 .finalize = __thread_func_disp_finalize,
/* Backend entry point for display initialization.
 * Validates the native handle, allocates tpl_wl_egl_display_t, spawns the
 * wl_egl_thread (which runs _thread_init), and attaches the display and
 * optional tdm gsources to it. On any failure after thread creation the
 * gsources are destroyed synchronously (waiting on the finalized flags)
 * before everything is freed.
 * NOTE(review): return type, goto labels for the error path, and several
 * closing braces are missing from this view -- code kept as-is,
 * comments only. */
__tpl_wl_egl_display_init(tpl_display_t *display)
	tpl_wl_egl_display_t *wl_egl_display = NULL;

	/* Do not allow default display in wayland. */
	if (!display->native_handle) {
		TPL_ERR("Invalid native handle for display.");
		return TPL_ERROR_INVALID_PARAMETER;

	if (!_check_native_handle_is_wl_display(display->native_handle)) {
		TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
		return TPL_ERROR_INVALID_PARAMETER;

	wl_egl_display = (tpl_wl_egl_display_t *) calloc(1,
					 sizeof(tpl_wl_egl_display_t));
	if (!wl_egl_display) {
		TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t.");
		return TPL_ERROR_OUT_OF_MEMORY;

	display->backend.data = wl_egl_display;
	display->bufmgr_fd = -1;

	/* explicit defaults (calloc already zeroed the struct) */
	wl_egl_display->tdm.tdm_initialized = TPL_FALSE;
	wl_egl_display->tdm.tdm_client = NULL;
	wl_egl_display->tdm.tdm_display_fd = -1;
	wl_egl_display->tdm.tdm_source = NULL;

	wl_egl_display->wl_initialized = TPL_FALSE;

	wl_egl_display->ev_queue = NULL;
	wl_egl_display->wl_display = (struct wl_display *)display->native_handle;
	wl_egl_display->last_error = 0;
	wl_egl_display->use_tss = TPL_FALSE;
	wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
	wl_egl_display->prepared = TPL_FALSE;
	wl_egl_display->gsource_finalized = TPL_FALSE;

#if TIZEN_FEATURE_ENABLE
	/* Wayland Interfaces */
	wl_egl_display->tss = NULL;
	wl_egl_display->presentation = NULL;
	wl_egl_display->explicit_sync = NULL;

	wl_egl_display->wl_tbm_client = NULL;

	wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled

	/* TPL_WAIT_VBLANK=0 disables tdm vblank waiting */
	char *env = tpl_getenv("TPL_WAIT_VBLANK");
	if (env && !atoi(env)) {
		wl_egl_display->use_wait_vblank = TPL_FALSE;

	tpl_gmutex_init(&wl_egl_display->wl_event_mutex);

	tpl_gmutex_init(&wl_egl_display->disp_mutex);
	tpl_gcond_init(&wl_egl_display->disp_cond);

	/* _thread_init runs on the new thread and performs wayland/tdm setup */
	wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
						 (tpl_gthread_func)_thread_init,
						 (void *)wl_egl_display);
	if (!wl_egl_display->thread) {
		TPL_ERR("Failed to create wl_egl_thread");

	wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread,
								  (void *)wl_egl_display,
								  wl_display_get_fd(wl_egl_display->wl_display),
								  &disp_funcs, SOURCE_TYPE_NORMAL);
	if (!wl_egl_display->disp_source) {
		TPL_ERR("Failed to add native_display(%p) to thread(%p)",
				display->native_handle,
				wl_egl_display->thread);

	if (wl_egl_display->use_wait_vblank &&
		wl_egl_display->tdm.tdm_initialized) {
		tpl_gmutex_init(&wl_egl_display->tdm.tdm_mutex);
		tpl_gcond_init(&wl_egl_display->tdm.tdm_cond);
		wl_egl_display->tdm.tdm_source = tpl_gsource_create(wl_egl_display->thread,
										 (void *)wl_egl_display,
										 wl_egl_display->tdm.tdm_display_fd,
										 &tdm_funcs, SOURCE_TYPE_NORMAL);
		wl_egl_display->tdm.gsource_finalized = TPL_FALSE;
		if (!wl_egl_display->tdm.tdm_source) {
			TPL_ERR("Failed to create tdm_gsource\n");

	/* vblank waiting only works if both tdm init and gsource succeeded */
	wl_egl_display->use_wait_vblank = (wl_egl_display->tdm.tdm_initialized &&
									   (wl_egl_display->tdm.tdm_source != NULL));

	TPL_INFO("[DISPLAY_INIT]",
			 "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
			 wl_egl_display->thread,
			 wl_egl_display->wl_display);

	TPL_INFO("[DISPLAY_INIT]",
			 "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)",
			 wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
			 wl_egl_display->use_tss ? "TRUE" : "FALSE",
			 wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");

	return TPL_ERROR_NONE;

	/* --- error path: synchronous teardown of whatever was created --- */
	if (wl_egl_display->tdm.tdm_source) {
		tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
		// Send destroy mesage to thread
		tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
		/* wait until __thread_func_tdm_finalize has run on the thread */
		while (!wl_egl_display->tdm.gsource_finalized) {
			tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
		tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);

	if (wl_egl_display->disp_source) {
		tpl_gmutex_lock(&wl_egl_display->disp_mutex);
		// Send destroy mesage to thread
		tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
		/* wait until __thread_func_disp_finalize has run on the thread */
		while (!wl_egl_display->gsource_finalized) {
			tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
		tpl_gmutex_unlock(&wl_egl_display->disp_mutex);

	if (wl_egl_display->thread) {
		tpl_gthread_destroy(wl_egl_display->thread);

	tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
	tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
	tpl_gcond_clear(&wl_egl_display->disp_cond);
	tpl_gmutex_clear(&wl_egl_display->disp_mutex);

	wl_egl_display->thread = NULL;
	free(wl_egl_display);

	display->backend.data = NULL;
	return TPL_ERROR_INVALID_OPERATION;
/* Backend entry point for display teardown, mirror of
 * __tpl_wl_egl_display_init(): synchronously destroys the tdm and
 * display gsources (waiting on their finalized flags so the thread-side
 * finalize callbacks have completed), stops the thread, clears the sync
 * primitives, and frees the backend data.
 * NOTE(review): return type and several closing braces are missing from
 * this view -- code kept as-is, comments only. */
__tpl_wl_egl_display_fini(tpl_display_t *display)
	tpl_wl_egl_display_t *wl_egl_display;

	TPL_ASSERT(display);

	wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data;
	if (wl_egl_display) {
		TPL_INFO("[DISPLAY_FINI]",
				 "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
				 wl_egl_display->thread,
				 wl_egl_display->wl_display);

		if (wl_egl_display->tdm.tdm_source && wl_egl_display->tdm.tdm_initialized) {
			/* This is a protection to prevent problems that arise in unexpected situations
			 * that g_cond_wait cannot work normally.
			 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
			 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
			 */
			tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
			// Send destroy mesage to thread
			tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
			while (!wl_egl_display->tdm.gsource_finalized) {
				tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
			wl_egl_display->tdm.tdm_source = NULL;
			tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);

		if (wl_egl_display->disp_source) {
			tpl_gmutex_lock(&wl_egl_display->disp_mutex);
			// Send destroy mesage to thread
			tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
			/* This is a protection to prevent problems that arise in unexpected situations
			 * that g_cond_wait cannot work normally.
			 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
			 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
			 */
			while (!wl_egl_display->gsource_finalized) {
				tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
			wl_egl_display->disp_source = NULL;
			tpl_gmutex_unlock(&wl_egl_display->disp_mutex);

		if (wl_egl_display->thread) {
			tpl_gthread_destroy(wl_egl_display->thread);
			wl_egl_display->thread = NULL;

		tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
		tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
		tpl_gcond_clear(&wl_egl_display->disp_cond);
		tpl_gmutex_clear(&wl_egl_display->disp_mutex);

		tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);

		free(wl_egl_display);

	display->backend.data = NULL;
1073 __tpl_wl_egl_display_query_config(tpl_display_t *display,
1074 tpl_surface_type_t surface_type,
1075 int red_size, int green_size,
1076 int blue_size, int alpha_size,
1077 int color_depth, int *native_visual_id,
1078 tpl_bool_t *is_slow)
1080 TPL_ASSERT(display);
1082 if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
1083 green_size == 8 && blue_size == 8 &&
1084 (color_depth == 32 || color_depth == 24)) {
1086 if (alpha_size == 8) {
1087 if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
1088 if (is_slow) *is_slow = TPL_FALSE;
1089 return TPL_ERROR_NONE;
1091 if (alpha_size == 0) {
1092 if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
1093 if (is_slow) *is_slow = TPL_FALSE;
1094 return TPL_ERROR_NONE;
1098 return TPL_ERROR_INVALID_PARAMETER;
1102 __tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
1105 TPL_IGNORE(display);
1106 TPL_IGNORE(visual_id);
1107 TPL_IGNORE(alpha_size);
1108 return TPL_ERROR_NONE;
/* Report size and format of a native wl_egl_window.
 * Size comes straight from the window; the format is taken from the
 * attached wl_egl_surface when one exists, otherwise a default ARGB/XRGB
 * format is chosen.
 * NOTE(review): the return type, the branch conditions selecting between
 * the ARGB8888/XRGB8888 defaults (presumably on a_size -- confirm), the
 * final return and closing braces are missing from this view -- code
 * kept as-is, comments only. */
__tpl_wl_egl_display_get_window_info(tpl_display_t *display,
									 tpl_handle_t window, int *width,
									 int *height, tbm_format *format,
									 int depth, int a_size)
	tpl_result_t ret = TPL_ERROR_NONE;
	struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window;

	TPL_ASSERT(display);

	if (!wl_egl_window) {
		TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window);
		return TPL_ERROR_INVALID_PARAMETER;

	if (width) *width = wl_egl_window->width;
	if (height) *height = wl_egl_window->height;

	/* prefer the format the surface actually renders with */
	struct tizen_private *tizen_private =
		(struct tizen_private *)wl_egl_window->driver_private;
	if (tizen_private && tizen_private->data) {
		tpl_wl_egl_surface_t *wl_egl_surface =
			(tpl_wl_egl_surface_t *)tizen_private->data;
		*format = wl_egl_surface->format;

		*format = TBM_FORMAT_ARGB8888;

		*format = TBM_FORMAT_XRGB8888;
/* Queries size and format of a native pixmap by resolving it to a
 * tbm_surface through wayland-tbm-server. Out-parameters are written
 * only when non-NULL. Returns TPL_ERROR_INVALID_PARAMETER when the
 * pixmap handle is invalid or cannot be resolved. */
1149 __tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
1150 tpl_handle_t pixmap, int *width,
1151 int *height, tbm_format *format)
1153 tbm_surface_h tbm_surface = NULL;
1156 TPL_ERR("Invalid parameter. tpl_handle_t(%p)", pixmap);
1157 return TPL_ERROR_INVALID_PARAMETER;
1160 tbm_surface = wayland_tbm_server_get_surface(NULL,
1161 (struct wl_resource *)pixmap);
1163 TPL_ERR("Failed to get tbm_surface from wayland_tbm.");
1164 return TPL_ERROR_INVALID_PARAMETER;
1167 if (width) *width = tbm_surface_get_width(tbm_surface);
1168 if (height) *height = tbm_surface_get_height(tbm_surface);
1169 if (format) *format = tbm_surface_get_format(tbm_surface);
1171 return TPL_ERROR_NONE;
/* Resolves a native pixmap handle to its backing tbm_surface_h via
 * wayland-tbm-server; logs and (per the elided tail, presumably) returns
 * NULL on failure. Caller does not take an extra reference here. */
1174 static tbm_surface_h
1175 __tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
1177 tbm_surface_h tbm_surface = NULL;
1181 tbm_surface = wayland_tbm_server_get_surface(NULL,
1182 (struct wl_resource *)pixmap);
1184 TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
/* Backend-selection probe: decides whether native_dpy is a wl_display.
 * First tries an exact pointer match against wl_display_interface, then
 * falls back to comparing the interface name string (handles the case
 * where libwayland was loaded more than once, giving distinct addresses). */
1192 __tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
1194 struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
1196 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
1198 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
1199 is a memory address pointing the structure of wl_display_interface. */
1200 if (wl_egl_native_dpy == &wl_display_interface)
1203 if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
1204 strlen(wl_display_interface.name)) == 0) {
1211 /* -- BEGIN -- wl_egl_window callback functions */
/* wl_egl_window destroy hook. Called when the native window is destroyed.
 * If a wl_egl_surface is still attached this is an abnormal teardown order
 * (the window should outlive eglDestroySurface): under surf_mutex the
 * window/tizen_private callback pointers are detached and tizen_private is
 * freed so later surface fini does not touch the dead window. */
1213 __cb_destroy_callback(void *private)
1215 struct tizen_private *tizen_private = (struct tizen_private *)private;
1216 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1218 if (!tizen_private) {
1219 TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface");
1223 wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1224 if (wl_egl_surface) {
1225 TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.",
1226 wl_egl_surface->wl_egl_window);
1227 TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface.");
/* Detach both directions of the window <-> surface linkage. */
1229 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1230 wl_egl_surface->wl_egl_window->destroy_window_callback = NULL;
1231 wl_egl_surface->wl_egl_window->resize_callback = NULL;
1232 wl_egl_surface->wl_egl_window->driver_private = NULL;
1233 wl_egl_surface->wl_egl_window = NULL;
1234 wl_egl_surface->wl_surface = NULL;
1236 tizen_private->set_window_serial_callback = NULL;
1237 tizen_private->rotate_callback = NULL;
1238 tizen_private->get_rotation_capability = NULL;
1239 tizen_private->set_frontbuffer_callback = NULL;
1240 tizen_private->create_commit_sync_fd = NULL;
1241 tizen_private->create_presentation_sync_fd = NULL;
1242 tizen_private->data = NULL;
1244 free(tizen_private);
1245 tizen_private = NULL;
1246 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1251 __cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
1253 TPL_ASSERT(private);
1254 TPL_ASSERT(wl_egl_window);
1256 struct tizen_private *tizen_private = (struct tizen_private *)private;
1257 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1258 int cur_w, cur_h, req_w, req_h, format;
1260 if (!wl_egl_surface) {
1261 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1266 format = wl_egl_surface->format;
1267 cur_w = wl_egl_surface->width;
1268 cur_h = wl_egl_surface->height;
1269 req_w = wl_egl_window->width;
1270 req_h = wl_egl_window->height;
1272 TPL_INFO("[WINDOW_RESIZE]",
1273 "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
1274 wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
1276 if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
1277 != TBM_SURFACE_QUEUE_ERROR_NONE) {
1278 TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
1282 /* -- END -- wl_egl_window callback functions */
1284 /* -- BEGIN -- wl_egl_window tizen private callback functions */
1286 /* There is no usecase for using prerotation callback below */
/* wl_egl_window rotation hook: records the window's new rotation value
 * (read from tizen_private->rotation) on the attached wl_egl_surface. */
1288 __cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
1290 TPL_ASSERT(private);
1291 TPL_ASSERT(wl_egl_window);
1293 struct tizen_private *tizen_private = (struct tizen_private *)private;
1294 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1295 int rotation = tizen_private->rotation;
1297 if (!wl_egl_surface) {
1298 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1303 TPL_INFO("[WINDOW_ROTATE]",
1304 "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)",
1305 wl_egl_surface, wl_egl_window,
1306 wl_egl_surface->rotation, rotation);
1308 wl_egl_surface->rotation = rotation;
1311 /* There is no usecase for using prerotation callback below */
/* wl_egl_window capability query: reports whether this surface supports
 * pre-rotation. Falls back to CAPABILITY_NONE when no surface is attached. */
1313 __cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
1316 TPL_ASSERT(private);
1317 TPL_ASSERT(wl_egl_window);
1319 int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
1320 struct tizen_private *tizen_private = (struct tizen_private *)private;
1321 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1323 if (!wl_egl_surface) {
1324 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1326 return rotation_capability;
1329 if (wl_egl_surface->prerotation_capability == TPL_TRUE)
1330 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
1332 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
1335 return rotation_capability;
/* wl_egl_window serial hook: stores the caller-provided serial on the
 * surface and marks that an explicit serial is now in use. */
1339 __cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
1340 void *private, unsigned int serial)
1342 TPL_ASSERT(private);
1343 TPL_ASSERT(wl_egl_window);
1345 struct tizen_private *tizen_private = (struct tizen_private *)private;
1346 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1348 if (!wl_egl_surface) {
1349 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1354 wl_egl_surface->set_serial_is_used = TPL_TRUE;
1355 wl_egl_surface->serial = serial;
/* Returns a dup'd fd of the surface's commit-sync eventfd, creating the
 * eventfd lazily on first call (guarded by commit_sync.mutex).
 * Ownership of the returned fd transfers to the caller, who must close it.
 * Returns -1 on failure (per the elided error paths, presumably). */
1359 __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1361 TPL_ASSERT(private);
1362 TPL_ASSERT(wl_egl_window);
1364 int commit_sync_fd = -1;
1366 struct tizen_private *tizen_private = (struct tizen_private *)private;
1367 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1369 if (!wl_egl_surface) {
1370 TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface);
1374 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
/* Fast path: eventfd already exists — just hand out another dup. */
1376 if (wl_egl_surface->commit_sync.fd != -1) {
1377 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1378 TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
1379 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1380 TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)",
1381 wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd);
1382 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1383 return commit_sync_fd;
/* Slow path: create the eventfd, then dup it for the caller. */
1386 wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
1387 if (wl_egl_surface->commit_sync.fd == -1) {
1388 TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)",
1390 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1394 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1396 TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
1397 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1398 TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)",
1399 wl_egl_surface, commit_sync_fd);
1401 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1403 return commit_sync_fd;
1406 #if TIZEN_FEATURE_ENABLE
/* TIZEN_FEATURE_ENABLE only. Same contract as __cb_create_commit_sync_fd
 * but for the presentation-sync eventfd: lazily create under
 * presentation_sync.mutex and hand the caller a dup'd fd they must close. */
1408 __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1410 TPL_ASSERT(private);
1411 TPL_ASSERT(wl_egl_window);
1413 int presentation_sync_fd = -1;
1415 struct tizen_private *tizen_private = (struct tizen_private *)private;
1416 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1418 if (!wl_egl_surface) {
1419 TPL_ERR("Invalid parameter. wl_egl_surface is NULL");
1423 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
/* Fast path: already created — dup and return. */
1424 if (wl_egl_surface->presentation_sync.fd != -1) {
1425 presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1426 TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
1427 wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1428 TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1429 wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1430 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1431 return presentation_sync_fd;
/* Slow path: create the eventfd, then dup it for the caller. */
1434 wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
1435 if (wl_egl_surface->presentation_sync.fd == -1) {
1436 TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)",
1438 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1442 presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1443 TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
1444 wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1445 TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1446 wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1448 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1450 return presentation_sync_fd;
1452 /* -- END -- wl_egl_window tizen private callback functions */
1454 /* -- BEGIN -- tizen_surface_shm_flusher_listener */
/* tizen_surface_shm_flusher "flush" event: the compositor asked the client
 * to flush its buffers, so flush the surface's tbm_surface_queue. */
1455 static void __cb_tss_flusher_flush_callback(void *data,
1456 struct tizen_surface_shm_flusher *tss_flusher)
1458 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1459 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1461 TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1462 wl_egl_surface, wl_egl_surface->tbm_queue);
1464 _print_buffer_lists(wl_egl_surface);
1466 tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
1467 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1468 TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
/* tizen_surface_shm_flusher "free_flush" event: flush only the free
 * (unused) buffers of the surface's tbm_surface_queue. */
1473 static void __cb_tss_flusher_free_flush_callback(void *data,
1474 struct tizen_surface_shm_flusher *tss_flusher)
1476 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1477 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1479 TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1480 wl_egl_surface, wl_egl_surface->tbm_queue);
1482 _print_buffer_lists(wl_egl_surface);
1484 tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
1485 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1486 TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
/* Listener vtable for tizen_surface_shm_flusher events; order must match
 * the protocol: flush, then free_flush. */
1491 static const struct tizen_surface_shm_flusher_listener
1492 tss_flusher_listener = {
1493 __cb_tss_flusher_flush_callback,
1494 __cb_tss_flusher_free_flush_callback
1499 /* -- BEGIN -- tbm_surface_queue callback funstions */
/* tbm_surface_queue reset callback (data = wl_egl_surface). Detects two
 * conditions that require the next frame to use a fresh buffer: a queue
 * size change and an activation-state change reported by wayland-tbm.
 * Sets wl_egl_surface->reset and forwards to the app's reset_cb if set. */
1501 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1504 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1505 tpl_wl_egl_display_t *wl_egl_display = NULL;
1506 tpl_surface_t *surface = NULL;
1507 tpl_bool_t is_activated = TPL_FALSE;
1510 wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1511 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1513 wl_egl_display = wl_egl_surface->wl_egl_display;
1514 TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
1516 surface = wl_egl_surface->tpl_surface;
1517 TPL_CHECK_ON_NULL_RETURN(surface);
1519 /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1520 * the changed window size at the next frame. */
1521 width = tbm_surface_queue_get_width(tbm_queue);
1522 height = tbm_surface_queue_get_height(tbm_queue);
1523 if (surface->width != width || surface->height != height) {
1524 TPL_INFO("[QUEUE_RESIZE]",
1525 "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1526 wl_egl_surface, tbm_queue,
1527 surface->width, surface->height, width, height);
1530 /* When queue_reset_callback is called, if is_activated is different from
1531 * its previous state change the reset flag to TPL_TRUE to get a new buffer
1532 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1533 is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client,
1534 wl_egl_surface->tbm_queue);
1535 if (wl_egl_surface->is_activated != is_activated) {
1537 TPL_INFO("[ACTIVATED]",
1538 "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1539 wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
1541 TPL_LOG_T("[DEACTIVATED]",
1542 " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1543 wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
1547 wl_egl_surface->reset = TPL_TRUE;
/* Notify the owner (e.g. GL driver) that buffers were invalidated. */
1549 if (surface->reset_cb)
1550 surface->reset_cb(surface->reset_data);
/* tbm_surface_queue acquirable callback: wakes the surface's worker thread
 * with an ACQUIRABLE message so it can acquire and commit the new buffer.
 * Skips sending if a message is already pending (sent_message != NONE). */
1554 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1557 TPL_IGNORE(tbm_queue);
1559 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1560 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1562 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1563 if (wl_egl_surface->sent_message == NONE_MESSAGE) {
1564 wl_egl_surface->sent_message = ACQUIRABLE;
1565 tpl_gsource_send_message(wl_egl_surface->surf_source,
1566 wl_egl_surface->sent_message);
1568 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1570 /* -- END -- tbm_surface_queue callback funstions */
/* Runs on the worker thread (from __thread_func_surf_finalize): releases
 * every thread-owned resource of the surface — pending presentation
 * feedbacks and their eventfds, the presentation sync fd, explicit-sync
 * object, shm flusher, tbm_queue, and the per-surface vblank entry. */
1573 _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
1575 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1577 TPL_INFO("[SURFACE_FINI]",
1578 "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
1579 wl_egl_surface, wl_egl_surface->wl_egl_window,
1580 wl_egl_surface->wl_surface);
1581 #if TIZEN_FEATURE_ENABLE
/* Drain outstanding presentation feedbacks: unblock any waiter on the
 * per-feedback eventfd before closing it, then destroy the wl proxy. */
1582 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
1584 if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) {
1585 while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) {
1586 struct pst_feedback *pst_feedback =
1587 (struct pst_feedback *)__tpl_list_pop_front(
1588 wl_egl_surface->presentation_feedbacks, NULL);
1590 _write_to_eventfd(pst_feedback->pst_sync_fd);
1591 close(pst_feedback->pst_sync_fd);
1592 pst_feedback->pst_sync_fd = -1;
1594 wp_presentation_feedback_destroy(pst_feedback->presentation_feedback);
1595 pst_feedback->presentation_feedback = NULL;
1601 __tpl_list_free(wl_egl_surface->presentation_feedbacks, NULL);
1602 wl_egl_surface->presentation_feedbacks = NULL;
/* Signal then close the surface-level presentation sync fd. */
1605 if (wl_egl_surface->presentation_sync.fd != -1) {
1606 _write_to_eventfd(wl_egl_surface->presentation_sync.fd);
1607 close(wl_egl_surface->presentation_sync.fd);
1608 wl_egl_surface->presentation_sync.fd = -1;
1611 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1613 if (wl_egl_surface->surface_sync) {
1614 TPL_INFO("[SURFACE_SYNC_DESTROY]",
1615 "wl_egl_surface(%p) surface_sync(%p)",
1616 wl_egl_surface, wl_egl_surface->surface_sync);
1617 zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync);
1618 wl_egl_surface->surface_sync = NULL;
1621 if (wl_egl_surface->tss_flusher) {
1622 TPL_INFO("[FLUSHER_DESTROY]",
1623 "wl_egl_surface(%p) tss_flusher(%p)",
1624 wl_egl_surface, wl_egl_surface->tss_flusher);
1625 tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher);
1626 wl_egl_surface->tss_flusher = NULL;
1630 if (wl_egl_surface->tbm_queue) {
1631 TPL_INFO("[TBM_QUEUE_DESTROY]",
1632 "wl_egl_surface(%p) tbm_queue(%p)",
1633 wl_egl_surface, wl_egl_surface->tbm_queue);
1634 tbm_surface_queue_destroy(wl_egl_surface->tbm_queue);
1635 wl_egl_surface->tbm_queue = NULL;
/* Free the vblank wait-list first (under its mutex), then remove the
 * vblank entry from the display list; __cb_surface_vblank_free destroys
 * the tdm_vblank and clears the mutex. */
1638 if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
1639 tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
1640 __tpl_list_free(wl_egl_surface->vblank->waiting_buffers, NULL);
1641 wl_egl_surface->vblank->waiting_buffers = NULL;
1642 tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
1645 if (wl_egl_surface->vblank) {
1646 __tpl_list_remove_data(wl_egl_display->tdm.surface_vblanks,
1647 (void *)wl_egl_surface->vblank,
1649 __cb_surface_vblank_free);
1650 wl_egl_surface->vblank = NULL;
/* Worker-thread message dispatcher for a surface gsource.
 * INIT_SURFACE: perform thread-side init and signal the waiting caller
 * (see __tpl_wl_egl_surface_init). ACQUIRABLE: acquire/commit new buffers.
 * sent_message is cleared so callbacks may send the next message. */
1655 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1657 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1659 wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
1661 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1662 if (message == INIT_SURFACE) { /* Initialize surface */
1663 TPL_DEBUG("wl_egl_surface(%p) initialize message received!",
1665 _thread_wl_egl_surface_init(wl_egl_surface);
1666 wl_egl_surface->initialized_in_thread = TPL_TRUE;
1667 tpl_gcond_signal(&wl_egl_surface->surf_cond);
1668 } else if (message == ACQUIRABLE) { /* Acquirable */
1669 TPL_DEBUG("wl_egl_surface(%p) acquirable message received!",
1671 _thread_surface_queue_acquire(wl_egl_surface);
1674 wl_egl_surface->sent_message = NONE_MESSAGE;
1676 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* Worker-thread gsource finalizer: tears down thread-owned surface state,
 * then sets gsource_finalized and signals surf_cond so the caller blocked
 * in __tpl_wl_egl_surface_fini can proceed. */
1682 __thread_func_surf_finalize(tpl_gsource *gsource)
1684 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1686 wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
1687 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1689 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1690 TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)",
1691 wl_egl_surface, gsource);
1693 _thread_wl_egl_surface_fini(wl_egl_surface);
1695 wl_egl_surface->gsource_finalized = TPL_TRUE;
1697 tpl_gcond_signal(&wl_egl_surface->surf_cond);
1698 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* gsource vtable for per-surface sources (prepare/check slots unused here). */
1701 static tpl_gsource_functions surf_funcs = {
1704 .dispatch = __thread_func_surf_dispatch,
1705 .finalize = __thread_func_surf_finalize,
/* Backend surface-init entry point. Allocates a tpl_wl_egl_surface_t,
 * creates its worker-thread gsource, seeds all bookkeeping fields, wires
 * the tizen_private window callbacks, then sends INIT_SURFACE to the
 * worker thread and blocks until thread-side init completes.
 * Returns TPL_ERROR_NONE on success; on gsource-creation failure frees the
 * surface and returns TPL_ERROR_INVALID_OPERATION. */
1709 __tpl_wl_egl_surface_init(tpl_surface_t *surface)
1711 tpl_wl_egl_display_t *wl_egl_display = NULL;
1712 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1713 tpl_gsource *surf_source = NULL;
1715 struct wl_egl_window *wl_egl_window =
1716 (struct wl_egl_window *)surface->native_handle;
1718 TPL_ASSERT(surface);
1719 TPL_ASSERT(surface->display);
1720 TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1721 TPL_ASSERT(surface->native_handle);
1724 (tpl_wl_egl_display_t *)surface->display->backend.data;
1725 if (!wl_egl_display) {
1726 TPL_ERR("Invalid parameter. wl_egl_display(%p)",
1728 return TPL_ERROR_INVALID_PARAMETER;
1731 wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
1732 sizeof(tpl_wl_egl_surface_t));
1733 if (!wl_egl_surface) {
1734 TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
1735 return TPL_ERROR_OUT_OF_MEMORY;
/* The gsource attaches this surface to the display's worker thread. */
1738 surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface,
1739 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
1741 TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)",
1743 goto surf_source_create_fail;
1746 surface->backend.data = (void *)wl_egl_surface;
1747 surface->width = wl_egl_window->width;
1748 surface->height = wl_egl_window->height;
1749 surface->rotation = 0;
/* Cache window geometry/format and link back to the owning tpl_surface. */
1751 wl_egl_surface->tpl_surface = surface;
1752 wl_egl_surface->width = wl_egl_window->width;
1753 wl_egl_surface->height = wl_egl_window->height;
1754 wl_egl_surface->format = surface->format;
1755 wl_egl_surface->num_buffers = surface->num_buffers;
1757 wl_egl_surface->surf_source = surf_source;
1758 wl_egl_surface->wl_egl_window = wl_egl_window;
1759 wl_egl_surface->wl_surface = wl_egl_window->surface;
1761 wl_egl_surface->wl_egl_display = wl_egl_display;
/* Default state flags (calloc already zeroed; set explicitly for clarity). */
1763 wl_egl_surface->reset = TPL_FALSE;
1764 wl_egl_surface->is_activated = TPL_FALSE;
1765 wl_egl_surface->need_to_enqueue = TPL_TRUE;
1766 wl_egl_surface->prerotation_capability = TPL_FALSE;
1767 wl_egl_surface->vblank_done = TPL_TRUE;
1768 wl_egl_surface->use_render_done_fence = TPL_FALSE;
1769 wl_egl_surface->set_serial_is_used = TPL_FALSE;
1770 wl_egl_surface->gsource_finalized = TPL_FALSE;
1771 wl_egl_surface->initialized_in_thread = TPL_FALSE;
1773 wl_egl_surface->latest_transform = -1;
1774 wl_egl_surface->render_done_cnt = 0;
1775 wl_egl_surface->serial = 0;
1777 wl_egl_surface->vblank = NULL;
1778 #if TIZEN_FEATURE_ENABLE
1779 wl_egl_surface->tss_flusher = NULL;
1780 wl_egl_surface->surface_sync = NULL;
1783 wl_egl_surface->post_interval = surface->post_interval;
/* Sync eventfds are created lazily via the tizen_private callbacks. */
1785 wl_egl_surface->commit_sync.fd = -1;
1786 wl_egl_surface->presentation_sync.fd = -1;
1788 wl_egl_surface->sent_message = NONE_MESSAGE;
1792 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1793 wl_egl_surface->buffers[i] = NULL;
1794 wl_egl_surface->buffer_cnt = 0;
1797 wl_egl_surface->last_enq_buffer = NULL;
/* Reuse the window's existing tizen_private, or create a fresh one. */
1800 struct tizen_private *tizen_private = NULL;
1802 if (wl_egl_window->driver_private)
1803 tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
1805 tizen_private = tizen_private_create();
1806 wl_egl_window->driver_private = (void *)tizen_private;
/* Hook the wl_egl_window tizen extension callbacks to this surface. */
1809 if (tizen_private) {
1810 tizen_private->data = (void *)wl_egl_surface;
1811 tizen_private->rotate_callback = (void *)__cb_rotate_callback;
1812 tizen_private->get_rotation_capability = (void *)
1813 __cb_get_rotation_capability;
1814 tizen_private->set_window_serial_callback = (void *)
1815 __cb_set_window_serial_callback;
1816 tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
1817 #if TIZEN_FEATURE_ENABLE
1818 tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
1820 tizen_private->create_presentation_sync_fd = NULL;
1823 wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
1824 wl_egl_window->resize_callback = (void *)__cb_resize_callback;
1828 tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
1829 tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
1831 tpl_gmutex_init(&wl_egl_surface->buffers_mutex);
1833 tpl_gmutex_init(&wl_egl_surface->surf_mutex);
1834 tpl_gcond_init(&wl_egl_surface->surf_cond);
1836 /* Initialize in thread */
/* Send INIT_SURFACE to the worker and block until it flips
 * initialized_in_thread (signalled from __thread_func_surf_dispatch). */
1837 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1838 wl_egl_surface->sent_message = INIT_SURFACE;
1839 tpl_gsource_send_message(wl_egl_surface->surf_source,
1840 wl_egl_surface->sent_message);
1841 while (!wl_egl_surface->initialized_in_thread)
1842 tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
1843 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1845 TPL_ASSERT(wl_egl_surface->tbm_queue);
1847 TPL_INFO("[SURFACE_INIT]",
1848 "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)",
1849 surface, wl_egl_surface, wl_egl_surface->surf_source);
1851 return TPL_ERROR_NONE;
1853 surf_source_create_fail:
1854 free(wl_egl_surface);
1855 surface->backend.data = NULL;
1856 return TPL_ERROR_INVALID_OPERATION;
/* Worker-thread helper: creates the surface's tbm_surface_queue through
 * wayland-tbm. Chooses the tiled-memory queue variant when the buffer
 * manager reports TILED_MEMORY capability, enables GUARANTEE_CYCLE mode,
 * and registers the reset/acquirable callbacks. On any failure after
 * creation the queue is destroyed (per the elided tails, presumably
 * returning NULL). Returns the queue handle on success. */
1859 static tbm_surface_queue_h
1860 _thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface,
1861 struct wayland_tbm_client *wl_tbm_client,
1864 tbm_surface_queue_h tbm_queue = NULL;
1865 tbm_bufmgr bufmgr = NULL;
1866 unsigned int capability;
1868 struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
1869 int width = wl_egl_surface->width;
1870 int height = wl_egl_surface->height;
1871 int format = wl_egl_surface->format;
1873 if (!wl_tbm_client || !wl_surface) {
1874 TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)",
1875 wl_tbm_client, wl_surface);
/* Probe bufmgr capabilities only; the temporary bufmgr is released at once. */
1879 bufmgr = tbm_bufmgr_init(-1);
1880 capability = tbm_bufmgr_get_capability(bufmgr);
1881 tbm_bufmgr_deinit(bufmgr);
1883 if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1884 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1892 tbm_queue = wayland_tbm_client_create_surface_queue(
1902 TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)",
/* GUARANTEE_CYCLE makes the queue hand out buffers in a fixed rotation. */
1907 if (tbm_surface_queue_set_modes(
1908 tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1909 TBM_SURFACE_QUEUE_ERROR_NONE) {
1910 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1912 tbm_surface_queue_destroy(tbm_queue);
1916 if (tbm_surface_queue_add_reset_cb(
1918 __cb_tbm_queue_reset_callback,
1919 (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1920 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1922 tbm_surface_queue_destroy(tbm_queue);
1926 if (tbm_surface_queue_add_acquirable_cb(
1928 __cb_tbm_queue_acquirable_callback,
1929 (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1930 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1932 tbm_surface_queue_destroy(tbm_queue);
/* Creates a tdm_client_vblank on the "primary" output. Fake vblank is
 * enabled (events still fire when the display is off) and sync mode is
 * disabled (asynchronous delivery). Returns NULL on failure (per the
 * elided error-path returns, presumably). */
1939 static tdm_client_vblank*
1940 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1942 tdm_client_vblank *tdm_vblank = NULL;
1943 tdm_client_output *tdm_output = NULL;
1944 tdm_error tdm_err = TDM_ERROR_NONE;
1947 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1951 tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1952 if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1953 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1957 tdm_vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1958 if (!tdm_vblank || tdm_err != TDM_ERROR_NONE) {
1959 TPL_ERR("Failed to create tdm_vblank. tdm_err(%d)", tdm_err);
1963 tdm_client_vblank_set_enable_fake(tdm_vblank, 1);
1964 tdm_client_vblank_set_sync(tdm_vblank, 0);
/* Destructor used by __tpl_list_remove_data for tdm.surface_vblanks
 * entries: destroys the tdm_vblank, clears the entry's mutex, detaches
 * the owning surface's back-pointer, and (per the elided tail, presumably)
 * frees the entry itself. */
1970 __cb_surface_vblank_free(void *data)
1972 TPL_CHECK_ON_NULL_RETURN(data);
1974 tpl_surface_vblank_t *vblank = (tpl_surface_vblank_t *)data;
1975 tpl_wl_egl_surface_t *wl_egl_surface = vblank->wl_egl_surface;
1977 TPL_INFO("[VBLANK_DESTROY]",
1978 "wl_egl_surface(%p) surface_vblank(%p) tdm_vblank(%p)",
1979 wl_egl_surface, vblank,
1980 vblank->tdm_vblank);
1982 tdm_client_vblank_destroy(vblank->tdm_vblank);
1983 vblank->tdm_vblank = NULL;
1984 vblank->wl_egl_surface = NULL;
1985 tpl_gmutex_clear(&vblank->mutex);
1989 wl_egl_surface->vblank = NULL;
/* Worker-thread half of surface init (dispatched via INIT_SURFACE):
 * creates the tbm_queue, optionally a per-surface tdm vblank entry,
 * and the TIZEN_FEATURE_ENABLE extras (shm flusher, explicit sync,
 * presentation-feedback list). */
1993 _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
1995 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1996 tpl_surface_vblank_t *vblank = NULL;
1998 wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
2000 wl_egl_display->wl_tbm_client,
2001 wl_egl_surface->num_buffers);
2002 if (!wl_egl_surface->tbm_queue) {
2003 TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
2004 wl_egl_surface, wl_egl_display->wl_tbm_client);
2008 TPL_INFO("[QUEUE_CREATION]",
2009 "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)",
2010 wl_egl_surface, wl_egl_surface->wl_surface,
2011 wl_egl_display->wl_tbm_client);
2012 TPL_INFO("[QUEUE_CREATION]",
2013 "tbm_queue(%p) size(%d x %d) X %d format(%d)",
2014 wl_egl_surface->tbm_queue,
2015 wl_egl_surface->width,
2016 wl_egl_surface->height,
2017 wl_egl_surface->num_buffers,
2018 wl_egl_surface->format);
/* Per-surface vblank bookkeeping, only when the display waits on vblank. */
2020 if (wl_egl_display->use_wait_vblank) {
2021 vblank = (tpl_surface_vblank_t *)calloc(1, sizeof(tpl_surface_vblank_t));
2023 vblank->tdm_vblank = _thread_create_tdm_client_vblank(
2024 wl_egl_display->tdm.tdm_client);
2025 if (!vblank->tdm_vblank) {
2026 TPL_ERR("Failed to create tdm_vblank from tdm_client(%p)",
2027 wl_egl_display->tdm.tdm_client);
2031 vblank->waiting_buffers = __tpl_list_alloc();
2032 vblank->wl_egl_surface = wl_egl_surface;
2033 tpl_gmutex_init(&vblank->mutex);
/* Register with the display-wide vblank list (freed via
 * __cb_surface_vblank_free on surface fini). */
2035 __tpl_list_push_back(wl_egl_display->tdm.surface_vblanks,
2038 TPL_INFO("[VBLANK_INIT]",
2039 "wl_egl_surface(%p) tdm_client(%p) tdm_vblank(%p)",
2040 wl_egl_surface, wl_egl_display->tdm.tdm_client,
2041 vblank->tdm_vblank);
2046 wl_egl_surface->vblank = vblank;
2047 #if TIZEN_FEATURE_ENABLE
/* Compositor-driven buffer flushing (tizen_surface_shm protocol). */
2048 if (wl_egl_display->tss) {
2049 wl_egl_surface->tss_flusher =
2050 tizen_surface_shm_get_flusher(wl_egl_display->tss,
2051 wl_egl_surface->wl_surface);
2054 if (wl_egl_surface->tss_flusher) {
2055 tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
2056 &tss_flusher_listener,
2058 TPL_INFO("[FLUSHER_INIT]",
2059 "wl_egl_surface(%p) tss_flusher(%p)",
2060 wl_egl_surface, wl_egl_surface->tss_flusher);
/* Explicit sync is best-effort: on failure the feature is disabled
 * display-wide rather than failing surface init. */
2063 if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) {
2064 wl_egl_surface->surface_sync =
2065 zwp_linux_explicit_synchronization_v1_get_synchronization(
2066 wl_egl_display->explicit_sync, wl_egl_surface->wl_surface);
2067 if (wl_egl_surface->surface_sync) {
2068 TPL_INFO("[EXPLICIT_SYNC_INIT]",
2069 "wl_egl_surface(%p) surface_sync(%p)",
2070 wl_egl_surface, wl_egl_surface->surface_sync);
2072 TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)",
2074 wl_egl_display->use_explicit_sync = TPL_FALSE;
2078 wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
/* Releases/cancels every buffer the surface still tracks. Runs with the
 * worker thread paused (pause_in_idle) so the buffer array is stable;
 * the thread is briefly resumed while waiting for in-flight (ENQUEUED..
 * pre-COMMITTED) buffers to reach COMMITTED, with a timed-wait guard
 * against a stuck pipeline. Buffers that were acquired but not released
 * are released back to the queue; DEQUEUED ones get cancel_dequeue. */
2082 _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
2084 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2085 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2086 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2087 tpl_bool_t need_to_release = TPL_FALSE;
2088 tpl_bool_t need_to_cancel = TPL_FALSE;
2089 buffer_status_t status = RELEASED;
2092 tpl_gthread_pause_in_idle(wl_egl_display->thread);
2094 TPL_INFO("[BUFFER_CLEAR]", "BEGIN | wl_egl_surface(%p)", wl_egl_surface);
2096 while (wl_egl_surface->buffer_cnt) {
2097 wl_egl_buffer = wl_egl_surface->buffers[idx];
2099 if (wl_egl_buffer) {
2100 wl_egl_surface->buffers[idx] = NULL;
2101 wl_egl_surface->buffer_cnt--;
2107 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2109 status = wl_egl_buffer->status;
2111 TPL_INFO("[BUFFER]","idx(%d)| wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
2113 wl_egl_buffer->tbm_surface,
2114 status_to_string[status]);
/* In-flight buffer: let the worker thread run until the buffer reaches
 * COMMITTED (signalled on wl_egl_buffer->cond) or the wait times out. */
2116 if (status >= ENQUEUED) {
2117 tpl_result_t wait_result = TPL_ERROR_NONE;
2119 while (status < COMMITTED && wait_result != TPL_ERROR_TIME_OUT) {
2120 tpl_gthread_continue(wl_egl_display->thread);
2121 wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond,
2122 &wl_egl_buffer->mutex,
2124 tpl_gthread_pause_in_idle(wl_egl_display->thread);
2125 status = wl_egl_buffer->status; /* update status */
2127 if (wait_result == TPL_ERROR_TIME_OUT) {
2128 TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p) status(%s)",
2129 wl_egl_buffer, status_to_string[status]);
2134 /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
2135 /* It has been acquired but has not yet been released, so this
2136 * buffer must be released. */
2137 need_to_release = (status >= ACQUIRED && status <= COMMITTED);
2139 /* After dequeue, it has not been enqueued yet
2140 * so cancel_dequeue must be performed. */
2141 need_to_cancel = (status == DEQUEUED);
2143 if (need_to_release) {
2144 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2145 wl_egl_buffer->tbm_surface);
2146 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2147 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
2148 wl_egl_buffer->tbm_surface, tsq_err);
2151 if (need_to_cancel) {
2152 tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
2153 wl_egl_buffer->tbm_surface);
2154 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2155 TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
2156 wl_egl_buffer->tbm_surface, tsq_err);
2159 wl_egl_buffer->status = RELEASED;
2161 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* Drop the ref taken when the buffer was handed back to the queue. */
2163 if (need_to_release || need_to_cancel)
2164 tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
2168 TPL_INFO("[BUFFER_CLEAR]", "END | wl_egl_surface(%p)", wl_egl_surface);
2170 tpl_gthread_continue(wl_egl_display->thread);
/* Backend finalizer for a TPL window surface.
 * Tears down the wl_egl_surface: waits for the worker thread to go idle,
 * clears all tracked buffers, destroys the per-surface gsource (waiting for
 * the finalized flag under surf_mutex), detaches callbacks from the
 * wl_egl_window's tizen_private, clears the sync mutexes, and frees the
 * backend data. Only valid for TPL_SURFACE_TYPE_WINDOW surfaces. */
2174 __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
2176 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
2177 tpl_wl_egl_display_t *wl_egl_display = NULL;
2179 TPL_ASSERT(surface);
2180 TPL_ASSERT(surface->display);
2182 TPL_CHECK_ON_FALSE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW);
2184 wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data;
2185 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
2187 wl_egl_display = wl_egl_surface->wl_egl_display;
2188 TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
2190 TPL_INFO("[SURFACE_FINI][BEGIN]",
2191 "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
2193 wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
/* Make sure the worker thread has no in-flight work before touching
 * shared buffer state. */
2195 tpl_gthread_wait_idle(wl_egl_display->thread);
2197 _tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
2199 if (wl_egl_surface->surf_source) {
2200 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2201 // Send destroy message to thread
2202 tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
2203 /* This is a protection to prevent problems that arise in unexpected situations
2204 * that g_cond_wait cannot work normally.
2205 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
2206 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
2208 while (!wl_egl_surface->gsource_finalized) {
2209 tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
2211 wl_egl_surface->surf_source = NULL;
2212 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2215 _print_buffer_lists(wl_egl_surface);
2217 if (wl_egl_surface->wl_egl_window) {
2218 struct tizen_private *tizen_private = NULL;
2219 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2220 TPL_INFO("[WL_EGL_WINDOW_FINI]",
2221 "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
2222 wl_egl_surface, wl_egl_window,
2223 wl_egl_surface->wl_surface);
/* Unhook every callback installed on the window's tizen_private so the
 * compositor-side window cannot call back into freed state. */
2224 tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
2225 if (tizen_private) {
2226 tizen_private->set_window_serial_callback = NULL;
2227 tizen_private->rotate_callback = NULL;
2228 tizen_private->get_rotation_capability = NULL;
2229 tizen_private->create_presentation_sync_fd = NULL;
2230 tizen_private->create_commit_sync_fd = NULL;
2231 tizen_private->set_frontbuffer_callback = NULL;
2232 tizen_private->merge_sync_fds = NULL;
2233 tizen_private->data = NULL;
2234 free(tizen_private);
2236 wl_egl_window->driver_private = NULL;
2239 wl_egl_window->destroy_window_callback = NULL;
2240 wl_egl_window->resize_callback = NULL;
2242 wl_egl_surface->wl_egl_window = NULL;
2245 wl_egl_surface->last_enq_buffer = NULL;
2247 wl_egl_surface->wl_surface = NULL;
2248 wl_egl_surface->wl_egl_display = NULL;
2249 wl_egl_surface->tpl_surface = NULL;
/* lock/unlock before clear: drains any holder of each sync mutex so
 * tpl_gmutex_clear() is not called on a locked mutex. */
2251 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
2252 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
2253 tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
2255 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2256 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
2257 tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex);
2259 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2260 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2261 tpl_gmutex_clear(&wl_egl_surface->surf_mutex);
2262 tpl_gcond_clear(&wl_egl_surface->surf_cond);
2264 TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface);
2266 free(wl_egl_surface);
2267 surface->backend.data = NULL;
2271 __tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
2274 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
2276 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2278 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
2280 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2282 TPL_INFO("[SET_PREROTATION_CAPABILITY]",
2283 "wl_egl_surface(%p) prerotation capability set to [%s]",
2284 wl_egl_surface, (set ? "TRUE" : "FALSE"));
2286 wl_egl_surface->prerotation_capability = set;
2287 return TPL_ERROR_NONE;
2291 __tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
2294 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
2296 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2298 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
2300 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2302 TPL_INFO("[SET_POST_INTERVAL]",
2303 "wl_egl_surface(%p) post_interval(%d -> %d)",
2304 wl_egl_surface, wl_egl_surface->post_interval, post_interval);
2306 wl_egl_surface->post_interval = post_interval;
2308 return TPL_ERROR_NONE;
2312 __tpl_wl_egl_surface_validate(tpl_surface_t *surface)
2314 tpl_bool_t retval = TPL_TRUE;
2316 TPL_ASSERT(surface);
2317 TPL_ASSERT(surface->backend.data);
2319 tpl_wl_egl_surface_t *wl_egl_surface =
2320 (tpl_wl_egl_surface_t *)surface->backend.data;
2322 retval = !(wl_egl_surface->reset);
2328 __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
2330 tpl_wl_egl_surface_t *wl_egl_surface =
2331 (tpl_wl_egl_surface_t *)surface->backend.data;
2334 *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2336 *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
2339 #define CAN_DEQUEUE_TIMEOUT_MS 10000
/* Forcibly flushes the surface's tbm_surface_queue after a can-dequeue
 * timeout. Pauses the worker thread (so no events race the flush), flushes
 * the queue, then releases/unrefs every tracked buffer whose status is
 * between ENQUEUED (exclusive) and COMMITTED (inclusive) so the queue gets
 * its slots back. Returns TPL_ERROR_NONE or TPL_ERROR_INVALID_OPERATION. */
2342 _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
2344 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2345 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
/* Worker thread must not process queue events while flushing. */
2347 tpl_gthread_pause_in_idle(wl_egl_display->thread);
2349 _print_buffer_lists(wl_egl_surface);
2351 if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
2352 != TBM_SURFACE_QUEUE_ERROR_NONE) {
2353 TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
2354 wl_egl_surface->tbm_queue, tsq_err);
/* Resume the thread on the error path as well, to avoid a stall. */
2355 tpl_gthread_continue(wl_egl_display->thread);
2356 return TPL_ERROR_INVALID_OPERATION;
2361 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2362 for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
2363 buffer_status_t status;
2364 wl_egl_buffer = wl_egl_surface->buffers[i];
2365 if (wl_egl_buffer) {
2366 status = wl_egl_buffer->status;
/* In-flight buffers (past ENQUEUED, up to COMMITTED) are returned to the
 * queue and dropped from tracking; the unref balances the ref taken at
 * acquire time — assumption from the visible acquire path, confirm. */
2371 if (status > ENQUEUED && status <= COMMITTED) {
2372 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2373 wl_egl_buffer->tbm_surface);
2374 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2375 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
2376 wl_egl_buffer->tbm_surface, tsq_err);
2377 tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
2382 TPL_INFO("[FORCE_FLUSH]",
2383 "wl_egl_surface(%p) tbm_queue(%p)",
2384 wl_egl_surface, wl_egl_surface->tbm_queue);
2386 _print_buffer_lists(wl_egl_surface);
2388 tpl_gthread_continue(wl_egl_display->thread);
2390 return TPL_ERROR_NONE;
/* (Re)initializes per-frame state of a wl_egl_buffer before it is handed
 * out by dequeue: clears the draw-done/commit flags, snapshots the window
 * transform state from tizen_private, picks the frame serial, and drops any
 * stale damage-rect array from a previous frame. */
2394 _wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer,
2395 tpl_wl_egl_surface_t *wl_egl_surface)
2397 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2398 struct tizen_private *tizen_private =
2399 (struct tizen_private *)wl_egl_window->driver_private;
2401 TPL_ASSERT(tizen_private);
2403 wl_egl_buffer->draw_done = TPL_FALSE;
2404 wl_egl_buffer->need_to_commit = TPL_TRUE;
2405 #if TIZEN_FEATURE_ENABLE
2406 wl_egl_buffer->buffer_release = NULL;
2408 wl_egl_buffer->transform = tizen_private->transform;
/* Flag a window rotation only when the transform actually changed. */
2410 if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
2411 wl_egl_buffer->w_transform = tizen_private->window_transform;
2412 wl_egl_buffer->w_rotated = TPL_TRUE;
/* Use the application-provided serial when one was set; otherwise take the
 * next value from tizen_private's running counter. */
2415 if (wl_egl_surface->set_serial_is_used) {
2416 wl_egl_buffer->serial = wl_egl_surface->serial;
2418 wl_egl_buffer->serial = ++tizen_private->serial;
2421 if (wl_egl_buffer->rects) {
2422 free(wl_egl_buffer->rects);
2423 wl_egl_buffer->rects = NULL;
2424 wl_egl_buffer->num_rects = 0;
2428 static tpl_wl_egl_buffer_t *
2429 _get_wl_egl_buffer(tbm_surface_h tbm_surface)
2431 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2432 tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2433 (void **)&wl_egl_buffer);
2434 return wl_egl_buffer;
/* Returns the wl_egl_buffer wrapper for a freshly dequeued tbm_surface,
 * creating and registering one (as tbm user data) on first sight. New
 * wrappers are inserted into the surface's fixed-size tracking array; when
 * the array is full the frontmost slot is evicted (flagged as a potential
 * leak). Finally re-initializes per-frame state via _wl_egl_buffer_init(). */
2437 static tpl_wl_egl_buffer_t *
2438 _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
2439 tbm_surface_h tbm_surface)
2441 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2442 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2444 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2446 if (!wl_egl_buffer) {
2447 wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t));
2448 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL);
/* Attach the wrapper to the tbm_surface so later lookups (acquire/release
 * callbacks) can find it; the free callback reclaims it with the surface. */
2450 tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2451 (tbm_data_free)__cb_wl_egl_buffer_free);
2452 tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2455 wl_egl_buffer->wl_buffer = NULL;
2456 wl_egl_buffer->tbm_surface = tbm_surface;
2457 wl_egl_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
2458 wl_egl_buffer->wl_egl_surface = wl_egl_surface;
2460 wl_egl_buffer->status = RELEASED;
/* -1 marks "no fence/fd" throughout this file. */
2462 wl_egl_buffer->acquire_fence_fd = -1;
2463 wl_egl_buffer->commit_sync_fd = -1;
2464 wl_egl_buffer->presentation_sync_fd = -1;
2465 wl_egl_buffer->release_fence_fd = -1;
2467 wl_egl_buffer->dx = wl_egl_window->dx;
2468 wl_egl_buffer->dy = wl_egl_window->dy;
2469 wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
2470 wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
2472 wl_egl_buffer->w_transform = -1;
2474 tpl_gmutex_init(&wl_egl_buffer->mutex);
2475 tpl_gcond_init(&wl_egl_buffer->cond);
2477 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
/* Find the first free slot in the tracking array. */
2480 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
2481 if (wl_egl_surface->buffers[i] == NULL) break;
2483 /* If this exception is reached,
2484 * it may be a critical memory leak problem. */
2485 if (i == BUFFER_ARRAY_SIZE) {
2486 tpl_wl_egl_buffer_t *evicted_buffer = NULL;
2487 int evicted_idx = 0; /* evict the frontmost buffer */
2489 evicted_buffer = wl_egl_surface->buffers[evicted_idx];
2491 TPL_WARN("wl_egl_surface(%p) buffers array is full. evict one.",
2493 TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
2494 evicted_buffer, evicted_buffer->tbm_surface,
2495 status_to_string[evicted_buffer->status]);
2497 /* [TODO] need to think about whether there will be
2498 * better modifications */
2499 wl_egl_surface->buffer_cnt--;
2500 wl_egl_surface->buffers[evicted_idx] = NULL;
2505 wl_egl_surface->buffer_cnt++;
2506 wl_egl_surface->buffers[i] = wl_egl_buffer;
2507 wl_egl_buffer->idx = i;
2509 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
2511 TPL_INFO("[WL_EGL_BUFFER_CREATE]",
2512 "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2513 wl_egl_surface, wl_egl_buffer, tbm_surface,
2514 wl_egl_buffer->bo_name);
/* Runs for both new and recycled wrappers: resets per-frame state. */
2517 _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface);
2519 return wl_egl_buffer;
/* Dequeues the next render target for EGL.
 *
 * Flow: (1) if the queue was reset, optionally wait (bounded) for the last
 * enqueued buffer to reach COMMITTED; (2) wait until the queue can dequeue
 * (with CAN_DEQUEUE_TIMEOUT_MS, force-flushing on timeout); (3) under
 * wl_event_mutex, query the activated state, refresh cached sizes, handle
 * frontbuffer-mode reuse, dequeue a tbm_surface, wrap it in a
 * wl_egl_buffer and mark it DEQUEUED; (4) hand out the release fence when
 * explicit sync is in use.
 *
 * Returns the dequeued tbm_surface_h or NULL on failure. *release_fence
 * receives an fd the caller must wait on before rendering, or -1. */
2522 static tbm_surface_h
2523 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
2524 int32_t *release_fence)
2526 TPL_ASSERT(surface);
2527 TPL_ASSERT(surface->backend.data);
2528 TPL_ASSERT(surface->display);
2529 TPL_ASSERT(surface->display->backend.data);
2530 TPL_OBJECT_CHECK_RETURN(surface, NULL);
2532 tpl_wl_egl_surface_t *wl_egl_surface =
2533 (tpl_wl_egl_surface_t *)surface->backend.data;
2534 tpl_wl_egl_display_t *wl_egl_display =
2535 (tpl_wl_egl_display_t *)surface->display->backend.data;
2536 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2538 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2539 tpl_bool_t is_activated = 0;
2541 tbm_surface_h tbm_surface = NULL;
/* The TPL object lock is dropped while blocking so other TPL calls can
 * proceed; re-taken after the can-dequeue wait below. */
2543 TPL_OBJECT_UNLOCK(surface);
2544 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2545 if (wl_egl_surface->reset == TPL_TRUE) {
2546 if (_check_buffer_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer) &&
2547 tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) {
2548 tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer;
2549 tpl_wl_egl_buffer_t *enqueued_buffer =
2550 _get_wl_egl_buffer(last_enq_buffer);
2552 if (enqueued_buffer) {
2553 tbm_surface_internal_ref(last_enq_buffer);
2554 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2555 tpl_gmutex_lock(&enqueued_buffer->mutex);
/* Wait until the previously enqueued buffer has been committed by the
 * worker thread; a timed wait guards against a stuck thread. */
2556 while (enqueued_buffer->status >= ENQUEUED &&
2557 enqueued_buffer->status < COMMITTED) {
2558 tpl_result_t wait_result;
2559 TPL_INFO("[DEQ_AFTER_RESET]",
2560 "waiting for previous wl_egl_buffer(%p) commit",
2563 wait_result = tpl_gcond_timed_wait(&enqueued_buffer->cond,
2564 &enqueued_buffer->mutex,
2566 if (wait_result == TPL_ERROR_TIME_OUT) {
2567 TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
2572 tpl_gmutex_unlock(&enqueued_buffer->mutex);
2573 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2574 tbm_surface_internal_unref(last_enq_buffer);
2578 wl_egl_surface->last_enq_buffer = NULL;
2580 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2582 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
2583 wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
2584 TPL_OBJECT_LOCK(surface);
/* On timeout, reset the whole queue rather than failing outright. */
2587 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
2588 TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
2589 wl_egl_surface->tbm_queue, surface);
2590 if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
2591 TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
2592 wl_egl_surface->tbm_queue, surface);
2595 tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2599 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2600 TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
2601 wl_egl_surface->tbm_queue, surface);
2605 /* After the can dequeue state, lock the wl_event_mutex to prevent other
2606 * events from being processed in wayland_egl_thread
2607 * during below dequeue procedure. */
2608 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
2610 /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
2611 * below function [wayland_tbm_client_queue_check_activate()].
2612 * This function has to be called before tbm_surface_queue_dequeue()
2613 * in order to know what state the buffer will be dequeued next.
2615 * ACTIVATED state means non-composite mode. Client can get buffers which
2616 can be displayed directly(without compositing).
2617 * DEACTIVATED state means composite mode. Client's buffer will be displayed
2618 by compositor(E20) with compositing.
2620 is_activated = wayland_tbm_client_queue_check_activate(
2621 wl_egl_display->wl_tbm_client,
2622 wl_egl_surface->tbm_queue);
2624 wl_egl_surface->is_activated = is_activated;
2626 surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2627 surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
2628 wl_egl_surface->width = surface->width;
2629 wl_egl_surface->height = surface->height;
2631 if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
2632 /* If surface->frontbuffer is already set in frontbuffer mode,
2633 * it will return that frontbuffer if it is still activated,
2634 * otherwise dequeue the new buffer after initializing
2635 * surface->frontbuffer to NULL. */
2636 if (is_activated && !wl_egl_surface->reset) {
2637 bo_name = _get_tbm_surface_bo_name(surface->frontbuffer);
2640 "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
2641 surface->frontbuffer, bo_name);
2642 TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer,
2643 "[DEQ]~[ENQ] BO_NAME:%d",
2645 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2646 return surface->frontbuffer;
2648 surface->frontbuffer = NULL;
2649 wl_egl_surface->need_to_enqueue = TPL_TRUE;
2652 surface->frontbuffer = NULL;
2655 tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
2658 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d",
2659 wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err);
2660 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Ref held while the buffer is outside the queue; dropped on enqueue or
 * cancel — assumption from the visible enqueue/cancel paths, confirm. */
2664 tbm_surface_internal_ref(tbm_surface);
2666 wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
2667 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
2669 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2670 wl_egl_buffer->status = DEQUEUED;
2672 /* If wl_egl_buffer->release_fence_fd is -1,
2673 * the tbm_surface can be used immediately.
2674 * If not, user(EGL) have to wait until signaled. */
2675 if (release_fence) {
2676 #if TIZEN_FEATURE_ENABLE
2677 if (wl_egl_display->use_explicit_sync) {
2678 *release_fence = wl_egl_buffer->release_fence_fd;
2679 TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
2680 wl_egl_surface, wl_egl_buffer, *release_fence);
/* Ownership of the fence fd transfers to the caller. */
2682 wl_egl_buffer->release_fence_fd = -1;
2686 *release_fence = -1;
2690 if (surface->is_frontbuffer_mode && is_activated)
2691 surface->frontbuffer = tbm_surface;
2693 wl_egl_surface->reset = TPL_FALSE;
2695 TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name);
2696 TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d",
2697 wl_egl_buffer->bo_name);
2698 TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2699 wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name,
2700 release_fence ? *release_fence : -1);
2702 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2703 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Returns a previously dequeued (but unused) tbm_surface to the queue.
 * Marks the wrapper RELEASED, drops the ref taken at dequeue, and calls
 * tbm_surface_queue_cancel_dequeue(). Returns TPL_ERROR_NONE,
 * TPL_ERROR_INVALID_PARAMETER (invalid surface) or
 * TPL_ERROR_INVALID_OPERATION (cancel failed). */
2709 __tpl_wl_egl_surface_cancel_buffer(tpl_surface_t *surface,
2710 tbm_surface_h tbm_surface)
2712 TPL_ASSERT(surface);
2713 TPL_ASSERT(surface->backend.data);
2715 tpl_wl_egl_surface_t *wl_egl_surface =
2716 (tpl_wl_egl_surface_t *)surface->backend.data;
2717 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2718 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2720 if (!tbm_surface_internal_is_valid(tbm_surface)) {
2721 TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
2722 return TPL_ERROR_INVALID_PARAMETER;
2725 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2726 if (wl_egl_buffer) {
2727 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2728 wl_egl_buffer->status = RELEASED;
2729 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* Balances the tbm_surface_internal_ref() taken in dequeue. */
2732 tbm_surface_internal_unref(tbm_surface);
2734 tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
2736 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2737 TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
2738 tbm_surface, surface);
2739 return TPL_ERROR_INVALID_OPERATION;
2742 TPL_INFO("[CANCEL_BUFFER]", "wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
2743 wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
2745 return TPL_ERROR_NONE;
/* Enqueues a rendered tbm_surface for presentation.
 *
 * Stores damage rects and the acquire fence on the wl_egl_buffer, adopts
 * any pending presentation/commit sync fds from the surface, marks the
 * buffer ENQUEUED, and pushes it into the tbm_surface_queue (the worker
 * thread later acquires and commits it). In frontbuffer mode a repeat
 * enqueue of the current frontbuffer is skipped entirely.
 *
 * @param acquire_fence  fd the compositor side must wait on before reading
 *                       the buffer; ownership transfers here (-1 = none).
 * @return TPL_ERROR_NONE, TPL_ERROR_INVALID_PARAMETER,
 *         TPL_ERROR_OUT_OF_MEMORY or TPL_ERROR_INVALID_OPERATION. */
2749 __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
2750 tbm_surface_h tbm_surface,
2751 int num_rects, const int *rects, int32_t acquire_fence)
2753 TPL_ASSERT(surface);
2754 TPL_ASSERT(surface->display);
2755 TPL_ASSERT(surface->backend.data);
2756 TPL_ASSERT(tbm_surface);
2757 TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2759 tpl_wl_egl_surface_t *wl_egl_surface =
2760 (tpl_wl_egl_surface_t *) surface->backend.data;
2761 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2762 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2765 if (!tbm_surface_internal_is_valid(tbm_surface)) {
2766 TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
2768 TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2769 return TPL_ERROR_INVALID_PARAMETER;
2772 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2773 if (!wl_egl_buffer) {
2774 TPL_ERR("Failed to get wl_egl_buffer from tbm_surface(%p)", tbm_surface);
2775 return TPL_ERROR_INVALID_PARAMETER;
2778 bo_name = _get_tbm_surface_bo_name(tbm_surface);
2780 TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
2782 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2784 /* If there are received region information, save it to wl_egl_buffer */
2785 if (num_rects && rects) {
/* Replace, not append: stale rects from the previous frame are dropped. */
2786 if (wl_egl_buffer->rects != NULL) {
2787 free(wl_egl_buffer->rects);
2788 wl_egl_buffer->rects = NULL;
2789 wl_egl_buffer->num_rects = 0;
2792 wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2793 wl_egl_buffer->num_rects = num_rects;
2795 if (!wl_egl_buffer->rects) {
2796 TPL_ERR("Failed to allocate memory fo damage rects info.");
2797 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2798 return TPL_ERROR_OUT_OF_MEMORY;
2801 memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects);
2804 if (!wl_egl_surface->need_to_enqueue ||
2805 !wl_egl_buffer->need_to_commit) {
2806 TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
2807 ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
2808 TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2809 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2810 return TPL_ERROR_NONE;
2813 /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
2814 * commit if surface->frontbuffer that is already set and the tbm_surface
2815 * client want to enqueue are the same.
2817 if (surface->is_frontbuffer_mode) {
2818 /* The first buffer to be activated in frontbuffer mode must be
2819 * committed. Subsequence frames do not need to be committed because
2820 * the buffer is already displayed.
2822 if (surface->frontbuffer == tbm_surface)
2823 wl_egl_surface->need_to_enqueue = TPL_FALSE;
/* Frontbuffer repeat: the fence is not forwarded; close it here so the
 * fd does not leak. */
2825 if (acquire_fence != -1) {
2826 close(acquire_fence);
2831 if (wl_egl_buffer->acquire_fence_fd != -1)
2832 close(wl_egl_buffer->acquire_fence_fd);
2834 wl_egl_buffer->acquire_fence_fd = acquire_fence;
/* Adopt pending per-surface sync fds into this frame's buffer. */
2836 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2837 if (wl_egl_surface->presentation_sync.fd != -1) {
2838 wl_egl_buffer->presentation_sync_fd = wl_egl_surface->presentation_sync.fd;
2839 wl_egl_surface->presentation_sync.fd = -1;
2841 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
2843 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
2844 if (wl_egl_surface->commit_sync.fd != -1) {
2845 wl_egl_buffer->commit_sync_fd = wl_egl_surface->commit_sync.fd;
2846 wl_egl_surface->commit_sync.fd = -1;
2847 TRACE_ASYNC_BEGIN(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
2848 _get_tbm_surface_bo_name(tbm_surface));
2850 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
2852 wl_egl_buffer->status = ENQUEUED;
2854 "[ENQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2855 wl_egl_buffer, tbm_surface, bo_name, acquire_fence);
2857 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2859 tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
2861 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2862 tbm_surface_internal_unref(tbm_surface);
2863 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
2864 tbm_surface, wl_egl_surface, tsq_err);
2865 TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2866 return TPL_ERROR_INVALID_OPERATION;
2869 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2870 wl_egl_surface->last_enq_buffer = tbm_surface;
2871 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* Drops the dequeue-time ref; the queue now owns the surface again. */
2873 tbm_surface_internal_unref(tbm_surface);
2875 TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2877 return TPL_ERROR_NONE;
/* gsource dispatch run in the worker thread when a buffer's acquire fence
 * fd signals (render done). Closes the fence, moves the buffer to
 * WAITING_VBLANK, and either commits it immediately (no vblank tracking or
 * vblank already done) or parks it on the vblank waiting list. */
2881 __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message)
2883 tpl_wl_egl_buffer_t *wl_egl_buffer =
2884 (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
2885 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2886 tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
2888 wl_egl_surface->render_done_cnt++;
2890 TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2891 wl_egl_buffer->acquire_fence_fd);
2893 TPL_DEBUG("[RENDER DONE] wl_egl_buffer(%p) tbm_surface(%p)",
2894 wl_egl_buffer, tbm_surface);
2896 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2897 wl_egl_buffer->status = WAITING_VBLANK;
2899 TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
2900 wl_egl_buffer, wl_egl_buffer->waiting_source,
2901 wl_egl_buffer->acquire_fence_fd);
/* The fence has signaled; close it and forget the disposable source
 * (the gsource itself is torn down by the gthread machinery). */
2903 close(wl_egl_buffer->acquire_fence_fd);
2904 wl_egl_buffer->acquire_fence_fd = -1;
2905 wl_egl_buffer->waiting_source = NULL;
2907 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2909 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2911 if (wl_egl_surface->vblank == NULL || wl_egl_surface->vblank_done)
2912 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2914 tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
2915 __tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers,
2917 tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
2920 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2926 __thread_func_waiting_source_finalize(tpl_gsource *gsource)
2928 TPL_IGNORE(gsource);
2931 static tpl_gsource_functions buffer_funcs = {
2934 .dispatch = __thread_func_waiting_source_dispatch,
2935 .finalize = __thread_func_waiting_source_finalize,
/* Worker-thread routine: drains every acquirable buffer from the
 * tbm_surface_queue. For each buffer it decides the commit path:
 *  - explicit sync in use: commit with the acquire fence attached;
 *  - acquire fence but no explicit sync: create a disposable gsource that
 *    waits on the fence fd (WAITING_SIGNALED) and commits later;
 *  - no fence: commit now, or park on the vblank waiting list when a
 *    vblank is still outstanding.
 * Returns TPL_ERROR_NONE or TPL_ERROR_INVALID_OPERATION on acquire failure. */
2939 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
2941 tbm_surface_h tbm_surface = NULL;
2942 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2943 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2944 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2945 tpl_bool_t ready_to_commit = TPL_FALSE;
2947 while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) {
2948 tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue,
2950 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2951 TPL_ERR("Failed to acquire from tbm_queue(%p)",
2952 wl_egl_surface->tbm_queue);
2953 return TPL_ERROR_INVALID_OPERATION;
/* Hold a ref while the buffer is out with the compositor; the release
 * callbacks unref it. */
2956 tbm_surface_internal_ref(tbm_surface);
2958 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2959 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
2960 "wl_egl_buffer sould be not NULL");
2962 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2964 wl_egl_buffer->status = ACQUIRED;
2966 TPL_LOG_T("WL_EGL", "[ACQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2967 wl_egl_buffer, tbm_surface,
2968 _get_tbm_surface_bo_name(tbm_surface));
2970 if (wl_egl_buffer->acquire_fence_fd != -1) {
2971 #if TIZEN_FEATURE_ENABLE
2972 if (wl_egl_display->use_explicit_sync)
2973 ready_to_commit = TPL_TRUE;
/* A previous wait source should not exist here; destroy defensively. */
2977 if (wl_egl_buffer->waiting_source) {
2978 tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
2979 wl_egl_buffer->waiting_source = NULL;
2982 wl_egl_buffer->waiting_source =
2983 tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
2984 wl_egl_buffer->acquire_fence_fd, &buffer_funcs,
2985 SOURCE_TYPE_DISPOSABLE);
2986 wl_egl_buffer->status = WAITING_SIGNALED;
2988 TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2989 wl_egl_buffer->acquire_fence_fd);
2991 ready_to_commit = TPL_FALSE;
2994 ready_to_commit = TPL_TRUE;
2997 if (ready_to_commit) {
2998 if (wl_egl_surface->vblank == NULL || wl_egl_surface->vblank_done)
2999 ready_to_commit = TPL_TRUE;
3001 wl_egl_buffer->status = WAITING_VBLANK;
3002 tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
3003 __tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers, wl_egl_buffer);
3004 tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
3005 ready_to_commit = TPL_FALSE;
3009 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3011 if (ready_to_commit)
3012 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
3015 return TPL_ERROR_NONE;
3018 /* -- BEGIN -- tdm_client vblank callback function */
/* tdm_client vblank callback (worker thread). Marks vblank done and commits
 * buffer(s) parked on the vblank waiting list: normally exactly one per
 * vblank; on TDM_ERROR_TIMEOUT the whole list is flushed so no buffer is
 * stranded. */
3020 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
3021 unsigned int sequence, unsigned int tv_sec,
3022 unsigned int tv_usec, void *user_data)
3024 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data;
3025 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
3027 TRACE_ASYNC_END((intptr_t)wl_egl_surface, "WAIT_VBLANK");
3028 TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface);
3030 if (error == TDM_ERROR_TIMEOUT)
3031 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)",
3034 wl_egl_surface->vblank_done = TPL_TRUE;
3036 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
3037 if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
3038 tpl_bool_t is_empty = TPL_TRUE;
/* Pop under the vblank mutex, commit outside of it. */
3040 tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
3041 wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(
3042 wl_egl_surface->vblank->waiting_buffers,
3044 is_empty = __tpl_list_is_empty(wl_egl_surface->vblank->waiting_buffers);
3045 tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
3047 if (!wl_egl_buffer) break;
3049 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
3051 /* If tdm error such as TIMEOUT occured,
3052 * flush all vblank waiting buffers of its wl_egl_surface.
3053 * Otherwise, only one wl_egl_buffer will be commited per one vblank event.
3055 if (error == TDM_ERROR_NONE) break;
3056 } while (!is_empty);
3058 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
3060 /* -- END -- tdm_client vblank callback function */
3062 #if TIZEN_FEATURE_ENABLE
/* zwp_linux_buffer_release_v1 "fenced_release" listener: the compositor is
 * done with a COMMITTED buffer and hands back a release fence fd. Stores
 * the fence (handed to the next dequeuer), marks the buffer RELEASED,
 * returns it to the tbm_queue, and drops the acquire-time ref. */
3064 __cb_buffer_fenced_release(void *data,
3065 struct zwp_linux_buffer_release_v1 *release, int32_t fence)
3067 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
3068 tbm_surface_h tbm_surface = NULL;
3070 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
3072 tbm_surface = wl_egl_buffer->tbm_surface;
3074 if (tbm_surface_internal_is_valid(tbm_surface)) {
3075 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
3077 tpl_gmutex_lock(&wl_egl_buffer->mutex);
/* Only a COMMITTED buffer may transition to RELEASED here; other states
 * mean a stale/duplicate event and are ignored. */
3078 if (wl_egl_buffer->status == COMMITTED) {
3079 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3081 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
3082 wl_egl_buffer->buffer_release = NULL;
3084 wl_egl_buffer->release_fence_fd = fence;
3085 wl_egl_buffer->status = RELEASED;
3087 TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
3088 _get_tbm_surface_bo_name(tbm_surface),
3090 TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3091 _get_tbm_surface_bo_name(tbm_surface));
3094 "[FENCED_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
3095 wl_egl_buffer, tbm_surface,
3096 _get_tbm_surface_bo_name(tbm_surface),
3099 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
3101 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
3102 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
3105 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* Unref only when the queue release succeeded (balances acquire ref). */
3107 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
3108 tbm_surface_internal_unref(tbm_surface);
3111 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* zwp_linux_buffer_release_v1 "immediate_release" listener: same path as
 * the fenced variant but without a release fence (buffer is reusable right
 * away, release_fence_fd is set to -1). */
3116 __cb_buffer_immediate_release(void *data,
3117 struct zwp_linux_buffer_release_v1 *release)
3119 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
3120 tbm_surface_h tbm_surface = NULL;
3122 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
3124 tbm_surface = wl_egl_buffer->tbm_surface;
3126 if (tbm_surface_internal_is_valid(tbm_surface)) {
3127 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
3129 tpl_gmutex_lock(&wl_egl_buffer->mutex);
3130 if (wl_egl_buffer->status == COMMITTED) {
3131 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3133 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
3134 wl_egl_buffer->buffer_release = NULL;
/* No fence: the buffer may be reused immediately. */
3136 wl_egl_buffer->release_fence_fd = -1;
3137 wl_egl_buffer->status = RELEASED;
3139 TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
3140 _get_tbm_surface_bo_name(tbm_surface));
3141 TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3142 _get_tbm_surface_bo_name(tbm_surface));
3145 "[IMMEDIATE_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
3146 wl_egl_buffer, tbm_surface,
3147 _get_tbm_surface_bo_name(tbm_surface));
3149 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
3151 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
3152 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
3155 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3157 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
3158 tbm_surface_internal_unref(tbm_surface);
3161 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
3165 static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
3166 __cb_buffer_fenced_release,
3167 __cb_buffer_immediate_release,
/* wl_buffer "release" listener (legacy, non-explicit-sync path): the
 * compositor returned the buffer. Releases a COMMITTED buffer back to the
 * tbm_queue, marks it RELEASED, and drops the acquire-time ref. */
3172 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
3174 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
3175 tbm_surface_h tbm_surface = NULL;
3177 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer)
3179 tbm_surface = wl_egl_buffer->tbm_surface;
3181 if (tbm_surface_internal_is_valid(tbm_surface)) {
3182 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3183 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
3185 tpl_gmutex_lock(&wl_egl_buffer->mutex);
/* Only COMMITTED buffers are eligible; duplicate releases are ignored. */
3187 if (wl_egl_buffer->status == COMMITTED) {
3189 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
3191 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
3192 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
3194 wl_egl_buffer->status = RELEASED;
3196 TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
3197 TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3198 _get_tbm_surface_bo_name(tbm_surface));
3200 TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
3201 wl_egl_buffer->wl_buffer, tbm_surface,
3202 _get_tbm_surface_bo_name(tbm_surface));
3205 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3207 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
3208 tbm_surface_internal_unref(tbm_surface);
3210 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
3214 static const struct wl_buffer_listener wl_buffer_release_listener = {
3215 (void *)__cb_wl_buffer_release,
3217 #if TIZEN_FEATURE_ENABLE
/* wp_presentation_feedback "sync_output" event: intentionally unused; this
 * backend does not track which wl_output the feedback is tied to.
 *
 * NOTE(review): reconstructed from a garbled extraction — the return-type
 * line, the TPL_IGNORE(data)/TPL_IGNORE(output) lines and braces were
 * missing. */
static void
__cb_presentation_feedback_sync_output(void *data,
		struct wp_presentation_feedback *presentation_feedback,
		struct wl_output *output)
{
	TPL_IGNORE(data);
	TPL_IGNORE(presentation_feedback);
	TPL_IGNORE(output);
}
/* wp_presentation_feedback.presented handler.
 * Fired when the committed buffer was actually displayed. Signals the
 * associated presentation_sync eventfd (if any) so waiters wake up, then
 * destroys the feedback proxy and detaches this pst_feedback from the
 * surface's pending-feedback list. The timing fields are not used here. */
3230 __cb_presentation_feedback_presented(void *data,
3231 struct wp_presentation_feedback *presentation_feedback,
3235 uint32_t refresh_nsec,
3240 TPL_IGNORE(tv_sec_hi);
3241 TPL_IGNORE(tv_sec_lo);
3242 TPL_IGNORE(tv_nsec);
3243 TPL_IGNORE(refresh_nsec);
3248 struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
3249 tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
/* presentation_sync.mutex guards both the sync fd and the feedback list. */
3251 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
3253 TPL_DEBUG("[FEEDBACK][PRESENTED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
3254 pst_feedback, presentation_feedback, pst_feedback->bo_name);
3256 if (pst_feedback->pst_sync_fd != -1) {
3257 int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
3259 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
3260 pst_feedback->pst_sync_fd);
3263 TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
3264 "[PRESENTATION_SYNC] bo(%d)",
3265 pst_feedback->bo_name);
/* fd ownership ends here: close and mark consumed to prevent double close. */
3267 close(pst_feedback->pst_sync_fd);
3268 pst_feedback->pst_sync_fd = -1;
3271 wp_presentation_feedback_destroy(presentation_feedback);
3273 pst_feedback->presentation_feedback = NULL;
3274 pst_feedback->wl_egl_surface = NULL;
3275 pst_feedback->bo_name = 0;
3277 __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
3282 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* wp_presentation_feedback.discarded handler.
 * Fired when the buffer was never shown (e.g. replaced before presentation).
 * Mirrors the presented handler exactly: waiters on the sync fd must still
 * be released, otherwise they would block forever on a frame that will
 * never be displayed. */
3286 __cb_presentation_feedback_discarded(void *data,
3287 struct wp_presentation_feedback *presentation_feedback)
3289 struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
3290 tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
3292 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
3294 TPL_DEBUG("[FEEDBACK][DISCARDED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
3295 pst_feedback, presentation_feedback, pst_feedback->bo_name);
3297 if (pst_feedback->pst_sync_fd != -1) {
3298 int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
3300 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
3301 pst_feedback->pst_sync_fd);
3304 TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
3305 "[PRESENTATION_SYNC] bo(%d)",
3306 pst_feedback->bo_name);
3308 close(pst_feedback->pst_sync_fd);
3309 pst_feedback->pst_sync_fd = -1;
3312 wp_presentation_feedback_destroy(presentation_feedback);
3314 pst_feedback->presentation_feedback = NULL;
3315 pst_feedback->wl_egl_surface = NULL;
3316 pst_feedback->bo_name = 0;
3318 __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
3323 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* wp_presentation_feedback listener: sync_output is ignored; presented and
 * discarded both resolve the per-frame presentation_sync eventfd. */
3326 static const struct wp_presentation_feedback_listener feedback_listener = {
3327 __cb_presentation_feedback_sync_output, /* sync_output feedback -*/
3328 __cb_presentation_feedback_presented,
3329 __cb_presentation_feedback_discarded
/* Arms a tdm vblank wait for this surface on the tpl thread.
 * On success, clears vblank_done so the commit path throttles until
 * __cb_tdm_client_vblank fires; post_interval selects how many vblanks
 * to wait. Returns TPL_ERROR_NONE on success or
 * TPL_ERROR_INVALID_OPERATION if tdm rejects the wait request.
 * NOTE(review): caller is expected to guarantee wl_egl_surface->vblank is
 * non-NULL (see the vblank != NULL check at the call site). */
3334 _thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
3336 tdm_error tdm_err = TDM_ERROR_NONE;
3337 tpl_surface_vblank_t *vblank = wl_egl_surface->vblank;
3339 tdm_err = tdm_client_vblank_wait(vblank->tdm_vblank,
3340 wl_egl_surface->post_interval,
3341 __cb_tdm_client_vblank,
3342 (void *)wl_egl_surface);
3344 if (tdm_err == TDM_ERROR_NONE) {
3345 wl_egl_surface->vblank_done = TPL_FALSE;
3346 TRACE_ASYNC_BEGIN((intptr_t)wl_egl_surface, "WAIT_VBLANK");
3348 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
3349 return TPL_ERROR_INVALID_OPERATION;
3352 return TPL_ERROR_NONE;
/* Commits one wl_egl_buffer to the compositor (tpl thread only).
 * Sequence: lazily create the wl_buffer via wayland-tbm, attach a release
 * listener (explicit-sync release or plain wl_buffer.release), request
 * presentation feedback, apply transforms, attach + damage, set the
 * acquire fence / buffer_release when explicit sync is on, then
 * wl_surface_commit + flush. Afterwards it marks the buffer COMMITTED,
 * optionally arms a vblank wait, and signals the commit_sync eventfd.
 * Statement order is load-bearing (protocol requests must precede the
 * commit); do not reorder. */
3356 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
3357 tpl_wl_egl_buffer_t *wl_egl_buffer)
3359 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3360 struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
3361 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
3364 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
3365 "wl_egl_buffer sould be not NULL");
/* wl_buffer is created once per tbm_surface and cached on the buffer. */
3367 if (wl_egl_buffer->wl_buffer == NULL) {
3368 wl_egl_buffer->wl_buffer =
3369 (struct wl_proxy *)wayland_tbm_client_create_buffer(
3370 wl_egl_display->wl_tbm_client,
3371 wl_egl_buffer->tbm_surface);
3373 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
3374 "[FATAL] Failed to create wl_buffer");
3376 TPL_INFO("[WL_BUFFER_CREATE]",
3377 "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
3378 wl_egl_buffer, wl_egl_buffer->wl_buffer,
3379 wl_egl_buffer->tbm_surface);
3381 #if TIZEN_FEATURE_ENABLE
/* Use the plain wl_buffer.release listener only when explicit sync is
 * unavailable for this frame; otherwise release comes via
 * zwp_linux_buffer_release_v1 (set up further below). */
3382 if (!wl_egl_display->use_explicit_sync ||
3383 wl_egl_buffer->acquire_fence_fd == -1)
3386 wl_buffer_add_listener((struct wl_buffer *)wl_egl_buffer->wl_buffer,
3387 &wl_buffer_release_listener,
3392 version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
3394 #if TIZEN_FEATURE_ENABLE
3395 /* create presentation feedback and add listener */
3396 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
3397 if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
3399 struct pst_feedback *pst_feedback = NULL;
3400 pst_feedback = (struct pst_feedback *) calloc(1, sizeof(struct pst_feedback));
3402 pst_feedback->presentation_feedback =
3403 wp_presentation_feedback(wl_egl_display->presentation,
3406 pst_feedback->wl_egl_surface = wl_egl_surface;
3407 pst_feedback->bo_name = wl_egl_buffer->bo_name;
/* Ownership of the presentation_sync fd transfers to pst_feedback;
 * the feedback callbacks signal and close it. */
3409 pst_feedback->pst_sync_fd = wl_egl_buffer->presentation_sync_fd;
3410 wl_egl_buffer->presentation_sync_fd = -1;
3412 wp_presentation_feedback_add_listener(pst_feedback->presentation_feedback,
3413 &feedback_listener, pst_feedback);
3414 __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, pst_feedback);
3415 TRACE_ASYNC_BEGIN(pst_feedback->pst_sync_fd,
3416 "[PRESENTATION_SYNC] bo(%d)",
3417 pst_feedback->bo_name);
/* Failure path: signal the fd anyway so waiters do not hang, then close. */
3419 TPL_ERR("Failed to create presentation feedback. wl_egl_buffer(%p)",
3421 _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
3422 close(wl_egl_buffer->presentation_sync_fd);
3423 wl_egl_buffer->presentation_sync_fd = -1;
3426 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* Window-rotation transform is applied via wayland-tbm, once per change. */
3429 if (wl_egl_buffer->w_rotated == TPL_TRUE) {
3431 wayland_tbm_client_set_buffer_transform(
3432 wl_egl_display->wl_tbm_client,
3433 (void *)wl_egl_buffer->wl_buffer,
3434 wl_egl_buffer->w_transform);
3435 TPL_INFO("[W_TRANSFORM]",
3436 "wl_egl_surface(%p) wl_egl_buffer(%p) w_transform(%d)",
3437 wl_egl_surface, wl_egl_buffer, wl_egl_buffer->w_transform);
3439 wl_egl_buffer->w_rotated = TPL_FALSE;
/* Surface buffer transform is only re-sent when it actually changed. */
3442 if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
3444 wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
3445 TPL_INFO("[TRANSFORM]",
3446 "wl_egl_surface(%p) wl_egl_buffer(%p) transform(%d -> %d)",
3447 wl_egl_surface, wl_egl_buffer,
3448 wl_egl_surface->latest_transform, wl_egl_buffer->transform);
3450 wl_egl_surface->latest_transform = wl_egl_buffer->transform;
3453 if (wl_egl_window) {
3454 wl_egl_window->attached_width = wl_egl_buffer->width;
3455 wl_egl_window->attached_height = wl_egl_buffer->height;
3458 wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer,
3459 wl_egl_buffer->dx, wl_egl_buffer->dy);
/* No damage rects supplied: damage the whole buffer. Which damage request
 * is used presumably depends on the wl_surface version queried above. */
3461 if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) {
3463 wl_surface_damage(wl_surface,
3464 wl_egl_buffer->dx, wl_egl_buffer->dy,
3465 wl_egl_buffer->width, wl_egl_buffer->height);
3467 wl_surface_damage_buffer(wl_surface,
3469 wl_egl_buffer->width, wl_egl_buffer->height);
/* Rects arrive as (x, y, w, h) quadruples with a bottom-left origin; the
 * y coordinate is flipped to Wayland's top-left origin. */
3473 for (i = 0; i < wl_egl_buffer->num_rects; i++) {
3475 wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] +
3476 wl_egl_buffer->rects[i * 4 + 3]);
3478 wl_surface_damage(wl_surface,
3479 wl_egl_buffer->rects[i * 4 + 0],
3481 wl_egl_buffer->rects[i * 4 + 2],
3482 wl_egl_buffer->rects[i * 4 + 3]);
3484 wl_surface_damage_buffer(wl_surface,
3485 wl_egl_buffer->rects[i * 4 + 0],
3487 wl_egl_buffer->rects[i * 4 + 2],
3488 wl_egl_buffer->rects[i * 4 + 3]);
3493 wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
3494 (void *)wl_egl_buffer->wl_buffer,
3495 wl_egl_buffer->serial);
3496 #if TIZEN_FEATURE_ENABLE
/* Explicit sync: hand the acquire fence to the compositor and register a
 * buffer_release proxy for the fenced/immediate release events. */
3497 if (wl_egl_display->use_explicit_sync &&
3498 wl_egl_buffer->acquire_fence_fd != -1) {
3500 zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
3501 wl_egl_buffer->acquire_fence_fd);
3502 TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)",
3503 wl_egl_surface, wl_egl_buffer->tbm_surface, wl_egl_buffer->acquire_fence_fd);
3504 close(wl_egl_buffer->acquire_fence_fd);
3505 wl_egl_buffer->acquire_fence_fd = -1;
3507 wl_egl_buffer->buffer_release =
3508 zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync);
3509 if (!wl_egl_buffer->buffer_release) {
3510 TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface);
3512 zwp_linux_buffer_release_v1_add_listener(
3513 wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer);
3514 TPL_DEBUG("add explicit_sync_release_listener.");
3519 wl_surface_commit(wl_surface);
3521 wl_display_flush(wl_egl_display->wl_display);
3523 TRACE_ASYNC_BEGIN((intptr_t)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3524 wl_egl_buffer->bo_name);
3526 tpl_gmutex_lock(&wl_egl_buffer->mutex);
3528 wl_egl_buffer->need_to_commit = TPL_FALSE;
3529 wl_egl_buffer->status = COMMITTED;
3530 if (wl_egl_surface->last_enq_buffer == wl_egl_buffer->tbm_surface)
3531 wl_egl_surface->last_enq_buffer = NULL;
/* Wake any thread blocked on this buffer's status change. */
3533 tpl_gcond_signal(&wl_egl_buffer->cond);
3535 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3538 "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
3539 wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,
3540 wl_egl_buffer->bo_name);
3542 if (wl_egl_surface->vblank != NULL &&
3543 _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
3544 TPL_ERR("Failed to set wait vblank.");
/* Finally resolve the per-frame commit_sync eventfd so waiters proceed. */
3546 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
3548 if (wl_egl_buffer->commit_sync_fd != -1) {
3549 int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
3551 TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd);
3554 TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
3555 wl_egl_buffer->bo_name);
3556 TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)",
3557 wl_egl_surface, wl_egl_buffer->commit_sync_fd);
3559 close(wl_egl_buffer->commit_sync_fd);
3560 wl_egl_buffer->commit_sync_fd = -1;
3563 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
/* Signals an eventfd by writing a 64-bit counter value to it, waking any
 * poll/read waiters. Rejects -1 (already-consumed fd) and logs on a short
 * or failed write. NOTE(review): return value semantics (success/failure
 * codes) are defined on lines not visible here — confirm at call sites. */
3567 _write_to_eventfd(int eventfd)
3572 if (eventfd == -1) {
3573 TPL_ERR("Invalid fd(-1)");
3577 ret = write(eventfd, &value, sizeof(uint64_t));
3579 TPL_ERR("failed to write to fd(%d)", eventfd);
/* Populates the display backend vtable for the threaded Wayland-EGL
 * backend. Called once per tpl_display; only wires function pointers,
 * no state is allocated here. */
3587 __tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend)
3589 TPL_ASSERT(backend);
3591 backend->type = TPL_BACKEND_WAYLAND_THREAD;
3592 backend->data = NULL;
3594 backend->init = __tpl_wl_egl_display_init;
3595 backend->fini = __tpl_wl_egl_display_fini;
3596 backend->query_config = __tpl_wl_egl_display_query_config;
3597 backend->filter_config = __tpl_wl_egl_display_filter_config;
3598 backend->get_window_info = __tpl_wl_egl_display_get_window_info;
3599 backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
3600 backend->get_buffer_from_native_pixmap =
3601 __tpl_wl_egl_display_get_buffer_from_native_pixmap;
/* Populates the surface backend vtable for the threaded Wayland-EGL
 * backend; mirrors __tpl_display_init_backend_wl_egl_thread for the
 * per-surface operations (dequeue/enqueue/cancel, rotation, interval). */
3605 __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
3607 TPL_ASSERT(backend);
3609 backend->type = TPL_BACKEND_WAYLAND_THREAD;
3610 backend->data = NULL;
3612 backend->init = __tpl_wl_egl_surface_init;
3613 backend->fini = __tpl_wl_egl_surface_fini;
3614 backend->validate = __tpl_wl_egl_surface_validate;
3615 backend->cancel_dequeued_buffer =
3616 __tpl_wl_egl_surface_cancel_buffer;
3617 backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
3618 backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
3619 backend->set_rotation_capability =
3620 __tpl_wl_egl_surface_set_rotation_capability;
3621 backend->set_post_interval =
3622 __tpl_wl_egl_surface_set_post_interval;
3624 __tpl_wl_egl_surface_get_size;
/* Final destructor for a tpl_wl_egl_buffer (tbm user-data free callback).
 * Detaches the buffer from the surface's tracking array and any vblank
 * waiting list, destroys the wl_buffer proxy, resolves-and-closes every
 * pending sync fd so no waiter is left blocked, frees auxiliary state,
 * and finally frees the struct itself. */
3628 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
3630 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3631 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3633 TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
3634 wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
/* Remove from the surface's fixed-size buffers[] slot, if still present. */
3636 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3637 if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) {
3638 wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL;
3639 wl_egl_surface->buffer_cnt--;
3641 wl_egl_buffer->idx = -1;
3643 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
/* If this buffer was queued waiting for vblank, drop it from that list. */
3645 if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
3646 tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
3647 __tpl_list_remove_data(wl_egl_surface->vblank->waiting_buffers,
3648 (void *)wl_egl_buffer,
3651 tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
3654 if (wl_egl_display) {
3655 if (wl_egl_buffer->wl_buffer) {
3656 wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
3657 (void *)wl_egl_buffer->wl_buffer);
3658 wl_egl_buffer->wl_buffer = NULL;
3661 wl_display_flush(wl_egl_display->wl_display);
3664 tpl_gmutex_lock(&wl_egl_buffer->mutex);
3665 #if TIZEN_FEATURE_ENABLE
3666 if (wl_egl_buffer->buffer_release) {
3667 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
3668 wl_egl_buffer->buffer_release = NULL;
3671 if (wl_egl_buffer->release_fence_fd != -1) {
3672 close(wl_egl_buffer->release_fence_fd);
3673 wl_egl_buffer->release_fence_fd = -1;
3677 if (wl_egl_buffer->waiting_source) {
3678 tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
3679 wl_egl_buffer->waiting_source = NULL;
/* Signal pending sync fds before closing so waiters unblock rather than
 * hanging on a buffer that is being destroyed. */
3682 if (wl_egl_buffer->commit_sync_fd != -1) {
3683 int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
3685 TPL_ERR("Failed to send commit_sync signal to fd(%d)",
3686 wl_egl_buffer->commit_sync_fd);
3687 close(wl_egl_buffer->commit_sync_fd);
3688 wl_egl_buffer->commit_sync_fd = -1;
3691 if (wl_egl_buffer->presentation_sync_fd != -1) {
3692 int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
3694 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
3695 wl_egl_buffer->presentation_sync_fd);
3696 close(wl_egl_buffer->presentation_sync_fd);
3697 wl_egl_buffer->presentation_sync_fd = -1;
3700 if (wl_egl_buffer->rects) {
3701 free(wl_egl_buffer->rects);
3702 wl_egl_buffer->rects = NULL;
3703 wl_egl_buffer->num_rects = 0;
3706 wl_egl_buffer->tbm_surface = NULL;
3707 wl_egl_buffer->bo_name = -1;
3708 wl_egl_buffer->status = RELEASED;
/* Unlock before clearing: tpl_gmutex_clear must not run on a held mutex. */
3710 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3711 tpl_gmutex_clear(&wl_egl_buffer->mutex);
3712 tpl_gcond_clear(&wl_egl_buffer->cond);
3713 free(wl_egl_buffer);
/* Returns the exported (global) name of the tbm_surface's first bo;
 * used throughout this file as a stable id for logging and tracing. */
3717 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
3719 return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
/* Debug helper: dumps every tracked buffer of the surface (index, pointers,
 * bo name, status string) under buffers_mutex. Logging only; no state is
 * modified. */
3723 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
3727 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3728 TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)",
3729 wl_egl_surface, wl_egl_surface->buffer_cnt);
3730 for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
3731 tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
3732 if (wl_egl_buffer) {
3734 "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
3735 idx, wl_egl_buffer, wl_egl_buffer->tbm_surface,
3736 wl_egl_buffer->bo_name,
3737 status_to_string[wl_egl_buffer->status]);
3740 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
3744 _check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface)
3747 tpl_bool_t ret = TPL_FALSE;
3750 if (!wl_egl_surface || !tbm_surface)
3753 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3754 for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
3755 tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
3756 if (wl_egl_buffer && wl_egl_buffer->tbm_surface == tbm_surface) {
3762 if (ret == TPL_FALSE || idx == BUFFER_ARRAY_SIZE) {
3763 TPL_ERR("tbm_surface(%p) is not owned by wl_egl_surface(%p)",
3764 tbm_surface, wl_egl_surface);
3766 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);