2 #include "tpl_internal.h"
7 #include <sys/eventfd.h>
9 #include <tbm_bufmgr.h>
10 #include <tbm_surface.h>
11 #include <tbm_surface_internal.h>
12 #include <tbm_surface_queue.h>
14 #include <wayland-client.h>
15 #include <wayland-tbm-server.h>
16 #include <wayland-tbm-client.h>
17 #include <wayland-egl-backend.h>
19 #include <tdm_client.h>
21 #include "wayland-egl-tizen/wayland-egl-tizen.h"
22 #include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
24 #ifndef TIZEN_FEATURE_ENABLE
25 #define TIZEN_FEATURE_ENABLE 1
28 #if TIZEN_FEATURE_ENABLE
29 #include <tizen-surface-client-protocol.h>
30 #include <presentation-time-client-protocol.h>
31 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
34 #include "tpl_utils_gthread.h"
/* Key for tbm_surface user data: the address of this static variable is
 * process-unique, so it safely identifies the tpl_wl_egl_buffer_t attached
 * to a tbm_surface. */
static int wl_egl_buffer_key;
#define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key)
/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
#define BUFFER_ARRAY_SIZE 9
typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t;
typedef struct _surface_vblank tpl_surface_vblank_t;
/* Declare-and-cast helpers: each expands to "*name = (type *)ptr;" so a
 * caller can write e.g. "tpl_wl_egl_display_t wl_egl_display(ptr)" to declare
 * a typed local from an opaque pointer in one statement.  Note the trailing
 * semicolon is part of the macro expansion. */
#define wl_egl_display(ptr) *wl_egl_display = (tpl_wl_egl_display_t *)ptr;
#define wl_egl_surface(ptr) *wl_egl_surface = (tpl_wl_egl_surface_t *)ptr;
#define wl_egl_buffer(ptr) *wl_egl_buffer = (tpl_wl_egl_buffer_t *)ptr;
#define tizen_private(ptr) *tizen_private = (struct tizen_private *)ptr;
/* Per-display backend state; owned by the wl_egl worker thread.
 * NOTE(review): later code accesses tdm fields through a nested member
 * (wl_egl_display->tdm.tdm_client etc.); the nested "tdm" struct declaration
 * appears truncated in this view, which also explains the two
 * gsource_finalized fields below — confirm against the full file. */
struct _tpl_wl_egl_display {
	tpl_gsource *disp_source;            /* gsource polling the wl_display fd */
	tpl_gmutex wl_event_mutex;           /* guards event-queue dispatch */
	struct wl_display *wl_display;
	struct wl_event_queue *ev_queue;     /* backend-private wayland event queue */
	struct wayland_tbm_client *wl_tbm_client;
	int last_error; /* errno of the last wl_display error*/
	tpl_bool_t wl_initialized;
	tpl_bool_t use_wait_vblank;
	tpl_bool_t use_explicit_sync;
	/* To make sure that tpl_gsource has been successfully finalized. */
	tpl_bool_t gsource_finalized;
	tpl_gmutex disp_mutex;
	tdm_client *tdm_client;
	tpl_gsource *tdm_source;
	tpl_bool_t tdm_initialized;
	tpl_list_t *surface_vblanks;
	/* To make sure that tpl_gsource has been successfully finalized. */
	tpl_bool_t gsource_finalized;
#if TIZEN_FEATURE_ENABLE
	struct tizen_surface_shm *tss; /* used for surface buffer_flush */
	struct wp_presentation *presentation; /* for presentation feedback */
	struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
92 typedef enum surf_message {
/* Per-window (EGLSurface) backend state; most fields are touched only on
 * the wl_egl worker thread, buffer bookkeeping is guarded by buffers_mutex. */
struct _tpl_wl_egl_surface {
	tpl_gsource *surf_source;            /* gsource handling surface messages */
	tbm_surface_queue_h tbm_queue;       /* swapchain buffer queue */
	struct wl_egl_window *wl_egl_window; /* native window handle from app */
	struct wl_surface *wl_surface;
#if TIZEN_FEATURE_ENABLE
	struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
	struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */
	tpl_surface_vblank_t *vblank;        /* per-surface tdm vblank helper */
	/* surface information */
	int latest_transform;
	tpl_wl_egl_display_t *wl_egl_display; /* back-pointer to owning display */
	tpl_surface_t *tpl_surface;
	/* wl_egl_buffer list for buffer tracing */
	int buffer_cnt; /* the number of using wl_egl_buffers */
	tpl_gmutex buffers_mutex;            /* guards buffer list / buffer_cnt */
	tbm_surface_h last_enq_buffer;       /* most recently enqueued buffer */
	tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
	tpl_gmutex surf_mutex;
	surf_message sent_message;           /* message pending for surf_source */
	/* for waiting draw done */
	tpl_bool_t use_render_done_fence;
	tpl_bool_t is_activated;
	tpl_bool_t reset; /* TRUE if queue reseted by external */
	tpl_bool_t need_to_enqueue;
	tpl_bool_t prerotation_capability;
	tpl_bool_t vblank_done;
	tpl_bool_t vblank_enable;
	tpl_bool_t set_serial_is_used;
	tpl_bool_t initialized_in_thread;
	tpl_bool_t frontbuffer_activated;
	/* To make sure that tpl_gsource has been successfully finalized. */
	tpl_bool_t gsource_finalized;
/* Couples one tdm_client_vblank with its surface; buffers waiting for a
 * vblank tick are queued here (FIFO presentation modes). */
struct _surface_vblank {
	tdm_client_vblank *tdm_vblank;
	tpl_wl_egl_surface_t *wl_egl_surface;
	tpl_list_t *waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
/* Lifecycle states of a wl_egl_buffer; status_to_string must stay in the
 * same order as the enum (7 entries in total — most enumerators are not
 * visible in this view). */
typedef enum buffer_status {
	WAITING_SIGNALED, // 4
static const char *status_to_string[7] = {
	"WAITING_SIGNALED", // 4
	"WAITING_VBLANK", // 5
/* Per-buffer state attached to a tbm_surface via KEY_WL_EGL_BUFFER. */
struct _tpl_wl_egl_buffer {
	tbm_surface_h tbm_surface;
	struct wl_proxy *wl_buffer;          /* wl_buffer proxy for this tbm_surface */
	int dx, dy; /* position to attach to wl_surface */
	int width, height; /* size to attach to wl_surface */
	buffer_status_t status; /* for tracing buffer status */
	/* for damage region */
	/* for wayland_tbm_client_set_buffer_transform */
	tpl_bool_t w_rotated;
	/* for wl_surface_set_buffer_transform */
	/* for wayland_tbm_client_set_buffer_serial */
	/* for checking need_to_commit (frontbuffer mode) */
	tpl_bool_t need_to_commit;
	/* for checking draw done */
	tpl_bool_t draw_done;
#if TIZEN_FEATURE_ENABLE
	/* to get release event via zwp_linux_buffer_release_v1 */
	struct zwp_linux_buffer_release_v1 *buffer_release;
	/* each buffers own its release_fence_fd, until it passes ownership */
	int32_t release_fence_fd;
	/* each buffers own its acquire_fence_fd.
	 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
	 * will be passed to display server
	 * Otherwise it will be used as a fence waiting for render done */
	int32_t acquire_fence_fd;
	/* Fd to send a signal when wl_surface_commit with this buffer */
	int32_t commit_sync_fd;
	/* Fd to send a signal when receive the
	 * presentation feedback from display server */
	int32_t presentation_sync_fd;
	tpl_gsource *waiting_source;         /* gsource waiting on acquire fence */
	tpl_wl_egl_surface_t *wl_egl_surface; /* back-pointer to owning surface */
#if TIZEN_FEATURE_ENABLE
/* One outstanding wp_presentation feedback request; stored in
 * wl_egl_surface->presentation_feedbacks until the compositor replies. */
struct pst_feedback {
	/* to get presentation feedback from display server */
	struct wp_presentation_feedback *presentation_feedback;
	tpl_wl_egl_surface_t *wl_egl_surface;
/* ---- forward declarations for thread-internal helpers ---- */
static const struct wl_buffer_listener wl_buffer_release_listener;
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface);
__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
static tpl_wl_egl_buffer_t *
_get_wl_egl_buffer(tbm_surface_h tbm_surface);
_write_to_eventfd(int eventfd, uint64_t value);
send_signal(int fd, const char *type);
_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface);
_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
                          tpl_wl_egl_buffer_t *wl_egl_buffer);
__cb_surface_vblank_free(void *data);
/* Allocates and initializes the tizen_private block stored in
 * wl_egl_window->driver_private.  Caller owns the returned memory.
 * NOTE(review): the calloc result is dereferenced below; the NULL check
 * may be on lines missing from this view — confirm one exists. */
static struct tizen_private *
tizen_private_create()
	struct tizen_private *private = calloc(1, sizeof(struct tizen_private));
	private->magic = WL_EGL_TIZEN_MAGIC;
	private->rotation = 0;
	private->frontbuffer_mode = 0;
	private->transform = 0;
	private->window_transform = 0;
	/* callback slots are wired up later by the surface init path */
	private->data = NULL;
	private->rotate_callback = NULL;
	private->get_rotation_capability = NULL;
	private->set_window_serial_callback = NULL;
	private->set_frontbuffer_callback = NULL;
	private->create_commit_sync_fd = NULL;
	private->create_presentation_sync_fd = NULL;
	private->merge_sync_fds = NULL;
/* Returns whether the opaque native display handle is a wl_display,
 * either by identity with wl_display_interface or by interface name. */
_check_native_handle_is_wl_display(tpl_handle_t display)
	struct wl_interface *wl_egl_native_dpy = *(void **) display;
	if (!wl_egl_native_dpy) {
		TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy);
	/* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
	   is a memory address pointing the structure of wl_display_interface. */
	if (wl_egl_native_dpy == &wl_display_interface)
	/* Fallback: some loaders copy the interface struct, so compare names. */
	if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
	            strlen(wl_display_interface.name)) == 0) {
/* gsource dispatch for the tdm_client fd: pumps tdm events on the worker
 * thread; on unrecoverable error the source destroys itself. */
__thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
	tpl_wl_egl_display_t wl_egl_display(tpl_gsource_get_data(gsource));
	tdm_error tdm_err = TDM_ERROR_NONE;
	if (!wl_egl_display) {
		TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource);
		TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
	tdm_err = tdm_client_handle_events(wl_egl_display->tdm.tdm_client);
	/* If an error occurs in tdm_client_handle_events, it cannot be recovered.
	 * When tdm_source is no longer available due to an unexpected situation,
	 * wl_egl_thread must remove it from the thread and destroy it.
	 * In that case, tdm_vblank can no longer be used for surfaces and displays
	 * that used this tdm_source. */
	if (tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
		TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
		/* TPL_FALSE: destroy in-place (already on the worker thread) */
		tpl_gsource_destroy(gsource, TPL_FALSE);
		wl_egl_display->tdm.tdm_source = NULL;
/* gsource finalize for the tdm source: tears down tdm_client state under
 * tdm_mutex, then signals tdm_cond so a waiter in display init/fini can
 * observe gsource_finalized == TRUE. */
__thread_func_tdm_finalize(tpl_gsource *gsource)
	tpl_wl_egl_display_t wl_egl_display(tpl_gsource_get_data(gsource));
	tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
	TPL_INFO("[TDM_CLIENT_FINI]",
	         "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
	         wl_egl_display, wl_egl_display->tdm.tdm_client,
	         wl_egl_display->tdm.tdm_display_fd);
	if (wl_egl_display->tdm.tdm_client) {
		if (wl_egl_display->tdm.surface_vblanks) {
			__tpl_list_free(wl_egl_display->tdm.surface_vblanks,
			                __cb_surface_vblank_free);
			wl_egl_display->tdm.surface_vblanks = NULL;
		tdm_client_destroy(wl_egl_display->tdm.tdm_client);
		wl_egl_display->tdm.tdm_client = NULL;
		wl_egl_display->tdm.tdm_display_fd = -1;
		wl_egl_display->tdm.tdm_source = NULL;
	wl_egl_display->use_wait_vblank = TPL_FALSE;
	wl_egl_display->tdm.tdm_initialized = TPL_FALSE;
	wl_egl_display->tdm.gsource_finalized = TPL_TRUE;
	tpl_gcond_signal(&wl_egl_display->tdm.tdm_cond);
	tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
/* gsource vtable for the tdm_client fd (no prepare/check: plain fd poll). */
static tpl_gsource_functions tdm_funcs = {
	.dispatch = __thread_func_tdm_dispatch,
	.finalize = __thread_func_tdm_finalize,
/* Creates the tdm_client and records its fd; runs on the worker thread.
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_OPERATION otherwise. */
_thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display)
	tdm_client *tdm_client = NULL;
	int tdm_display_fd = -1;
	tdm_error tdm_err = TDM_ERROR_NONE;
	tdm_client = tdm_client_create(&tdm_err);
	if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
		return TPL_ERROR_INVALID_OPERATION;
	tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
	if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
		tdm_client_destroy(tdm_client);
		return TPL_ERROR_INVALID_OPERATION;
	wl_egl_display->tdm.tdm_display_fd = tdm_display_fd;
	wl_egl_display->tdm.tdm_client = tdm_client;
	wl_egl_display->tdm.tdm_source = NULL; /* gsource is attached later by display_init */
	wl_egl_display->tdm.tdm_initialized = TPL_TRUE;
	wl_egl_display->tdm.surface_vblanks = __tpl_list_alloc();
	TPL_INFO("[TDM_CLIENT_INIT]",
	         "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
	         wl_egl_display, tdm_client, tdm_display_fd);
	return TPL_ERROR_NONE;
/* Highest tizen_surface_shm protocol version this client implements. */
#define IMPL_TIZEN_SURFACE_SHM_VERSION 2
/* wl_registry global handler: binds the optional Tizen/presentation/
 * explicit-sync globals when advertised by the compositor. */
__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
                                 uint32_t name, const char *interface,
#if TIZEN_FEATURE_ENABLE
	tpl_wl_egl_display_t wl_egl_display(data);
	if (!strcmp(interface, "tizen_surface_shm")) {
		wl_egl_display->tss =
			wl_registry_bind(wl_registry,
			                 &tizen_surface_shm_interface,
			                 /* bind at most the version we implement */
			                 ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
			                  version : IMPL_TIZEN_SURFACE_SHM_VERSION));
		wl_egl_display->use_tss = TPL_TRUE;
	} else if (!strcmp(interface, wp_presentation_interface.name)) {
		wl_egl_display->presentation =
			wl_registry_bind(wl_registry,
			                 name, &wp_presentation_interface, 1);
		TPL_LOG_D("[REGISTRY_BIND]",
		          "wl_egl_display(%p) bind wp_presentation_interface",
	} else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
		/* TPL_EFS=0 disables explicit fence sync even when advertised */
		char *env = tpl_getenv("TPL_EFS");
		if (env && !atoi(env)) {
			wl_egl_display->use_explicit_sync = TPL_FALSE;
			wl_egl_display->explicit_sync =
				wl_registry_bind(wl_registry, name,
				                 &zwp_linux_explicit_synchronization_v1_interface, 1);
			wl_egl_display->use_explicit_sync = TPL_TRUE;
			TPL_LOG_D("[REGISTRY_BIND]",
			          "wl_egl_display(%p) bind zwp_linux_explicit_synchronization_v1_interface",
/* Global-remove handler: intentionally a no-op (body not visible here). */
__cb_wl_resistry_global_remove_callback(void *data,
                                        struct wl_registry *wl_registry,
static const struct wl_registry_listener registry_listener = {
	__cb_wl_resistry_global_callback,
	__cb_wl_resistry_global_remove_callback
/* Logs the current wl_display error (once per distinct errno) including
 * protocol-error details, and remembers it in last_error so subsequent
 * event-loop callbacks can bail out. */
_wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display,
                      const char *func_name)
	strerror_r(errno, buf, sizeof(buf));
	/* suppress duplicate logging of the same error */
	if (wl_egl_display->last_error == errno)
	TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
	dpy_err = wl_display_get_error(wl_egl_display->wl_display);
	if (dpy_err == EPROTO) {
		const struct wl_interface *err_interface;
		uint32_t err_proxy_id, err_code;
		err_code = wl_display_get_protocol_error(wl_egl_display->wl_display,
		TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
		        (err_interface ? err_interface->name : "UNKNOWN"),
		        err_code, err_proxy_id);
	wl_egl_display->last_error = errno;
/* Worker-thread wayland setup: creates the private event queue, binds the
 * registry globals via a temporary queue + display wrapper, initializes
 * wayland-tbm, and routes bound interfaces onto ev_queue.
 * Returns TPL_ERROR_NONE or an error code; cleanup labels are below. */
_thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display)
	struct wl_registry *registry = NULL;
	struct wl_event_queue *queue = NULL;          /* temporary registry queue */
	struct wl_display *display_wrapper = NULL;
	struct wl_proxy *wl_tbm = NULL;
	struct wayland_tbm_client *wl_tbm_client = NULL;
	tpl_result_t result = TPL_ERROR_NONE;
	queue = wl_display_create_queue(wl_egl_display->wl_display);
		TPL_ERR("Failed to create wl_queue wl_display(%p)",
		        wl_egl_display->wl_display);
		result = TPL_ERROR_INVALID_OPERATION;
	/* persistent queue used by the worker thread for all backend events */
	wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display);
	if (!wl_egl_display->ev_queue) {
		TPL_ERR("Failed to create wl_queue wl_display(%p)",
		        wl_egl_display->wl_display);
		result = TPL_ERROR_INVALID_OPERATION;
	/* wrapper lets us retarget only the registry onto the temporary queue */
	display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display);
	if (!display_wrapper) {
		TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
		        wl_egl_display->wl_display);
		result = TPL_ERROR_INVALID_OPERATION;
	wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
	registry = wl_display_get_registry(display_wrapper);
		TPL_ERR("Failed to create wl_registry");
		result = TPL_ERROR_INVALID_OPERATION;
	wl_proxy_wrapper_destroy(display_wrapper);
	display_wrapper = NULL;
	wl_tbm_client = wayland_tbm_client_init(wl_egl_display->wl_display);
	if (!wl_tbm_client) {
		TPL_ERR("Failed to initialize wl_tbm_client.");
		result = TPL_ERROR_INVALID_CONNECTION;
	wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
		TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
		result = TPL_ERROR_INVALID_CONNECTION;
	wl_proxy_set_queue(wl_tbm, wl_egl_display->ev_queue);
	wl_egl_display->wl_tbm_client = wl_tbm_client;
	/* NOTE(review): "®istry_listener" below is mis-encoded
	 * "&registry_listener" (an "&reg" -> (R) encoding mangle) — fix the
	 * file encoding; the code cannot compile as-is. */
	if (wl_registry_add_listener(registry, ®istry_listener,
		TPL_ERR("Failed to wl_registry_add_listener");
		result = TPL_ERROR_INVALID_OPERATION;
	/* blocks until all registry globals have been delivered on `queue` */
	ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue);
		_wl_display_print_err(wl_egl_display, "roundtrip_queue");
		result = TPL_ERROR_INVALID_OPERATION;
#if TIZEN_FEATURE_ENABLE
	/* set tizen_surface_shm's queue as client's private queue */
	if (wl_egl_display->tss) {
		wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss,
		                   wl_egl_display->ev_queue);
		TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss);
	if (wl_egl_display->presentation) {
		wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation,
		                   wl_egl_display->ev_queue);
		TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.",
		          wl_egl_display->presentation);
	if (wl_egl_display->explicit_sync) {
		wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync,
		                   wl_egl_display->ev_queue);
		TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.",
		          wl_egl_display->explicit_sync);
	wl_egl_display->wl_initialized = TPL_TRUE;
	TPL_INFO("[WAYLAND_INIT]",
	         "wl_egl_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
	         wl_egl_display, wl_egl_display->wl_display,
	         wl_egl_display->wl_tbm_client, wl_egl_display->ev_queue);
#if TIZEN_FEATURE_ENABLE
	TPL_INFO("[WAYLAND_INIT]",
	         "tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)",
	         wl_egl_display->tss, wl_egl_display->presentation,
	         wl_egl_display->explicit_sync);
	/* cleanup path: temporaries are released in reverse creation order */
	wl_proxy_wrapper_destroy(display_wrapper);
	wl_registry_destroy(registry);
	wl_event_queue_destroy(queue);
/* Worker-thread wayland teardown: cancels any pending read, flushes the
 * private queue, destroys bound globals, deinits wayland-tbm, and finally
 * destroys ev_queue.  Mirrors _thread_wl_display_init. */
_thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display)
	/* If wl_egl_display is in prepared state, cancel it */
	if (wl_egl_display->prepared) {
		wl_display_cancel_read(wl_egl_display->wl_display);
		wl_egl_display->prepared = TPL_FALSE;
	/* drain remaining events before destroying proxies */
	if (wl_display_roundtrip_queue(wl_egl_display->wl_display,
	                               wl_egl_display->ev_queue) == -1) {
		_wl_display_print_err(wl_egl_display, "roundtrip_queue");
#if TIZEN_FEATURE_ENABLE
	if (wl_egl_display->tss) {
		TPL_INFO("[TIZEN_SURFACE_SHM_DESTROY]",
		         "wl_egl_display(%p) tizen_surface_shm(%p) fini.",
		         wl_egl_display, wl_egl_display->tss);
		tizen_surface_shm_destroy(wl_egl_display->tss);
		wl_egl_display->tss = NULL;
	if (wl_egl_display->presentation) {
		TPL_INFO("[WP_PRESENTATION_DESTROY]",
		         "wl_egl_display(%p) wp_presentation(%p) fini.",
		         wl_egl_display, wl_egl_display->presentation);
		wp_presentation_destroy(wl_egl_display->presentation);
		wl_egl_display->presentation = NULL;
	if (wl_egl_display->explicit_sync) {
		TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
		         "wl_egl_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
		         wl_egl_display, wl_egl_display->explicit_sync);
		zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync);
		wl_egl_display->explicit_sync = NULL;
	if (wl_egl_display->wl_tbm_client) {
		struct wl_proxy *wl_tbm = NULL;
		wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
				wl_egl_display->wl_tbm_client);
		/* detach wl_tbm from ev_queue before the queue is destroyed */
		wl_proxy_set_queue(wl_tbm, NULL);
		TPL_INFO("[WL_TBM_DEINIT]",
		         "wl_egl_display(%p) wl_tbm_client(%p)",
		         wl_egl_display, wl_egl_display->wl_tbm_client);
		wayland_tbm_client_deinit(wl_egl_display->wl_tbm_client);
		wl_egl_display->wl_tbm_client = NULL;
	wl_event_queue_destroy(wl_egl_display->ev_queue);
	wl_egl_display->ev_queue = NULL;
	wl_egl_display->wl_initialized = TPL_FALSE;
	TPL_INFO("[DISPLAY_FINI]", "wl_egl_display(%p) wl_display(%p)",
	         wl_egl_display, wl_egl_display->wl_display);
/* Entry point run once on the new wl_egl worker thread: initializes the
 * wayland state and (optionally) tdm.  tdm failure only disables vblank
 * waiting; it is not fatal. */
_thread_init(void *data)
	tpl_wl_egl_display_t wl_egl_display(data);
	if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) {
		TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)",
		        wl_egl_display, wl_egl_display->wl_display);
	if (wl_egl_display->use_wait_vblank &&
		_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
		TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
	return wl_egl_display;
/* gsource prepare for the wl_display fd: enters wayland's prepare-read
 * protocol (dispatching any queued events first) and flushes outgoing
 * requests before the poll. */
__thread_func_disp_prepare(tpl_gsource *gsource)
	tpl_wl_egl_display_t wl_egl_display(tpl_gsource_get_data(gsource));
	/* If this wl_egl_display is already prepared,
	 * do nothing in this function. */
	if (wl_egl_display->prepared)
	/* If there is a last_error, there is no need to poll,
	 * so skip directly to dispatch.
	 * prepare -> dispatch */
	if (wl_egl_display->last_error)
	while (wl_display_prepare_read_queue(wl_egl_display->wl_display,
	                                     wl_egl_display->ev_queue) != 0) {
		if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
		                                      wl_egl_display->ev_queue) == -1) {
			_wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
	wl_egl_display->prepared = TPL_TRUE;
	wl_display_flush(wl_egl_display->wl_display);
/* gsource check for the wl_display fd: completes the prepare-read protocol
 * by either reading events (fd readable) or cancelling the read. */
__thread_func_disp_check(tpl_gsource *gsource)
	tpl_wl_egl_display_t wl_egl_display(tpl_gsource_get_data(gsource));
	tpl_bool_t ret = TPL_FALSE;
	if (!wl_egl_display->prepared)
	/* If prepared, but last_error is set,
	 * cancel_read is executed and FALSE is returned.
	 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
	 * and skipping disp_check from prepare to disp_dispatch.
	 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
	if (wl_egl_display->prepared && wl_egl_display->last_error) {
		wl_display_cancel_read(wl_egl_display->wl_display);
	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_read_events(wl_egl_display->wl_display) == -1)
			_wl_display_print_err(wl_egl_display, "read_event");
		wl_display_cancel_read(wl_egl_display->wl_display);
	/* the prepare/read cycle is finished either way */
	wl_egl_display->prepared = TPL_FALSE;
/* gsource dispatch for the wl_display fd: dispatches pending events on the
 * private queue under wl_event_mutex, then flushes. */
__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
	tpl_wl_egl_display_t wl_egl_display(tpl_gsource_get_data(gsource));
	/* If there is last_error, SOURCE_REMOVE should be returned
	 * to remove the gsource from the main loop.
	 * This is because wl_egl_display is not valid since last_error was set.*/
	if (wl_egl_display->last_error) {
	tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
	if (tpl_gsource_check_io_condition(gsource)) {
		if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
		                                      wl_egl_display->ev_queue) == -1) {
			_wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
	wl_display_flush(wl_egl_display->wl_display);
	tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* gsource finalize for the display source: tears down wayland state on the
 * worker thread, then signals disp_cond for any waiter in display fini. */
__thread_func_disp_finalize(tpl_gsource *gsource)
	tpl_wl_egl_display_t wl_egl_display(tpl_gsource_get_data(gsource));
	tpl_gmutex_lock(&wl_egl_display->disp_mutex);
	TPL_LOG_D("[D_FINALIZE]", "wl_egl_display(%p) tpl_gsource(%p)",
	          wl_egl_display, gsource);
	if (wl_egl_display->wl_initialized)
		_thread_wl_display_fini(wl_egl_display);
	wl_egl_display->gsource_finalized = TPL_TRUE;
	tpl_gcond_signal(&wl_egl_display->disp_cond);
	tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
/* gsource vtable for the wl_display fd (full prepare/check/dispatch cycle). */
static tpl_gsource_functions disp_funcs = {
	.prepare = __thread_func_disp_prepare,
	.check = __thread_func_disp_check,
	.dispatch = __thread_func_disp_dispatch,
	.finalize = __thread_func_disp_finalize,
/* Backend entry: validates the native wl_display, allocates the backend
 * state, spawns the wl_egl worker thread, and attaches the display and
 * (optionally) tdm gsources.  On failure the tail section below unwinds
 * everything created so far (goto labels are not visible in this view). */
__tpl_wl_egl_display_init(tpl_display_t *display)
	tpl_wl_egl_display_t *wl_egl_display = NULL;
	/* Do not allow default display in wayland. */
	if (!display->native_handle) {
		TPL_ERR("Invalid native handle for display.");
		return TPL_ERROR_INVALID_PARAMETER;
	if (!_check_native_handle_is_wl_display(display->native_handle)) {
		TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
		return TPL_ERROR_INVALID_PARAMETER;
	wl_egl_display = calloc(1, sizeof(tpl_wl_egl_display_t));
	if (!wl_egl_display) {
		TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t.");
		return TPL_ERROR_OUT_OF_MEMORY;
	display->backend.data = wl_egl_display;
	display->bufmgr_fd = -1;
	/* explicit field init (calloc already zeroed; kept for readability) */
	wl_egl_display->tdm.tdm_initialized = TPL_FALSE;
	wl_egl_display->tdm.tdm_client = NULL;
	wl_egl_display->tdm.tdm_display_fd = -1;
	wl_egl_display->tdm.tdm_source = NULL;
	wl_egl_display->wl_initialized = TPL_FALSE;
	wl_egl_display->ev_queue = NULL;
	wl_egl_display->wl_display = (struct wl_display *)display->native_handle;
	wl_egl_display->last_error = 0;
	wl_egl_display->use_tss = TPL_FALSE;
	wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
	wl_egl_display->prepared = TPL_FALSE;
	wl_egl_display->gsource_finalized = TPL_FALSE;
#if TIZEN_FEATURE_ENABLE
	/* Wayland Interfaces */
	wl_egl_display->tss = NULL;
	wl_egl_display->presentation = NULL;
	wl_egl_display->explicit_sync = NULL;
	wl_egl_display->wl_tbm_client = NULL;
	wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled
	/* TPL_WAIT_VBLANK=0 disables vblank-synchronized commits */
	char *env = tpl_getenv("TPL_WAIT_VBLANK");
	if (env && !atoi(env)) {
		wl_egl_display->use_wait_vblank = TPL_FALSE;
	tpl_gmutex_init(&wl_egl_display->wl_event_mutex);
	tpl_gmutex_init(&wl_egl_display->disp_mutex);
	tpl_gcond_init(&wl_egl_display->disp_cond);
	/* _thread_init runs on the new thread and performs wayland/tdm setup */
	wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
						(tpl_gthread_func)_thread_init,
						(void *)wl_egl_display);
	if (!wl_egl_display->thread) {
		TPL_ERR("Failed to create wl_egl_thread");
	wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread,
						(void *)wl_egl_display,
						wl_display_get_fd(wl_egl_display->wl_display),
						&disp_funcs, SOURCE_TYPE_NORMAL);
	if (!wl_egl_display->disp_source) {
		TPL_ERR("Failed to add native_display(%p) to thread(%p)",
				display->native_handle,
				wl_egl_display->thread);
	if (wl_egl_display->use_wait_vblank &&
		wl_egl_display->tdm.tdm_initialized) {
		tpl_gmutex_init(&wl_egl_display->tdm.tdm_mutex);
		tpl_gcond_init(&wl_egl_display->tdm.tdm_cond);
		wl_egl_display->tdm.tdm_source = tpl_gsource_create(wl_egl_display->thread,
						(void *)wl_egl_display,
						wl_egl_display->tdm.tdm_display_fd,
						&tdm_funcs, SOURCE_TYPE_NORMAL);
		wl_egl_display->tdm.gsource_finalized = TPL_FALSE;
		if (!wl_egl_display->tdm.tdm_source) {
			TPL_ERR("Failed to create tdm_gsource\n");
	/* vblank waiting is usable only if tdm init AND source attach succeeded */
	wl_egl_display->use_wait_vblank = (wl_egl_display->tdm.tdm_initialized &&
					(wl_egl_display->tdm.tdm_source != NULL));
	TPL_INFO("[DISPLAY_INIT]",
			"wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
			wl_egl_display->thread,
			wl_egl_display->wl_display);
	TPL_INFO("[DISPLAY_INIT]",
			"USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)",
			wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
			wl_egl_display->use_tss ? "TRUE" : "FALSE",
			wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");
	return TPL_ERROR_NONE;
	/* ---- error unwind: destroy sources in-thread, then the thread ---- */
	if (wl_egl_display->tdm.tdm_source) {
		tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
		// Send destroy mesage to thread
		tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
		while (!wl_egl_display->tdm.gsource_finalized) {
			tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
		tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
	if (wl_egl_display->disp_source) {
		tpl_gmutex_lock(&wl_egl_display->disp_mutex);
		// Send destroy mesage to thread
		tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
		while (!wl_egl_display->gsource_finalized) {
			tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
		tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
	if (wl_egl_display->thread) {
		tpl_gthread_destroy(wl_egl_display->thread);
	tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
	tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
	tpl_gcond_clear(&wl_egl_display->disp_cond);
	tpl_gmutex_clear(&wl_egl_display->disp_mutex);
	wl_egl_display->thread = NULL;
	free(wl_egl_display);
	display->backend.data = NULL;
	return TPL_ERROR_INVALID_OPERATION;
/* Backend teardown: destroys the tdm and display gsources in-thread
 * (waiting on their finalized flags), stops the worker thread, and frees
 * the backend state.  Mirrors __tpl_wl_egl_display_init. */
__tpl_wl_egl_display_fini(tpl_display_t *display)
	tpl_wl_egl_display_t wl_egl_display(display->backend.data);
	if (wl_egl_display) {
		TPL_INFO("[DISPLAY_FINI]",
				"wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
				wl_egl_display->thread,
				wl_egl_display->wl_display);
		if (wl_egl_display->tdm.tdm_source && wl_egl_display->tdm.tdm_initialized) {
			/* This is a protection to prevent problems that arise in unexpected situations
			 * that g_cond_wait cannot work normally.
			 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
			 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
			tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
			// Send destroy mesage to thread
			tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
			while (!wl_egl_display->tdm.gsource_finalized) {
				tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
			wl_egl_display->tdm.tdm_source = NULL;
			tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
		if (wl_egl_display->disp_source) {
			tpl_gmutex_lock(&wl_egl_display->disp_mutex);
			// Send destroy mesage to thread
			tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
			/* This is a protection to prevent problems that arise in unexpected situations
			 * that g_cond_wait cannot work normally.
			 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
			 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
			while (!wl_egl_display->gsource_finalized) {
				tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
			wl_egl_display->disp_source = NULL;
			tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
		if (wl_egl_display->thread) {
			tpl_gthread_destroy(wl_egl_display->thread);
			wl_egl_display->thread = NULL;
		tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
		tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
		tpl_gcond_clear(&wl_egl_display->disp_cond);
		tpl_gmutex_clear(&wl_egl_display->disp_mutex);
		tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);
		free(wl_egl_display);
	display->backend.data = NULL;
1074 __tpl_wl_egl_display_query_config(tpl_display_t *display,
1075 tpl_surface_type_t surface_type,
1076 int red_size, int green_size,
1077 int blue_size, int alpha_size,
1078 int color_depth, int *native_visual_id,
1079 tpl_bool_t *is_slow)
1081 TPL_ASSERT(display);
1083 if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
1084 green_size == 8 && blue_size == 8 &&
1085 (color_depth == 32 || color_depth == 24)) {
1087 if (alpha_size == 8) {
1088 if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
1089 if (is_slow) *is_slow = TPL_FALSE;
1090 return TPL_ERROR_NONE;
1092 if (alpha_size == 0) {
1093 if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
1094 if (is_slow) *is_slow = TPL_FALSE;
1095 return TPL_ERROR_NONE;
1099 return TPL_ERROR_INVALID_PARAMETER;
1103 __tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
1106 TPL_IGNORE(display);
1107 TPL_IGNORE(visual_id);
1108 TPL_IGNORE(alpha_size);
1109 return TPL_ERROR_NONE;
/* Reports size/format of a wl_egl_window.  Format comes from the bound
 * wl_egl_surface when one exists, otherwise is derived from alpha/depth
 * (branch conditions for the ARGB/XRGB fallback are not visible here). */
__tpl_wl_egl_display_get_window_info(tpl_display_t *display,
                                     tpl_handle_t window, int *width,
                                     int *height, tbm_format *format,
                                     int depth, int a_size)
	tpl_result_t ret = TPL_ERROR_NONE;
	struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window;
	if (!wl_egl_window) {
		TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window);
		return TPL_ERROR_INVALID_PARAMETER;
	if (width) *width = wl_egl_window->width;
	if (height) *height = wl_egl_window->height;
	struct tizen_private tizen_private(wl_egl_window->driver_private);
	if (tizen_private && tizen_private->data) {
		/* surface already created: report its actual format */
		tpl_wl_egl_surface_t wl_egl_surface(tizen_private->data);
		*format = wl_egl_surface->format;
		*format = TBM_FORMAT_ARGB8888;
		*format = TBM_FORMAT_XRGB8888;
/* Reports size/format of a native pixmap by resolving it to a tbm_surface
 * through wayland-tbm-server. */
__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
                                     tpl_handle_t pixmap, int *width,
                                     int *height, tbm_format *format)
	tbm_surface_h tbm_surface = NULL;
		TPL_ERR("Invalid parameter. tpl_handle_t(%p)", pixmap);
		return TPL_ERROR_INVALID_PARAMETER;
	tbm_surface = wayland_tbm_server_get_surface(NULL,
	                                             (struct wl_resource *)pixmap);
		TPL_ERR("Failed to get tbm_surface from wayland_tbm.");
		return TPL_ERROR_INVALID_PARAMETER;
	if (width) *width = tbm_surface_get_width(tbm_surface);
	if (height) *height = tbm_surface_get_height(tbm_surface);
	if (format) *format = tbm_surface_get_format(tbm_surface);
	return TPL_ERROR_NONE;
static tbm_surface_h
__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
	/* Resolves the tbm_surface that wayland-tbm associates with the
	 * given pixmap wl_resource.
	 * NOTE(review): the parameter guard and the return statements were
	 * dropped by extraction; code tokens kept untouched. */
	tbm_surface_h tbm_surface = NULL;

	tbm_surface = wayland_tbm_server_get_surface(NULL,
						     (struct wl_resource *)pixmap);
	/* Lookup failed: the resource is not a wayland-tbm surface. */
	TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
1188 __tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
1190 struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
1192 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
1194 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
1195 is a memory address pointing the structure of wl_display_interface. */
1196 if (wl_egl_native_dpy == &wl_display_interface)
1199 if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
1200 strlen(wl_display_interface.name)) == 0) {
1207 /* -- BEGIN -- wl_egl_window callback functions */
1209 __cb_destroy_callback(void *private)
1211 struct tizen_private tizen_private(private);
1213 if (!tizen_private) {
1214 TPL_LOG_D("[WL_EGL_WINDOW_DESTROY_CALLBACK]", "Already destroyed surface");
1218 tpl_wl_egl_surface_t wl_egl_surface(tizen_private->data);
1219 if (wl_egl_surface) {
1220 TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.",
1221 wl_egl_surface->wl_egl_window);
1222 TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface.");
1224 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1225 wl_egl_surface->wl_egl_window->destroy_window_callback = NULL;
1226 wl_egl_surface->wl_egl_window->resize_callback = NULL;
1227 wl_egl_surface->wl_egl_window->driver_private = NULL;
1228 wl_egl_surface->wl_egl_window = NULL;
1229 wl_egl_surface->wl_surface = NULL;
1231 tizen_private->set_window_serial_callback = NULL;
1232 tizen_private->rotate_callback = NULL;
1233 tizen_private->get_rotation_capability = NULL;
1234 tizen_private->set_frontbuffer_callback = NULL;
1235 tizen_private->create_commit_sync_fd = NULL;
1236 tizen_private->create_presentation_sync_fd = NULL;
1237 tizen_private->data = NULL;
1239 free(tizen_private);
1240 tizen_private = NULL;
1241 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1246 __cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
1248 TPL_ASSERT(private);
1250 struct tizen_private tizen_private(private);
1251 tpl_wl_egl_surface_t wl_egl_surface(tizen_private->data);
1252 int cur_w, cur_h, req_w, req_h, format;
1254 if (!wl_egl_surface) {
1255 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1260 format = wl_egl_surface->format;
1261 cur_w = wl_egl_surface->width;
1262 cur_h = wl_egl_surface->height;
1263 req_w = wl_egl_window->width;
1264 req_h = wl_egl_window->height;
1266 TPL_INFO("[WINDOW_RESIZE]",
1267 "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
1268 wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
1270 if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
1271 != TBM_SURFACE_QUEUE_ERROR_NONE) {
1272 TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
1276 /* -- END -- wl_egl_window callback functions */
1278 /* -- BEGIN -- wl_egl_window tizen private callback functions */
1280 /* There is no usecase for using prerotation callback below */
1282 __cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
1284 TPL_ASSERT(private);
1286 struct tizen_private tizen_private(private);
1287 tpl_wl_egl_surface_t wl_egl_surface(tizen_private->data);
1288 int rotation = tizen_private->rotation;
1290 if (!wl_egl_surface) {
1291 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1296 TPL_INFO("[WINDOW_ROTATE]",
1297 "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)",
1298 wl_egl_surface, wl_egl_window,
1299 wl_egl_surface->rotation, rotation);
1301 wl_egl_surface->rotation = rotation;
1304 /* There is no usecase for using prerotation callback below */
1306 __cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
1309 TPL_ASSERT(private);
1311 int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
1312 struct tizen_private tizen_private(private);
1313 tpl_wl_egl_surface_t wl_egl_surface(tizen_private->data);
1315 if (!wl_egl_surface) {
1316 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1318 return rotation_capability;
1321 if (wl_egl_surface->prerotation_capability == TPL_TRUE)
1322 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
1324 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
1327 return rotation_capability;
1331 __cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
1332 void *private, unsigned int serial)
1334 TPL_ASSERT(private);
1336 struct tizen_private tizen_private(private);
1337 tpl_wl_egl_surface_t wl_egl_surface(tizen_private->data);
1339 if (!wl_egl_surface) {
1340 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1345 wl_egl_surface->set_serial_is_used = TPL_TRUE;
1346 wl_egl_surface->serial = serial;
1350 __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1352 TPL_ASSERT(private);
1353 TPL_ASSERT(wl_egl_window);
1355 int commit_sync_fd = -1;
1357 struct tizen_private tizen_private(private);
1358 tpl_wl_egl_surface_t wl_egl_surface(tizen_private->data);
1360 if (!wl_egl_surface) {
1361 TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface);
1365 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
1367 if (wl_egl_surface->commit_sync.fd != -1) {
1368 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1369 TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
1370 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1371 TPL_LOG_D("[COMMIT_SYNC][DUP]", "wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)",
1372 wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd);
1373 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1374 return commit_sync_fd;
1377 wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
1378 if (wl_egl_surface->commit_sync.fd == -1) {
1379 TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)",
1381 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1385 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1387 TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
1388 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1389 TPL_LOG_D("[COMMIT_SYNC][CREATE]", "wl_egl_surface(%p) commit_sync_fd(%d)",
1390 wl_egl_surface, commit_sync_fd);
1392 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1394 return commit_sync_fd;
1398 __cb_client_window_set_frontbuffer_mode(struct wl_egl_window *wl_egl_window,
1399 void *private, int set)
1401 TPL_ASSERT(private);
1402 TPL_ASSERT(wl_egl_window);
1403 struct tizen_private tizen_private(private);
1404 tpl_wl_egl_surface_t wl_egl_surface(tizen_private->data);
1405 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1407 tpl_surface_t *surface = wl_egl_surface->tpl_surface;
1408 TPL_CHECK_ON_NULL_RETURN(surface);
1410 tpl_bool_t is_frontbuffer_mode = set ? TPL_TRUE : TPL_FALSE;
1412 TPL_OBJECT_LOCK(surface);
1413 if (is_frontbuffer_mode == surface->is_frontbuffer_mode) {
1414 TPL_OBJECT_UNLOCK(surface);
1418 TPL_INFO("[FRONTBUFFER_MODE]",
1419 "[%s] wl_egl_surface(%p) wl_egl_window(%p)",
1420 is_frontbuffer_mode ? "ON" : "OFF",
1421 wl_egl_surface, wl_egl_window);
1423 surface->is_frontbuffer_mode = is_frontbuffer_mode;
1425 TPL_OBJECT_UNLOCK(surface);
1428 #if TIZEN_FEATURE_ENABLE
1430 __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1432 TPL_ASSERT(private);
1433 TPL_ASSERT(wl_egl_window);
1435 int presentation_sync_fd = -1;
1437 struct tizen_private tizen_private(private);
1438 tpl_wl_egl_surface_t wl_egl_surface(tizen_private->data);
1440 if (!wl_egl_surface) {
1441 TPL_ERR("Invalid parameter. wl_egl_surface is NULL");
1445 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
1446 if (wl_egl_surface->presentation_sync.fd != -1) {
1447 presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1448 TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
1449 wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1450 TPL_LOG_D("[PRESENTATION_SYNC][DUP]", "wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1451 wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1452 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1453 return presentation_sync_fd;
1456 wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
1457 if (wl_egl_surface->presentation_sync.fd == -1) {
1458 TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)",
1460 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1464 presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1465 TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
1466 wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1467 TPL_LOG_D("[PRESENTATION_SYNC][CREATE]", "wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1468 wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1470 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1472 return presentation_sync_fd;
1474 /* -- END -- wl_egl_window tizen private callback functions */
1476 /* -- BEGIN -- tizen_surface_shm_flusher_listener */
1477 static void __cb_tss_flusher_flush_callback(void *data,
1478 struct tizen_surface_shm_flusher *tss_flusher)
1480 tpl_wl_egl_surface_t wl_egl_surface(data);
1481 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1483 TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1484 wl_egl_surface, wl_egl_surface->tbm_queue);
1486 tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
1487 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1488 TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
1493 static void __cb_tss_flusher_free_flush_callback(void *data,
1494 struct tizen_surface_shm_flusher *tss_flusher)
1496 tpl_wl_egl_surface_t wl_egl_surface(data);
1497 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1499 TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1500 wl_egl_surface, wl_egl_surface->tbm_queue);
1502 tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
1503 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1504 TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
1509 static const struct tizen_surface_shm_flusher_listener
1510 tss_flusher_listener = {
1511 __cb_tss_flusher_flush_callback,
1512 __cb_tss_flusher_free_flush_callback
1514 /* -- END -- tizen_surface_shm_flusher_listener */
1517 /* -- BEGIN -- tbm_surface_queue callback funstions */
/*
 * tbm_surface_queue reset callback: runs when the queue is reset
 * (resize or activation-state change from the compositor).  Marks the
 * surface for reset and notifies the owner via surface->reset_cb.
 * NOTE(review): local width/height declarations, some braces and the
 * is_activated state update appear dropped by extraction; code tokens
 * kept untouched.
 */
__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
	tpl_wl_egl_display_t *wl_egl_display = NULL;
	tpl_surface_t *surface = NULL;
	tpl_bool_t is_activated = TPL_FALSE;

	tpl_wl_egl_surface_t wl_egl_surface(data);
	TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);

	wl_egl_display = wl_egl_surface->wl_egl_display;
	TPL_CHECK_ON_NULL_RETURN(wl_egl_display);

	surface = wl_egl_surface->tpl_surface;
	TPL_CHECK_ON_NULL_RETURN(surface);

	/* When the queue is resized, change the reset flag to TPL_TRUE to reflect
	 * the changed window size at the next frame. */
	width = tbm_surface_queue_get_width(tbm_queue);
	height = tbm_surface_queue_get_height(tbm_queue);
	if (surface->width != width || surface->height != height) {
		TPL_INFO("[QUEUE_RESIZE]",
			 "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
			 wl_egl_surface, tbm_queue,
			 surface->width, surface->height, width, height);

	/* When queue_reset_callback is called, if is_activated is different from
	 * its previous state change the reset flag to TPL_TRUE to get a new buffer
	 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
	is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client,
							       wl_egl_surface->tbm_queue);
	if (wl_egl_surface->is_activated != is_activated) {
		TPL_INFO("[ACTIVATED]",
			 "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
			 wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
		TPL_INFO("[DEACTIVATED]",
			 " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
			 wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);

	/* Force the next dequeue to pick up the new size/activation state. */
	wl_egl_surface->reset = TPL_TRUE;

	/* Let the frontend (e.g. EGL) know the surface must be re-validated. */
	if (surface->reset_cb)
		surface->reset_cb(surface->reset_data);
1571 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1574 TPL_IGNORE(tbm_queue);
1576 tpl_wl_egl_surface_t wl_egl_surface(data);
1577 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1579 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1580 if (wl_egl_surface->sent_message == NONE_MESSAGE) {
1581 wl_egl_surface->sent_message = ACQUIRABLE;
1582 tpl_gsource_send_message(wl_egl_surface->surf_source,
1583 wl_egl_surface->sent_message);
1585 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1587 /* -- END -- tbm_surface_queue callback funstions */
/*
 * Per-surface teardown, executed on the worker thread (called from
 * __thread_func_surf_finalize): drains presentation feedbacks, signals
 * pending sync fds so no waiter blocks forever, then destroys the
 * explicit-sync object, shm flusher, tbm queue and vblank bookkeeping.
 * NOTE(review): some braces/free() lines appear dropped by extraction;
 * code tokens kept untouched.
 */
_thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;

	TPL_INFO("[SURFACE_FINI]",
		 "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
		 wl_egl_surface, wl_egl_surface->wl_egl_window,
		 wl_egl_surface->wl_surface);
#if TIZEN_FEATURE_ENABLE
	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
	/* Drain outstanding presentation feedbacks: wake each waiter via its
	 * sync fd, then destroy the wp_presentation_feedback proxy. */
	if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) {
		while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) {
			struct pst_feedback *pst_feedback =
				(struct pst_feedback *)__tpl_list_pop_front(
					wl_egl_surface->presentation_feedbacks, NULL);
			send_signal(pst_feedback->pst_sync_fd, "PST_FEEDBACK");
			pst_feedback->pst_sync_fd = -1;

			wp_presentation_feedback_destroy(pst_feedback->presentation_feedback);
			pst_feedback->presentation_feedback = NULL;

	__tpl_list_free(wl_egl_surface->presentation_feedbacks, NULL);
	wl_egl_surface->presentation_feedbacks = NULL;

	/* Wake any waiter still blocked on the surface-level sync fd. */
	send_signal(wl_egl_surface->presentation_sync.fd, "PST_SYNC");
	wl_egl_surface->presentation_sync.fd = -1;

	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);

	if (wl_egl_surface->surface_sync) {
		TPL_INFO("[SURFACE_SYNC_DESTROY]",
			 "wl_egl_surface(%p) surface_sync(%p)",
			 wl_egl_surface, wl_egl_surface->surface_sync);
		zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync);
		wl_egl_surface->surface_sync = NULL;

	if (wl_egl_surface->tss_flusher) {
		TPL_INFO("[FLUSHER_DESTROY]",
			 "wl_egl_surface(%p) tss_flusher(%p)",
			 wl_egl_surface, wl_egl_surface->tss_flusher);
		tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher);
		wl_egl_surface->tss_flusher = NULL;

	if (wl_egl_surface->tbm_queue) {
		TPL_INFO("[TBM_QUEUE_DESTROY]",
			 "wl_egl_surface(%p) tbm_queue(%p)",
			 wl_egl_surface, wl_egl_surface->tbm_queue);
		tbm_surface_queue_destroy(wl_egl_surface->tbm_queue);
		wl_egl_surface->tbm_queue = NULL;

	/* Buffers still waiting for vblank are dropped (list freed without
	 * per-node destructor) under the vblank mutex. */
	if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
		tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
		__tpl_list_free(wl_egl_surface->vblank->waiting_buffers, NULL);
		wl_egl_surface->vblank->waiting_buffers = NULL;
		tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);

	/* Removing from the display list triggers __cb_surface_vblank_free. */
	if (wl_egl_surface->vblank) {
		__tpl_list_remove_data(wl_egl_display->tdm.surface_vblanks,
				       (void *)wl_egl_surface->vblank,
				       __cb_surface_vblank_free);
		wl_egl_surface->vblank = NULL;
/*
 * gsource dispatch for the surface's worker thread.  Messages are sent
 * via tpl_gsource_send_message() with sent_message set under surf_mutex;
 * this handler processes them and clears sent_message so the next
 * message may be queued.
 * NOTE(review): the trailing return statement and some argument
 * continuation lines were lost in extraction; code tokens kept untouched.
 */
__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
	tpl_wl_egl_surface_t wl_egl_surface(tpl_gsource_get_data(gsource));

	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
	if (message == INIT_SURFACE) { /* Initialize surface */
		TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) initialize message received!",
		/* Build queue/vblank/protocol state in-thread, then wake the
		 * caller blocked in __tpl_wl_egl_surface_init(). */
		_thread_wl_egl_surface_init(wl_egl_surface);
		wl_egl_surface->initialized_in_thread = TPL_TRUE;
		tpl_gcond_signal(&wl_egl_surface->surf_cond);
	} else if (message == ACQUIRABLE) { /* Acquirable */
		TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) acquirable message received!",
		_thread_surface_queue_acquire(wl_egl_surface);

	/* Allow __cb_tbm_queue_acquirable_callback to send the next message. */
	wl_egl_surface->sent_message = NONE_MESSAGE;

	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1693 __thread_func_surf_finalize(tpl_gsource *gsource)
1695 tpl_wl_egl_surface_t wl_egl_surface(tpl_gsource_get_data(gsource));
1696 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1698 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1699 TPL_LOG_D("[S_FINALIZE]", "wl_egl_surface(%p) tpl_gsource(%p)",
1700 wl_egl_surface, gsource);
1702 _thread_wl_egl_surface_fini(wl_egl_surface);
1704 wl_egl_surface->gsource_finalized = TPL_TRUE;
1706 tpl_gcond_signal(&wl_egl_surface->surf_cond);
1707 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1710 static tpl_gsource_functions surf_funcs = {
1713 .dispatch = __thread_func_surf_dispatch,
1714 .finalize = __thread_func_surf_finalize,
/*
 * Backend surface-init entry point.  Allocates the tpl_wl_egl_surface,
 * creates its worker gsource, hooks the wl_egl_window callbacks through
 * tizen_private, initializes per-surface mutexes/conds, and finally
 * sends INIT_SURFACE to the worker thread, blocking until the in-thread
 * initialization (queue/vblank/protocol setup) has completed.
 * Returns TPL_ERROR_NONE on success; on gsource-creation failure frees
 * the surface and returns TPL_ERROR_INVALID_OPERATION.
 * NOTE(review): some braces/else lines and error-path continuations
 * were lost in extraction; code tokens kept untouched.
 */
__tpl_wl_egl_surface_init(tpl_surface_t *surface)
	tpl_wl_egl_display_t wl_egl_display(surface->display->backend.data);
	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
	tpl_gsource *surf_source = NULL;

	struct wl_egl_window *wl_egl_window =
		(struct wl_egl_window *)surface->native_handle;

	TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
	TPL_ASSERT(surface->native_handle);
	TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_display, TPL_ERROR_INVALID_PARAMETER);

	wl_egl_surface = calloc(1, sizeof(tpl_wl_egl_surface_t));
	if (!wl_egl_surface) {
		TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
		return TPL_ERROR_OUT_OF_MEMORY;

	/* The worker source carries no fd of its own (message-driven). */
	surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface,
					 -1, FD_TYPE_NONE, &surf_funcs, SOURCE_TYPE_NORMAL);
	TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)",
	goto surf_source_create_fail;

	surface->backend.data = (void *)wl_egl_surface;
	surface->width = wl_egl_window->width;
	surface->height = wl_egl_window->height;
	surface->rotation = 0;

	wl_egl_surface->tpl_surface = surface;
	wl_egl_surface->width = wl_egl_window->width;
	wl_egl_surface->height = wl_egl_window->height;
	wl_egl_surface->format = surface->format;
	wl_egl_surface->num_buffers = surface->num_buffers;

	wl_egl_surface->surf_source = surf_source;
	wl_egl_surface->wl_egl_window = wl_egl_window;
	wl_egl_surface->wl_surface = wl_egl_window->surface;

	wl_egl_surface->wl_egl_display = wl_egl_display;

	/* Default state flags before the in-thread init runs. */
	wl_egl_surface->reset = TPL_FALSE;
	wl_egl_surface->is_activated = TPL_FALSE;
	wl_egl_surface->need_to_enqueue = TPL_TRUE;
	wl_egl_surface->prerotation_capability = TPL_FALSE;
	wl_egl_surface->vblank_done = TPL_TRUE;
	wl_egl_surface->use_render_done_fence = TPL_FALSE;
	wl_egl_surface->set_serial_is_used = TPL_FALSE;
	wl_egl_surface->gsource_finalized = TPL_FALSE;
	wl_egl_surface->initialized_in_thread = TPL_FALSE;
	wl_egl_surface->frontbuffer_activated = TPL_FALSE;

	wl_egl_surface->latest_transform = -1;
	wl_egl_surface->serial = 0;

	wl_egl_surface->vblank = NULL;
#if TIZEN_FEATURE_ENABLE
	wl_egl_surface->tss_flusher = NULL;
	wl_egl_surface->surface_sync = NULL;

	wl_egl_surface->post_interval = surface->post_interval;

	wl_egl_surface->vblank_enable = TPL_FALSE;

	/* Sync fds are created lazily by the tizen_private callbacks. */
	wl_egl_surface->commit_sync.fd = -1;
	wl_egl_surface->presentation_sync.fd = -1;

	wl_egl_surface->sent_message = NONE_MESSAGE;
	wl_egl_surface->last_enq_buffer = NULL;

	wl_egl_surface->buffers = __tpl_list_alloc();

	/* Reuse the window's existing tizen_private if present, otherwise
	 * create one, then install all backend callbacks on it. */
	struct tizen_private *tizen_private = NULL;

	if (wl_egl_window->driver_private)
		tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
	tizen_private = tizen_private_create();
	wl_egl_window->driver_private = (void *)tizen_private;

	if (tizen_private) {
		tizen_private->data = (void *)wl_egl_surface;
		tizen_private->rotate_callback = (void *)__cb_rotate_callback;
		tizen_private->get_rotation_capability = (void *)
			__cb_get_rotation_capability;
		tizen_private->set_window_serial_callback = (void *)
			__cb_set_window_serial_callback;
		tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
		tizen_private->set_frontbuffer_callback = (void *)__cb_client_window_set_frontbuffer_mode;
#if TIZEN_FEATURE_ENABLE
		tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
		tizen_private->create_presentation_sync_fd = NULL;

		wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
		wl_egl_window->resize_callback = (void *)__cb_resize_callback;

	tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
	tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);

	tpl_gmutex_init(&wl_egl_surface->buffers_mutex);

	tpl_gmutex_init(&wl_egl_surface->surf_mutex);
	tpl_gcond_init(&wl_egl_surface->surf_cond);

	/* Initialize in thread */
	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
	wl_egl_surface->sent_message = INIT_SURFACE;
	tpl_gsource_send_message(wl_egl_surface->surf_source,
				 wl_egl_surface->sent_message);
	/* Block until the worker signals completion of the in-thread init. */
	while (!wl_egl_surface->initialized_in_thread)
		tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);

	TPL_ASSERT(wl_egl_surface->tbm_queue);

	TPL_INFO("[SURFACE_INIT]",
		 "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)",
		 surface, wl_egl_surface, wl_egl_surface->surf_source);

	return TPL_ERROR_NONE;

surf_source_create_fail:
	free(wl_egl_surface);
	surface->backend.data = NULL;
	return TPL_ERROR_INVALID_OPERATION;
/*
 * Creates the surface's tbm_surface_queue via wayland-tbm (tiled variant
 * when the buffer manager advertises tiled-memory capability), sets
 * GUARANTEE_CYCLE mode and registers the reset/acquirable callbacks.
 * Returns NULL (and destroys any partially-created queue) on failure.
 * NOTE(review): queue-creation argument lists and several braces were
 * lost in extraction; code tokens kept untouched.
 */
static tbm_surface_queue_h
_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface,
			 struct wayland_tbm_client *wl_tbm_client,
	tbm_surface_queue_h tbm_queue = NULL;
	tbm_bufmgr bufmgr = NULL;
	unsigned int capability;

	struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
	int width = wl_egl_surface->width;
	int height = wl_egl_surface->height;
	int format = wl_egl_surface->format;

	if (!wl_tbm_client || !wl_surface) {
		TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)",
			wl_tbm_client, wl_surface);

	/* Probe bufmgr capability only; the temporary init ref is dropped
	 * immediately after the query. */
	bufmgr = tbm_bufmgr_init(-1);
	capability = tbm_bufmgr_get_capability(bufmgr);
	tbm_bufmgr_deinit(bufmgr);

	if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
		tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
		tbm_queue = wayland_tbm_client_create_surface_queue(

	TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)",

	/* GUARANTEE_CYCLE keeps buffers rotating in a fixed cycle. */
	if (tbm_surface_queue_set_modes(
			tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
		TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
		tbm_surface_queue_destroy(tbm_queue);

	if (tbm_surface_queue_add_reset_cb(
			__cb_tbm_queue_reset_callback,
			(void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
		tbm_surface_queue_destroy(tbm_queue);

	if (tbm_surface_queue_add_acquirable_cb(
			__cb_tbm_queue_acquirable_callback,
			(void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
		TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
		tbm_surface_queue_destroy(tbm_queue);
/*
 * Creates a tdm vblank object on the "primary" output of the given tdm
 * client.  Fake vblank is enabled (presumably so waits still complete
 * when the real output is off — confirm with TDM docs) and sync mode is
 * disabled (events delivered asynchronously).  Returns NULL on failure.
 * NOTE(review): the error-path cleanup/return lines were lost in
 * extraction; code tokens kept untouched.
 */
static tdm_client_vblank*
_thread_create_tdm_client_vblank(tdm_client *tdm_client)
	tdm_client_vblank *tdm_vblank = NULL;
	tdm_client_output *tdm_output = NULL;
	tdm_error tdm_err = TDM_ERROR_NONE;

	TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);

	tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
	if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);

	tdm_vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
	if (!tdm_vblank || tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("Failed to create tdm_vblank. tdm_err(%d)", tdm_err);

	/* Flush any events already queued on the tdm connection. */
	tdm_err = tdm_client_handle_pending_events(tdm_client);
	if (tdm_err != TDM_ERROR_NONE) {
		TPL_ERR("Failed to handle pending events. tdm_err(%d)", tdm_err);

	tdm_client_vblank_set_enable_fake(tdm_vblank, 1);
	tdm_client_vblank_set_sync(tdm_vblank, 0);
/*
 * Destructor for entries of wl_egl_display->tdm.surface_vblanks (used by
 * __tpl_list_remove_data in _thread_wl_egl_surface_fini): destroys the
 * tdm vblank object, clears the vblank mutex and detaches the vblank
 * from its owning surface.
 * NOTE(review): the free(vblank) line appears dropped by extraction;
 * code tokens kept untouched.
 */
__cb_surface_vblank_free(void *data)
	TPL_CHECK_ON_NULL_RETURN(data);

	tpl_surface_vblank_t *vblank = (tpl_surface_vblank_t *)data;
	tpl_wl_egl_surface_t *wl_egl_surface = vblank->wl_egl_surface;

	TPL_INFO("[VBLANK_DESTROY]",
		 "wl_egl_surface(%p) surface_vblank(%p) tdm_vblank(%p)",
		 wl_egl_surface, vblank,
		 vblank->tdm_vblank);

	tdm_client_vblank_destroy(vblank->tdm_vblank);
	vblank->tdm_vblank = NULL;
	vblank->wl_egl_surface = NULL;
	tpl_gmutex_clear(&vblank->mutex);

	/* Detach from the surface so later code sees no stale vblank. */
	wl_egl_surface->vblank = NULL;
/*
 * In-thread half of surface initialization (runs on the worker thread
 * via the INIT_SURFACE message): creates the tbm queue, the optional
 * per-surface vblank object, and the Tizen protocol objects (shm
 * flusher, explicit-sync synchronization, presentation feedback list).
 * NOTE(review): several braces / error-path lines were lost in
 * extraction; code tokens kept untouched.
 */
_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
	tpl_surface_vblank_t *vblank = NULL;

	wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
		wl_egl_display->wl_tbm_client,
		wl_egl_surface->num_buffers);
	if (!wl_egl_surface->tbm_queue) {
		TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
			wl_egl_surface, wl_egl_display->wl_tbm_client);

	TPL_INFO("[QUEUE_CREATION][1/2]",
		 "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)",
		 wl_egl_surface, wl_egl_surface->wl_surface,
		 wl_egl_display->wl_tbm_client);
	TPL_INFO("[QUEUE_CREATION][2/2]",
		 "wl_egl_surface(%p) tbm_queue(%p) size(%d x %d) X %d format(%d)",
		 wl_egl_surface->tbm_queue,
		 wl_egl_surface->width,
		 wl_egl_surface->height,
		 wl_egl_surface->num_buffers,
		 wl_egl_surface->format);

	/* Optional vblank throttling: one tdm vblank + waiting-buffer list
	 * per surface, registered on the display's surface_vblanks list. */
	if (wl_egl_display->use_wait_vblank) {
		vblank = (tpl_surface_vblank_t *)calloc(1, sizeof(tpl_surface_vblank_t));
		vblank->tdm_vblank = _thread_create_tdm_client_vblank(
			wl_egl_display->tdm.tdm_client);
		if (!vblank->tdm_vblank) {
			TPL_ERR("Failed to create tdm_vblank from tdm_client(%p)",
				wl_egl_display->tdm.tdm_client);

		vblank->waiting_buffers = __tpl_list_alloc();
		if (!vblank->waiting_buffers) {
			tdm_client_vblank_destroy(vblank->tdm_vblank);

		vblank->wl_egl_surface = wl_egl_surface;
		tpl_gmutex_init(&vblank->mutex);

		__tpl_list_push_back(wl_egl_display->tdm.surface_vblanks,

		TPL_INFO("[VBLANK_INIT]",
			 "wl_egl_surface(%p) tdm_client(%p) tdm_vblank(%p)",
			 wl_egl_surface, wl_egl_display->tdm.tdm_client,
			 vblank->tdm_vblank);

	wl_egl_surface->vblank = vblank;
	/* vblank waits are only used while the post interval is positive. */
	wl_egl_surface->vblank_enable = (vblank != NULL &&
					 wl_egl_surface->post_interval > 0);

#if TIZEN_FEATURE_ENABLE
	if (wl_egl_display->tss) {
		wl_egl_surface->tss_flusher =
			tizen_surface_shm_get_flusher(wl_egl_display->tss,
						      wl_egl_surface->wl_surface);

	if (wl_egl_surface->tss_flusher) {
		tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
						       &tss_flusher_listener,
		TPL_INFO("[FLUSHER_INIT]",
			 "wl_egl_surface(%p) tss_flusher(%p)",
			 wl_egl_surface, wl_egl_surface->tss_flusher);

	/* Explicit sync: fall back to implicit sync display-wide when the
	 * per-surface synchronization object cannot be created. */
	if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) {
		wl_egl_surface->surface_sync =
			zwp_linux_explicit_synchronization_v1_get_synchronization(
				wl_egl_display->explicit_sync, wl_egl_surface->wl_surface);
		if (wl_egl_surface->surface_sync) {
			TPL_INFO("[EXPLICIT_SYNC_INIT]",
				 "wl_egl_surface(%p) surface_sync(%p)",
				 wl_egl_surface, wl_egl_surface->surface_sync);
			TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)",
			wl_egl_display->use_explicit_sync = TPL_FALSE;

	wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
/*
 * Force-clears every buffer tracked by the surface.  The worker thread
 * is paused (tpl_gthread_pause_in_idle) so buffer state cannot change
 * underneath; for in-flight buffers (ENQUEUED..) the thread is briefly
 * resumed and we wait (with timeout) until the buffer reaches COMMITTED.
 * Acquired-but-unreleased buffers are released back to the queue,
 * dequeued-but-unenqueued ones are cancel-dequeued, and the extra
 * tbm_surface ref taken for tracking is dropped.
 * NOTE(review): local declarations (idx/buffer_cnt) and several braces
 * were lost in extraction; code tokens kept untouched.
 */
_tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
	tpl_bool_t need_to_release = TPL_FALSE;
	tpl_bool_t need_to_cancel = TPL_FALSE;
	buffer_status_t status = RELEASED;

	/* Freeze the worker thread so buffer states are stable. */
	tpl_gthread_pause_in_idle(wl_egl_display->thread);

	buffer_cnt = __tpl_list_get_count(wl_egl_surface->buffers);

	while (!__tpl_list_is_empty(wl_egl_surface->buffers)) {
		tpl_wl_egl_buffer_t wl_egl_buffer(__tpl_list_pop_front(wl_egl_surface->buffers,

		tpl_gmutex_lock(&wl_egl_buffer->mutex);

		status = wl_egl_buffer->status;

		TPL_INFO("[BUFFER_CLEAR]",
			 "[%d/%d] wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
			 ++idx, buffer_cnt, wl_egl_surface, wl_egl_buffer,
			 wl_egl_buffer->tbm_surface,
			 status_to_string[status]);

		/* In-flight buffer: let the worker run until it is COMMITTED,
		 * or give up after a timed wait. */
		if (status >= ENQUEUED) {
			tpl_result_t wait_result = TPL_ERROR_NONE;

			while (status < COMMITTED && wait_result != TPL_ERROR_TIME_OUT) {
				tpl_gthread_continue(wl_egl_display->thread);
				wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond,
								   &wl_egl_buffer->mutex,
				tpl_gthread_pause_in_idle(wl_egl_display->thread);
				status = wl_egl_buffer->status; /* update status */

				if (wait_result == TPL_ERROR_TIME_OUT) {
					TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p) status(%s)",
						 wl_egl_buffer, status_to_string[status]);

		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
		/* It has been acquired but has not yet been released, so this
		 * buffer must be released. */
		need_to_release = (status >= ACQUIRED && status <= COMMITTED);

		/* After dequeue, it has not been enqueued yet
		 * so cancel_dequeue must be performed. */
		need_to_cancel = (status == DEQUEUED);

		if (need_to_release) {
			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
							    wl_egl_buffer->tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
					wl_egl_buffer->tbm_surface, tsq_err);

		if (need_to_cancel) {
			tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
								   wl_egl_buffer->tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
					wl_egl_buffer->tbm_surface, tsq_err);

		wl_egl_buffer->status = RELEASED;

		tpl_gmutex_unlock(&wl_egl_buffer->mutex);

		/* Drop the tracking reference taken when the buffer entered
		 * the surface's buffer list. */
		if (need_to_release || need_to_cancel || status == ENQUEUED)
			tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);

	tpl_gthread_continue(wl_egl_display->thread);
/* Tear down a window surface's backend state.
 * NOTE(review): this extraction has gaps (original line numbers jump), so some
 * statements (braces, else-arms) are not visible here.
 * Sequence: clear buffers -> destroy the surface gsource (waiting for the
 * worker thread to confirm finalization) -> detach from wl_egl_window ->
 * free the buffer list and all sync primitives -> free the struct itself. */
2176 __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
2178 	tpl_wl_egl_display_t *wl_egl_display = NULL;
2180 	TPL_ASSERT(surface);
2181 	TPL_ASSERT(surface->display);
/* Only window-type surfaces are handled by this backend. */
2183 	TPL_CHECK_ON_FALSE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW);
2185 	tpl_wl_egl_surface_t wl_egl_surface(surface->backend.data);
2186 	TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
2188 	wl_egl_display = wl_egl_surface->wl_egl_display;
2189 	TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
2191 	TPL_INFO("[SURFACE_FINI][BEGIN]",
2192 		 "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
2194 		 wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
/* Release/cancel every tracked buffer before destroying thread resources. */
2196 	_tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
2198 	if (wl_egl_surface->surf_source) {
2199 		tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2200 		// Send destroy message to thread
2201 		tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
2202 		/* This is a protection to prevent problems that arise in unexpected situations
2203 		 * that g_cond_wait cannot work normally.
2204 		 * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
2205 		 * caller should use tpl_gcond_wait() in the loop with checking finalized flag
/* gsource_finalized is set by the worker thread; loop guards against spurious wakeups. */
2207 		while (!wl_egl_surface->gsource_finalized) {
2208 			tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
2210 		wl_egl_surface->surf_source = NULL;
2211 		tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* Unhook all callbacks installed on the client's wl_egl_window and free
 * the tizen_private struct owned by this backend. */
2214 	if (wl_egl_surface->wl_egl_window) {
2215 		struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2216 		struct tizen_private tizen_private(wl_egl_window->driver_private);
2217 		TPL_INFO("[WL_EGL_WINDOW_FINI]",
2218 			 "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
2219 			 wl_egl_surface, wl_egl_window,
2220 			 wl_egl_surface->wl_surface);
2222 		if (tizen_private) {
2223 			tizen_private->set_window_serial_callback = NULL;
2224 			tizen_private->rotate_callback = NULL;
2225 			tizen_private->get_rotation_capability = NULL;
2226 			tizen_private->create_presentation_sync_fd = NULL;
2227 			tizen_private->create_commit_sync_fd = NULL;
2228 			tizen_private->set_frontbuffer_callback = NULL;
2229 			tizen_private->merge_sync_fds = NULL;
2230 			tizen_private->data = NULL;
2231 			free(tizen_private);
2233 		wl_egl_window->driver_private = NULL;
2236 		wl_egl_window->destroy_window_callback = NULL;
2237 		wl_egl_window->resize_callback = NULL;
2239 		wl_egl_surface->wl_egl_window = NULL;
2242 	wl_egl_surface->last_enq_buffer = NULL;
2244 	wl_egl_surface->wl_surface = NULL;
2245 	wl_egl_surface->wl_egl_display = NULL;
2246 	wl_egl_surface->tpl_surface = NULL;
/* Free the (now empty) buffer tracking list under its own mutex. */
2248 	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
2249 	__tpl_list_free(wl_egl_surface->buffers, NULL);
2250 	wl_egl_surface->buffers = NULL;
2251 	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
2252 	tpl_gmutex_clear(&wl_egl_surface->buffers_mutex);
/* lock/unlock before clear drains any in-flight holder of each sync mutex. */
2254 	tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
2255 	tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
2256 	tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
2258 	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2259 	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
2260 	tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex);
2262 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2263 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2264 	tpl_gmutex_clear(&wl_egl_surface->surf_mutex);
2265 	tpl_gcond_clear(&wl_egl_surface->surf_cond);
2267 	TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface);
2269 	free(wl_egl_surface);
2270 	surface->backend.data = NULL;
/* Record whether the client window can handle pre-rotated buffers.
 * Only stores the flag; consumers of prerotation_capability are elsewhere.
 * Returns TPL_ERROR_NONE, or TPL_ERROR_INVALID_PARAMETER on NULL input. */
2274 __tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
2277 	TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2279 	tpl_wl_egl_surface_t wl_egl_surface(surface->backend.data);
2281 	TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2283 	TPL_INFO("[SET_PREROTATION_CAPABILITY]",
2284 		 "wl_egl_surface(%p) prerotation capability set to [%s]",
2285 		 wl_egl_surface, (set ? "TRUE" : "FALSE"));
2287 	wl_egl_surface->prerotation_capability = set;
2288 	return TPL_ERROR_NONE;
/* Set the surface's post (swap) interval.
 * The store is done under surf_mutex because the worker thread reads
 * post_interval (e.g. in the vblank callback) under the same lock.
 * Returns TPL_ERROR_NONE, or TPL_ERROR_INVALID_PARAMETER on NULL input. */
2292 __tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
2295 	TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2297 	tpl_wl_egl_surface_t wl_egl_surface(surface->backend.data);
2299 	TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2301 	TPL_INFO("[SET_POST_INTERVAL]",
2302 		 "wl_egl_surface(%p) post_interval(%d -> %d)",
2303 		 wl_egl_surface, wl_egl_surface->post_interval, post_interval);
2305 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2306 	wl_egl_surface->post_interval = post_interval;
2307 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2309 	return TPL_ERROR_NONE;
/* Report whether the surface is still valid for rendering:
 * false once the tbm_queue has been reset (e.g. by a resize).
 * NOTE(review): the return statement is elided in this extraction;
 * retval is presumably returned after line 2322. */
2313 __tpl_wl_egl_surface_validate(tpl_surface_t *surface)
2315 	tpl_bool_t retval = TPL_TRUE;
2317 	TPL_ASSERT(surface);
2318 	TPL_ASSERT(surface->backend.data);
2320 	tpl_wl_egl_surface_t wl_egl_surface(surface->backend.data);
2322 	retval = !(wl_egl_surface->reset);
/* Query the current surface size from the tbm_surface_queue.
 * width/height output pointers are presumably optional (NULL-checked on the
 * elided lines) — TODO confirm against full source. */
2328 __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
2330 	tpl_wl_egl_surface_t wl_egl_surface(surface->backend.data);
2333 		*width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2335 		*height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
/* Fence sync cannot be used while frontbuffer rendering is active,
 * since the displayed buffer is reused without going through the queue. */
2339 __tpl_wl_egl_surface_fence_sync_is_available(tpl_surface_t *surface)
2341 	tpl_wl_egl_surface_t wl_egl_surface(surface->backend.data);
2343 	return !wl_egl_surface->frontbuffer_activated;
2346 #define CAN_DEQUEUE_TIMEOUT_MS 10000
/* Forcibly reset the tbm_surface_queue after a CAN_DEQUEUE timeout:
 * drop all vblank-waiting buffers, flush the queue, then release/unref
 * every tracked buffer that was between ACQUIRED and COMMITTED.
 * Returns TPL_ERROR_NONE or TPL_ERROR_INVALID_OPERATION on flush failure.
 * Caller is expected to have paused the worker thread (see dequeue path). */
2349 _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
2351 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2353 	_print_buffer_lists(wl_egl_surface);
2355 	if (wl_egl_surface->vblank) {
2356 		tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
/* Discard buffers queued for the next vblank; they will never be committed. */
2358 		if (wl_egl_surface->vblank->waiting_buffers)
2359 			__tpl_list_fini(wl_egl_surface->vblank->waiting_buffers, NULL);
2361 		tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
2364 	if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
2365 	    != TBM_SURFACE_QUEUE_ERROR_NONE) {
2366 		TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
2367 			wl_egl_surface->tbm_queue, tsq_err);
2368 		return TPL_ERROR_INVALID_OPERATION;
/* Drain the tracking list; statuses ACQUIRED..COMMITTED hold a queue slot
 * and a tbm ref that must be returned here. */
2371 	while (!__tpl_list_is_empty(wl_egl_surface->buffers)) {
2372 		tpl_bool_t need_to_release = TPL_FALSE;
2373 		tpl_wl_egl_buffer_t wl_egl_buffer(
2374 			__tpl_list_pop_front(wl_egl_surface->buffers, NULL));
2375 		need_to_release = (wl_egl_buffer->status >= ACQUIRED) &&
2376 			(wl_egl_buffer->status <= COMMITTED);
2378 		if (need_to_release) {
2379 			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2380 							    wl_egl_buffer->tbm_surface);
2381 			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2382 				TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
2383 					wl_egl_buffer->tbm_surface, tsq_err);
2384 			tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
2388 	TPL_INFO("[FORCE_FLUSH]",
2389 		 "wl_egl_surface(%p) tbm_queue(%p)",
2390 		 wl_egl_surface, wl_egl_surface->tbm_queue);
2392 	_print_buffer_lists(wl_egl_surface);
2394 	return TPL_ERROR_NONE;
/* (Re)initialize per-dequeue state of a wl_egl_buffer from the current
 * wl_egl_window / tizen_private state: transforms, serial, damage rects.
 * Called on every dequeue, including reuse of an existing wl_egl_buffer. */
2398 _wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer,
2399 		    tpl_wl_egl_surface_t *wl_egl_surface)
2401 	struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2402 	struct tizen_private tizen_private(wl_egl_window->driver_private);
2404 	TPL_ASSERT(tizen_private);
2406 	wl_egl_buffer->draw_done = TPL_FALSE;
2407 	wl_egl_buffer->need_to_commit = TPL_TRUE;
2408 #if TIZEN_FEATURE_ENABLE
2409 	wl_egl_buffer->buffer_release = NULL;
2411 	wl_egl_buffer->transform = tizen_private->transform;
/* Window transform changed since last use -> flag so the commit path can
 * notify the compositor of the rotation. */
2413 	if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
2414 		wl_egl_buffer->w_transform = tizen_private->window_transform;
2415 		wl_egl_buffer->w_rotated = TPL_TRUE;
/* Serial: client-provided (set_serial_is_used) or monotonically generated. */
2418 	if (wl_egl_surface->set_serial_is_used) {
2419 		wl_egl_buffer->serial = wl_egl_surface->serial;
2421 		wl_egl_buffer->serial = ++tizen_private->serial;
/* Stale damage rects from a previous frame are dropped. */
2424 	if (wl_egl_buffer->rects) {
2425 		free(wl_egl_buffer->rects);
2426 		wl_egl_buffer->rects = NULL;
2427 		wl_egl_buffer->num_rects = 0;
/* Fetch the wl_egl_buffer attached to a tbm_surface via tbm user data
 * (KEY_WL_EGL_BUFFER). Returns NULL if none has been attached yet. */
2431 static tpl_wl_egl_buffer_t *
2432 _get_wl_egl_buffer(tbm_surface_h tbm_surface)
2434 	tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2435 	tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2436 					   (void **)&wl_egl_buffer);
2437 	return wl_egl_buffer;
/* Get-or-create the wl_egl_buffer wrapper for a tbm_surface.
 * First dequeue of a given tbm_surface allocates the wrapper, attaches it as
 * tbm user data (freed via __cb_wl_egl_buffer_free), initializes fds/mutex,
 * and registers it in the surface's tracking list. Every call then refreshes
 * per-frame state through _wl_egl_buffer_init(). Returns NULL only on OOM. */
2440 static tpl_wl_egl_buffer_t *
2441 _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
2442 		      tbm_surface_h tbm_surface)
2444 	tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2445 	struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2447 	wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2449 	if (!wl_egl_buffer) {
2450 		wl_egl_buffer = calloc(1, sizeof(tpl_wl_egl_buffer_t));
2451 		TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL);
/* Tie lifetime to the tbm_surface: destructor runs when tbm user data is dropped. */
2453 		tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2454 						   (tbm_data_free)__cb_wl_egl_buffer_free);
2455 		tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2458 		wl_egl_buffer->wl_buffer = NULL;
2459 		wl_egl_buffer->tbm_surface = tbm_surface;
2460 		wl_egl_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
2461 		wl_egl_buffer->wl_egl_surface = wl_egl_surface;
2463 		wl_egl_buffer->status = RELEASED;
/* All sync fds start invalid (-1). */
2465 		wl_egl_buffer->acquire_fence_fd = -1;
2466 		wl_egl_buffer->commit_sync_fd = -1;
2467 		wl_egl_buffer->presentation_sync_fd = -1;
2468 		wl_egl_buffer->release_fence_fd = -1;
2470 		wl_egl_buffer->dx = wl_egl_window->dx;
2471 		wl_egl_buffer->dy = wl_egl_window->dy;
2472 		wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
2473 		wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
/* -1 forces the first _wl_egl_buffer_init() to record the window transform. */
2475 		wl_egl_buffer->w_transform = -1;
2477 		tpl_gmutex_init(&wl_egl_buffer->mutex);
2478 		tpl_gcond_init(&wl_egl_buffer->cond);
2480 		tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
2481 		__tpl_list_push_back(wl_egl_surface->buffers, (void *)wl_egl_buffer);
2482 		tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
2484 		TPL_INFO("[WL_EGL_BUFFER_CREATE]",
2485 			 "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2486 			 wl_egl_surface, wl_egl_buffer, tbm_surface,
2487 			 wl_egl_buffer->bo_name);
2490 	_wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface);
2492 	return wl_egl_buffer;
/* Dequeue the next renderable tbm_surface for the client (EGL).
 * NOTE(review): this extraction has gaps; error-return lines and some braces
 * are elided. Visible flow:
 *  1. After a queue reset, wait (bounded) for the previously enqueued buffer
 *     to reach COMMITTED so compositing order is preserved.
 *  2. Wait until the queue can dequeue; on timeout, pause the worker thread
 *     and force-flush the queue.
 *  3. In frontbuffer mode, hand back surface->frontbuffer without dequeuing.
 *  4. Otherwise dequeue, ref, wrap in a wl_egl_buffer, and report the
 *     release fence (explicit sync) through *release_fence. */
2495 static tbm_surface_h
2496 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
2497 									int32_t *release_fence)
2499 	TPL_ASSERT(surface->backend.data);
2500 	TPL_ASSERT(surface->display);
2501 	TPL_ASSERT(surface->display->backend.data);
2503 	tpl_wl_egl_surface_t wl_egl_surface(surface->backend.data);
2504 	tpl_wl_egl_display_t wl_egl_display(surface->display->backend.data);
2505 	tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2507 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2509 	tbm_surface_h tbm_surface = NULL;
/* Drop the TPL object lock while blocking so other TPL calls can proceed. */
2511 	TPL_OBJECT_UNLOCK(surface);
2512 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2513 	if (wl_egl_surface->reset == TPL_TRUE) {
2514 		if (_check_buffer_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer) &&
2515 			tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) {
2516 			tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer;
2517 			tpl_wl_egl_buffer_t *enqueued_buffer =
2518 				_get_wl_egl_buffer(last_enq_buffer);
2520 			if (enqueued_buffer) {
/* Hold a ref so the buffer cannot be destroyed while we wait on its cond. */
2521 				tbm_surface_internal_ref(last_enq_buffer);
2522 				tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2523 				tpl_gmutex_lock(&enqueued_buffer->mutex);
2524 				while (enqueued_buffer->status >= ENQUEUED &&
2525 					   enqueued_buffer->status < COMMITTED) {
2526 					tpl_result_t wait_result;
2527 					TPL_INFO("[DEQ_AFTER_RESET]",
2528 							 "wl_egl_surface(%p) waiting for previous wl_egl_buffer(%p) commit",
2529 							 wl_egl_surface, enqueued_buffer);
2531 					wait_result = tpl_gcond_timed_wait(&enqueued_buffer->cond,
2532 													  &enqueued_buffer->mutex,
2534 					if (wait_result == TPL_ERROR_TIME_OUT) {
2535 						TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
2540 				tpl_gmutex_unlock(&enqueued_buffer->mutex);
2541 				tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2542 				tbm_surface_internal_unref(last_enq_buffer);
2546 		wl_egl_surface->last_enq_buffer = NULL;
2548 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2550 	tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
2551 				wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
2552 	TPL_OBJECT_LOCK(surface);
/* Timeout: the compositor likely stopped releasing buffers; reset the queue. */
2555 	if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
2556 		TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
2557 				 wl_egl_surface->tbm_queue, surface);
2559 		tpl_gthread_pause_in_idle(wl_egl_display->thread);
2560 		/* Locking wl_event_mutex is a secondary means of preparing for
2561 		 * the failure of tpl_gthread_pause_in_idle().
2562 		 * If tpl_gthread_pause_in_idle() is successful,
2563 		 * locking wl_event_mutex does not affect. */
2564 		tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
2565 		if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
2566 			TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
2567 					wl_egl_surface->tbm_queue, surface);
2568 			tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2569 			tpl_gthread_continue(wl_egl_display->thread);
2572 		tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2575 		wl_egl_surface->vblank_done = TPL_TRUE;
2577 		tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2578 		tpl_gthread_continue(wl_egl_display->thread);
2581 	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2582 		TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
2583 				wl_egl_surface->tbm_queue, surface);
2587 	/* After the can dequeue state, lock the wl_event_mutex to prevent other
2588 	 * events from being processed in wayland_egl_thread
2589 	 * during below dequeue procedure. */
2590 	tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
/* Queue dimensions may have changed after a reset; mirror them to the surface. */
2592 	surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2593 	surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
2594 	wl_egl_surface->width = surface->width;
2595 	wl_egl_surface->height = surface->height;
2598 	/* If surface->frontbuffer is not null, the frontbuffer rendering mode will be
2599 	 * maintained if the surface state meets the conditions below.
2600 	 * 1. surface->is_frontbuffer_mode == TPL_TRUE
2601 	 *  - It may be changed to true or false by calling
2602 	 *    tpl_surface_set_frontbuffer_mode(will be deprecated)
2604 	 *    wl_egl_window_tizen_set_frontbuffer_mode (recommended)
2605 	 * 2. is_activated == TPL_TRUE
2606 	 *  - To check whether direct display is possible.
2607 	 * 3. wl_egl_surface->reset == TPL_FALSE
2608 	 *  - tbm_queue reset should not have occurred due to window resize.
2609 	 * If surface is not satisfied with any of above conditions,
2610 	 * frontbuffer rendering will be stopped and surface->frontbuffer becomes null.
2612 	if (surface->frontbuffer) {
2613 		if (!surface->is_frontbuffer_mode ||
2614 			!wl_egl_surface->is_activated ||
2615 			wl_egl_surface->reset) {
2616 			surface->frontbuffer = NULL;
2617 			wl_egl_surface->need_to_enqueue = TPL_TRUE;
2618 			wl_egl_surface->frontbuffer_activated = TPL_FALSE;
2619 			TPL_INFO("[FRONTBUFFER_RENDERING_STOP]",
2620 					 "wl_egl_surface(%p) wl_egl_window(%p)",
2621 					 wl_egl_surface, wl_egl_surface->wl_egl_window);
2623 			bo_name = _get_tbm_surface_bo_name(surface->frontbuffer);
2625 					  "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
2626 					  surface->frontbuffer, bo_name);
2627 			TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer,
2628 							  "[DEQ]~[ENQ] BO_NAME:%d",
/* Frontbuffer fast path: return the already-displayed buffer directly. */
2630 			wl_egl_surface->frontbuffer_activated = TPL_TRUE;
2631 			tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2632 			return surface->frontbuffer;
2636 	tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
2639 		TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d",
2640 				wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err);
2641 		tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Ref held until the buffer is enqueued (or cancelled). */
2645 	tbm_surface_internal_ref(tbm_surface);
2647 	wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
2648 	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
2650 	tpl_gmutex_lock(&wl_egl_buffer->mutex);
2651 	wl_egl_buffer->status = DEQUEUED;
2653 	/* If wl_egl_buffer->release_fence_fd is -1,
2654 	 * the tbm_surface can be used immediately.
2655 	 * If not, user(EGL) have to wait until signaled. */
2656 	if (release_fence) {
2657 #if TIZEN_FEATURE_ENABLE
2658 		if (wl_egl_display->use_explicit_sync) {
2659 			*release_fence = wl_egl_buffer->release_fence_fd;
2660 			TPL_LOG_D("[EXPLICIT_FENCE]", "wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
2661 					  wl_egl_surface, wl_egl_buffer, *release_fence);
/* Ownership of the fence fd passes to the caller. */
2663 			wl_egl_buffer->release_fence_fd = -1;
2667 			*release_fence = -1;
2671 	if (surface->is_frontbuffer_mode && wl_egl_surface->is_activated) {
2672 		if (surface->frontbuffer == NULL) {
2673 			TPL_INFO("[FRONTBUFFER_RENDERING_START]",
2674 					 "wl_egl_surface(%p) wl_egl_window(%p) bo(%d)",
2675 					 wl_egl_surface, wl_egl_surface->wl_egl_window,
2676 					 _get_tbm_surface_bo_name(tbm_surface));
2678 			surface->frontbuffer = tbm_surface;
2681 	wl_egl_surface->reset = TPL_FALSE;
2683 	TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name);
2684 	TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d",
2685 					  wl_egl_buffer->bo_name);
2686 	TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2687 			  wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name,
2688 			  release_fence ? *release_fence : -1);
2690 	tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2691 	tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Return a previously dequeued tbm_surface to the queue without rendering.
 * Marks the wrapper RELEASED, drops the dequeue-time tbm ref, and cancels
 * the dequeue on the tbm queue. Returns TPL_ERROR_NONE on success. */
2697 __tpl_wl_egl_surface_cancel_buffer(tpl_surface_t *surface,
2698 								   tbm_surface_h tbm_surface)
2700 	TPL_ASSERT(surface);
2701 	TPL_ASSERT(surface->backend.data);
2703 	tpl_wl_egl_surface_t wl_egl_surface(surface->backend.data);
2704 	tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2705 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2707 	if (!tbm_surface_internal_is_valid(tbm_surface)) {
2708 		TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
2709 		return TPL_ERROR_INVALID_PARAMETER;
2712 	wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2713 	if (wl_egl_buffer) {
2714 		tpl_gmutex_lock(&wl_egl_buffer->mutex);
2715 		wl_egl_buffer->status = RELEASED;
2716 		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* Balances the tbm_surface_internal_ref() taken in dequeue_buffer. */
2719 	tbm_surface_internal_unref(tbm_surface);
2721 	tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
2723 	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2724 		TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
2725 				tbm_surface, surface);
2726 		return TPL_ERROR_INVALID_OPERATION;
2729 	TPL_INFO("[CANCEL_BUFFER]", "wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
2730 			 wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
2732 	return TPL_ERROR_NONE;
/* Enqueue a rendered tbm_surface for presentation.
 * Stores damage rects and the acquire fence on the wl_egl_buffer, transfers
 * any pending presentation/commit sync fds, records last_enq_buffer, then
 * pushes the buffer into the tbm queue (the worker thread acquires and
 * commits it). In frontbuffer mode the enqueue may be skipped entirely.
 * Returns TPL_ERROR_NONE / _INVALID_PARAMETER / _OUT_OF_MEMORY /
 * _INVALID_OPERATION. */
2736 __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
2737 									tbm_surface_h tbm_surface,
2738 									int num_rects, const int *rects, int32_t acquire_fence)
2740 	TPL_ASSERT(surface);
2741 	TPL_ASSERT(surface->display);
2742 	TPL_ASSERT(surface->backend.data);
2743 	TPL_ASSERT(tbm_surface);
2744 	TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2746 	tpl_wl_egl_surface_t wl_egl_surface(surface->backend.data);
2747 	tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2748 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2751 	if (!tbm_surface_internal_is_valid(tbm_surface)) {
2752 		TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
2754 		TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2755 		return TPL_ERROR_INVALID_PARAMETER;
2758 	wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2759 	if (!wl_egl_buffer) {
2760 		TPL_ERR("Failed to get wl_egl_buffer from tbm_surface(%p)", tbm_surface);
2761 		return TPL_ERROR_INVALID_PARAMETER;
2764 	bo_name = _get_tbm_surface_bo_name(tbm_surface);
2766 	TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
2768 	tpl_gmutex_lock(&wl_egl_buffer->mutex);
2770 	/* If there are received region information, save it to wl_egl_buffer */
2771 	if (num_rects && rects) {
2772 		if (wl_egl_buffer->rects != NULL) {
2773 			free(wl_egl_buffer->rects);
2774 			wl_egl_buffer->rects = NULL;
2775 			wl_egl_buffer->num_rects = 0;
/* Each damage rect is 4 ints (x, y, w, h — per the 4-int stride used here). */
2778 		wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2779 		wl_egl_buffer->num_rects = num_rects;
2781 		if (!wl_egl_buffer->rects) {
2782 			TPL_ERR("Failed to allocate memory fo damage rects info.");
2783 			tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2784 			return TPL_ERROR_OUT_OF_MEMORY;
2787 		memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects);
/* Frontbuffer skip path: buffer is already on screen, nothing to enqueue.
 * The unused acquire fence must still be closed to avoid an fd leak. */
2790 	if (!wl_egl_surface->need_to_enqueue ||
2791 		!wl_egl_buffer->need_to_commit) {
2793 		if (acquire_fence != -1) {
2794 			close(acquire_fence);
2797 		TPL_LOG_T("FRONTBUFFER_MODE", "[ENQ_SKIP] tbm_surface(%p) bo(%d) need not to enqueue",
2798 				  tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
2799 		TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2800 		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2801 		return TPL_ERROR_NONE;
2804 	/* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
2805 	 * commit if surface->frontbuffer that is already set and the tbm_surface
2806 	 * client want to enqueue are the same.
2808 	if (surface->is_frontbuffer_mode) {
2809 		/* The first buffer to be activated in frontbuffer mode must be
2810 		 * committed. Subsequent frames do not need to be committed because
2811 		 * the buffer is already displayed.
2813 		if (surface->frontbuffer == tbm_surface)
2814 			wl_egl_surface->need_to_enqueue = TPL_FALSE;
/* Replace any stale acquire fence; ownership of acquire_fence moves here. */
2817 	if (wl_egl_buffer->acquire_fence_fd != -1)
2818 		close(wl_egl_buffer->acquire_fence_fd);
2820 	wl_egl_buffer->acquire_fence_fd = acquire_fence;
/* Claim the pending presentation-feedback sync fd for this frame, if any. */
2822 	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2823 	if (wl_egl_surface->presentation_sync.fd != -1) {
2824 		wl_egl_buffer->presentation_sync_fd = wl_egl_surface->presentation_sync.fd;
2825 		wl_egl_surface->presentation_sync.fd = -1;
2827 	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* Likewise for the commit sync fd. */
2829 	tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
2830 	if (wl_egl_surface->commit_sync.fd != -1) {
2831 		wl_egl_buffer->commit_sync_fd = wl_egl_surface->commit_sync.fd;
2832 		wl_egl_surface->commit_sync.fd = -1;
2833 		TRACE_ASYNC_BEGIN(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
2834 						  _get_tbm_surface_bo_name(tbm_surface));
2836 	tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
2838 	wl_egl_buffer->status = ENQUEUED;
2840 			  "[ENQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2841 			  wl_egl_buffer, tbm_surface, bo_name, acquire_fence);
2843 	tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* dequeue_buffer consults last_enq_buffer after a queue reset. */
2845 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2846 	wl_egl_surface->last_enq_buffer = tbm_surface;
2847 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2849 	tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
2851 	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2852 		tbm_surface_internal_unref(tbm_surface);
2853 		TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
2854 				tbm_surface, wl_egl_surface, tsq_err);
2855 		TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2856 		return TPL_ERROR_INVALID_OPERATION;
/* Drop the dequeue-time ref; the queue/compositor path holds its own. */
2859 	tbm_surface_internal_unref(tbm_surface);
2861 	TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2863 	return TPL_ERROR_NONE;
/* Worker-thread dispatch for a buffer's acquire-fence wait source: runs when
 * the client's acquire fence fd signals (rendering finished). Either commits
 * the buffer immediately or parks it on the vblank waiting list. */
2867 __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message)
2869 	tpl_wl_egl_buffer_t wl_egl_buffer(tpl_gsource_get_data(gsource));
2870 	TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, TPL_FALSE);
2872 	tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2873 	TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_FALSE);
2875 	tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
2876 	TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_FALSE);
2877 	TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface), TPL_FALSE);
2879 	TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2880 					wl_egl_buffer->acquire_fence_fd);
2882 	TPL_LOG_D("[RENDER DONE]", "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p)",
2883 			  wl_egl_surface, wl_egl_buffer, tbm_surface);
/* surf_mutex before buffer mutex — same order as the rest of the backend. */
2885 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2886 	tpl_gmutex_lock(&wl_egl_buffer->mutex);
2888 	wl_egl_buffer->status = WAITING_VBLANK;
2890 	TPL_LOG_D("[FINALIZE]", "wl_egl_surface(%p) wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
2891 			  wl_egl_surface, wl_egl_buffer, wl_egl_buffer->waiting_source,
2892 			  wl_egl_buffer->acquire_fence_fd);
/* The disposable gsource owns/consumed the fence fd; forget both here. */
2894 	wl_egl_buffer->acquire_fence_fd = -1;
2895 	wl_egl_buffer->waiting_source = NULL;
2897 	if (!wl_egl_surface->vblank_enable || wl_egl_surface->vblank_done) {
2898 		_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
/* Wake any dequeue_buffer() waiting for this buffer to reach COMMITTED. */
2899 		tpl_gcond_signal(&wl_egl_buffer->cond);
2901 		tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
2902 		__tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers,
2904 		tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
2907 	tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2908 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* Finalizer for the disposable fence-wait gsource; nothing to clean up. */
2914 __thread_func_waiting_source_finalize(tpl_gsource *gsource)
2916 	TPL_IGNORE(gsource);
/* gsource vtable for per-buffer acquire-fence wait sources. */
2919 static tpl_gsource_functions buffer_funcs = {
2922 	.dispatch = __thread_func_waiting_source_dispatch,
2923 	.finalize = __thread_func_waiting_source_finalize,
/* Worker-thread loop: acquire every ready buffer from the tbm queue and
 * route it toward commit. Depending on the acquire fence and explicit-sync
 * support, a buffer is committed now, handed to a fence-wait gsource
 * (WAITING_SIGNALED), or parked on the vblank list (WAITING_VBLANK).
 * Returns TPL_ERROR_NONE, or TPL_ERROR_INVALID_OPERATION on acquire failure. */
2927 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
2929 	tbm_surface_h tbm_surface = NULL;
2930 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2931 	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2932 	tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2933 	tpl_bool_t ready_to_commit = TPL_FALSE;
2935 	while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) {
2936 		tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue,
2938 		if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2939 			TPL_ERR("Failed to acquire from tbm_queue(%p)",
2940 					wl_egl_surface->tbm_queue);
2941 			return TPL_ERROR_INVALID_OPERATION;
/* Held until the buffer is released back to the queue. */
2944 		tbm_surface_internal_ref(tbm_surface);
2946 		wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2947 		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
2948 									   "wl_egl_buffer sould be not NULL");
2950 		tpl_gmutex_lock(&wl_egl_buffer->mutex);
2952 		wl_egl_buffer->status = ACQUIRED;
2954 		TPL_LOG_T("WL_EGL", "[ACQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2955 				  wl_egl_buffer, tbm_surface,
2956 				  _get_tbm_surface_bo_name(tbm_surface));
2958 		if (wl_egl_buffer->acquire_fence_fd != -1) {
2959 #if TIZEN_FEATURE_ENABLE
/* With explicit sync the compositor waits on the fence; commit immediately. */
2960 			if (wl_egl_display->use_explicit_sync)
2961 				ready_to_commit = TPL_TRUE;
2965 			if (wl_egl_buffer->waiting_source) {
2966 				tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
2967 				wl_egl_buffer->waiting_source = NULL;
/* Otherwise wait on the fence fd in the worker thread via a disposable
 * gsource; dispatch continues the commit once the fence signals. */
2970 			wl_egl_buffer->waiting_source =
2971 				tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
2972 								   wl_egl_buffer->acquire_fence_fd,
2973 								   FD_TYPE_FENCE, &buffer_funcs,
2974 								   SOURCE_TYPE_DISPOSABLE);
2975 			wl_egl_buffer->status = WAITING_SIGNALED;
2977 			TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2978 							  wl_egl_buffer->acquire_fence_fd);
2980 			ready_to_commit = TPL_FALSE;
2983 			ready_to_commit = TPL_TRUE;
/* Commit only when no vblank throttle is pending; otherwise queue for it. */
2986 		if (ready_to_commit) {
2987 			if (!wl_egl_surface->vblank_enable || wl_egl_surface->vblank_done)
2988 				ready_to_commit = TPL_TRUE;
2990 				wl_egl_buffer->status = WAITING_VBLANK;
2991 				tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
2992 				__tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers, wl_egl_buffer);
2993 				tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
2994 				ready_to_commit = TPL_FALSE;
2998 		if (ready_to_commit) {
2999 			_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
3000 			tpl_gcond_signal(&wl_egl_buffer->cond);
3003 		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3006 	return TPL_ERROR_NONE;
3009 /* -- BEGIN -- tdm_client vblank callback function */
/* tdm_client vblank handler (worker thread): marks vblank done, then commits
 * buffer(s) parked on the vblank waiting list. Normally one buffer per
 * vblank; on a TDM error (e.g. timeout) the whole list is flushed. */
3011 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
3012 					   unsigned int sequence, unsigned int tv_sec,
3013 					   unsigned int tv_usec, void *user_data)
3015 	tpl_wl_egl_surface_t wl_egl_surface(user_data);
3017 	TRACE_ASYNC_END((intptr_t)wl_egl_surface, "WAIT_VBLANK");
3018 	TPL_LOG_D("[VBLANK_DONE]", "wl_egl_surface(%p)", wl_egl_surface);
3020 	if (error == TDM_ERROR_TIMEOUT)
3021 		TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)",
3024 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
3025 	wl_egl_surface->vblank_done = TPL_TRUE;
3027 	if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
3028 		tpl_bool_t is_empty = TPL_TRUE;
/* Pop under the vblank mutex, commit outside of it (buffer mutex only). */
3030 			tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
3031 			tpl_wl_egl_buffer_t wl_egl_buffer(
3032 				__tpl_list_pop_front( wl_egl_surface->vblank->waiting_buffers, NULL));
3033 			is_empty = __tpl_list_is_empty(wl_egl_surface->vblank->waiting_buffers);
3034 			tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
3036 			if (!wl_egl_buffer) break;
3038 			tpl_gmutex_lock(&wl_egl_buffer->mutex);
3039 			_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
3040 			tpl_gcond_signal(&wl_egl_buffer->cond);
3041 			tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3043 			/* If tdm error such as TIMEOUT occurred,
3044 			 * flush all vblank waiting buffers of its wl_egl_surface.
3045 			 * Otherwise, only one wl_egl_buffer will be committed per one vblank event.
3047 			if (error == TDM_ERROR_NONE && wl_egl_surface->post_interval > 0)
3049 		} while (!is_empty);
/* Re-evaluate throttling: a post_interval of 0 disables vblank waits. */
3051 	wl_egl_surface->vblank_enable = (wl_egl_surface->post_interval > 0);
3053 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
3055 /* -- END -- tdm_client vblank callback function */
3057 #if TIZEN_FEATURE_ENABLE
/* zwp_linux_buffer_release_v1 "fenced_release" handler: the compositor is
 * done with the buffer but hands back a fence fd that must signal before the
 * buffer may be written again. Stores the fence, marks RELEASED, and returns
 * the buffer to the tbm queue. */
3059 __cb_buffer_fenced_release(void *data,
3060 		    struct zwp_linux_buffer_release_v1 *release, int32_t fence)
3062 	tpl_wl_egl_buffer_t wl_egl_buffer(data);
3063 	tbm_surface_h tbm_surface = NULL;
3065 	TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
3067 	tbm_surface = wl_egl_buffer->tbm_surface;
3069 	if (tbm_surface_internal_is_valid(tbm_surface)) {
3070 		tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
3072 		tpl_gmutex_lock(&wl_egl_buffer->mutex);
/* Only a COMMITTED buffer can transition to RELEASED via this event. */
3073 		if (wl_egl_buffer->status == COMMITTED) {
3074 			tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
/* One release object per commit; destroy it now that it has fired. */
3076 			zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
3077 			wl_egl_buffer->buffer_release = NULL;
/* Fence ownership moves to the buffer; handed to EGL on the next dequeue. */
3079 			wl_egl_buffer->release_fence_fd = fence;
3080 			wl_egl_buffer->status = RELEASED;
3082 			TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
3083 					   _get_tbm_surface_bo_name(tbm_surface),
3085 			TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3086 							_get_tbm_surface_bo_name(tbm_surface));
3089 					  "[FENCED_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
3090 					  wl_egl_buffer, tbm_surface,
3091 					  _get_tbm_surface_bo_name(tbm_surface),
3094 			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
3096 			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
3097 				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
3100 		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* Unref (taken at acquire) only when the queue release actually happened. */
3102 		if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
3103 			tbm_surface_internal_unref(tbm_surface);
3106 		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* zwp_linux_buffer_release_v1 "immediate_release" handler: identical to the
 * fenced variant except no fence is provided — the buffer is immediately
 * reusable (release_fence_fd stays -1). */
3111 __cb_buffer_immediate_release(void *data,
3112 			      struct zwp_linux_buffer_release_v1 *release)
3114 	tpl_wl_egl_buffer_t wl_egl_buffer(data);
3115 	tbm_surface_h tbm_surface = NULL;
3117 	TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
3119 	tbm_surface = wl_egl_buffer->tbm_surface;
3121 	if (tbm_surface_internal_is_valid(tbm_surface)) {
3122 		tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
3124 		tpl_gmutex_lock(&wl_egl_buffer->mutex);
3125 		if (wl_egl_buffer->status == COMMITTED) {
3126 			tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3128 			zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
3129 			wl_egl_buffer->buffer_release = NULL;
/* No fence: buffer is free for reuse right away. */
3131 			wl_egl_buffer->release_fence_fd = -1;
3132 			wl_egl_buffer->status = RELEASED;
3134 			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
3135 					   _get_tbm_surface_bo_name(tbm_surface));
3136 			TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3137 							_get_tbm_surface_bo_name(tbm_surface));
3140 					  "[IMMEDIATE_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
3141 					  wl_egl_buffer, tbm_surface,
3142 					  _get_tbm_surface_bo_name(tbm_surface));
3144 			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
3146 			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
3147 				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
3150 		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3152 		if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
3153 			tbm_surface_internal_unref(tbm_surface);
3156 		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* Listener for explicit-sync buffer release events.
 * (Identifier spelling "listner" kept — it is referenced elsewhere.) */
3160 static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
3161 	__cb_buffer_fenced_release,
3162 	__cb_buffer_immediate_release,
/* Legacy wl_buffer.release handler (non-explicit-sync path): when the
 * compositor releases a COMMITTED buffer, return it to the tbm queue,
 * mark it RELEASED, and drop the acquire-time ref. */
3167 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
3169 	tpl_wl_egl_buffer_t wl_egl_buffer(data);
3170 	tbm_surface_h tbm_surface = NULL;
3172 	TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer)
3174 	tbm_surface = wl_egl_buffer->tbm_surface;
3176 	if (tbm_surface_internal_is_valid(tbm_surface)) {
3177 		tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3178 		tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
3180 		tpl_gmutex_lock(&wl_egl_buffer->mutex);
3182 		if (wl_egl_buffer->status == COMMITTED) {
3184 			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
3186 			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
3187 				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
3189 			wl_egl_buffer->status = RELEASED;
3191 			TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
3192 			TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3193 							_get_tbm_surface_bo_name(tbm_surface));
3195 			TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
3196 					  wl_egl_buffer->wl_buffer, tbm_surface,
3197 					  _get_tbm_surface_bo_name(tbm_surface));
3200 		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3202 		if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
3203 			tbm_surface_internal_unref(tbm_surface);
3205 		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* wl_buffer listener; only the release event is handled. The cast hides
 * the struct wl_buffer * vs struct wl_proxy * mismatch in the callback
 * signature. */
3209 static const struct wl_buffer_listener wl_buffer_release_listener = {
3210 	(void *)__cb_wl_buffer_release,
3212 #if TIZEN_FEATURE_ENABLE
/* wp_presentation_feedback.sync_output: not used by this backend — all
 * arguments are deliberately ignored. */
3214 __cb_presentation_feedback_sync_output(void *data,
3215 									   struct wp_presentation_feedback *presentation_feedback,
3216 									   struct wl_output *output)
3219 	TPL_IGNORE(presentation_feedback);
/* wp_presentation_feedback.presented: the frame reached the screen.
 * Signals the presentation sync fd (if any), destroys the feedback proxy
 * and removes this pst_feedback from the surface's pending list.
 * The actual presentation timestamp arguments are ignored. */
3225 __cb_presentation_feedback_presented(void *data,
3226 									 struct wp_presentation_feedback *presentation_feedback,
3230 									 uint32_t refresh_nsec,
3235 	TPL_IGNORE(tv_sec_hi);
3236 	TPL_IGNORE(tv_sec_lo);
3237 	TPL_IGNORE(tv_nsec);
3238 	TPL_IGNORE(refresh_nsec);
3243 	struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
3244 	tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
3246 	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
3248 	TPL_LOG_D("[PRESENTED]", "pst_feedback(%p) presentation_feedback(%p) bo(%d)",
3249 			  pst_feedback, presentation_feedback, pst_feedback->bo_name);
/* Wake any waiter blocked on the presentation sync fd, then mark the fd
 * consumed so it is not signalled twice. */
3251 	if (pst_feedback->pst_sync_fd != -1) {
3252 		TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
3253 						"[PRESENTATION_SYNC] bo(%d)",
3254 						pst_feedback->bo_name);
3255 		send_signal(pst_feedback->pst_sync_fd, "PST_FEEDBACK");
3256 		pst_feedback->pst_sync_fd = -1;
3259 	wp_presentation_feedback_destroy(presentation_feedback);
3261 	pst_feedback->presentation_feedback = NULL;
3262 	pst_feedback->wl_egl_surface = NULL;
3263 	pst_feedback->bo_name = 0;
3265 	__tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
3270 	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* wp_presentation_feedback.discarded: the frame was never shown.
 * Performs the same cleanup as the presented case — waiters on the
 * presentation sync fd are unblocked either way. */
3274 __cb_presentation_feedback_discarded(void *data,
3275 									 struct wp_presentation_feedback *presentation_feedback)
3277 	struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
3278 	tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
3280 	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
3282 	TPL_LOG_D("[DISCARDED]", "pst_feedback(%p) presentation_feedback(%p) bo(%d)",
3283 			  pst_feedback, presentation_feedback, pst_feedback->bo_name);
3285 	if (pst_feedback->pst_sync_fd != -1) {
3286 		TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
3287 						"[PRESENTATION_SYNC] bo(%d)",
3288 						pst_feedback->bo_name);
3289 		send_signal(pst_feedback->pst_sync_fd, "PST_FEEDBACK");
3290 		pst_feedback->pst_sync_fd = -1;
3293 	wp_presentation_feedback_destroy(presentation_feedback);
3295 	pst_feedback->presentation_feedback = NULL;
3296 	pst_feedback->wl_egl_surface = NULL;
3297 	pst_feedback->bo_name = 0;
3299 	__tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
3304 	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* wp_presentation_feedback listener: sync_output is a no-op; presented
 * and discarded both release the pst_feedback bookkeeping. */
3307 static const struct wp_presentation_feedback_listener feedback_listener = {
3308 	__cb_presentation_feedback_sync_output, /* sync_output feedback -*/
3309 	__cb_presentation_feedback_presented,
3310 	__cb_presentation_feedback_discarded
/* Request a vblank event from TDM for this surface using the surface's
 * current post_interval. On success, vblank_done is cleared until
 * __cb_tdm_client_vblank fires; on TDM failure returns
 * TPL_ERROR_INVALID_OPERATION. Runs on the backend thread. */
3315 _thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
3317 	tdm_error tdm_err = TDM_ERROR_NONE;
3318 	tpl_surface_vblank_t *vblank = wl_egl_surface->vblank;
3320 	tdm_err = tdm_client_vblank_wait(vblank->tdm_vblank,
3321 									 wl_egl_surface->post_interval,
3322 									 __cb_tdm_client_vblank,
3323 									 (void *)wl_egl_surface);
3325 	if (tdm_err == TDM_ERROR_NONE) {
3326 		wl_egl_surface->vblank_done = TPL_FALSE;
3327 		TRACE_ASYNC_BEGIN((intptr_t)wl_egl_surface, "WAIT_VBLANK");
3329 		TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
3330 		return TPL_ERROR_INVALID_OPERATION;
3333 	return TPL_ERROR_NONE;
/* Commit one wl_egl_buffer to the compositor. Runs on the backend thread.
 * Sequence:
 *   1. lazily create the wl_buffer wrapping the tbm_surface;
 *   2. add the legacy release listener (skipped when explicit sync with an
 *      acquire fence is in use — release then comes via zwp buffer_release);
 *   3. optionally create a wp_presentation feedback tied to the buffer's
 *      presentation sync fd;
 *   4. apply buffer/window transforms, attach, damage, set serial;
 *   5. explicit sync: set acquire fence and register the release listener;
 *   6. wl_surface_commit + flush, update buffer state to COMMITTED;
 *   7. arm a vblank wait if post_interval > 0;
 *   8. signal the buffer's commit sync fd. */
3337 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
3338 						  tpl_wl_egl_buffer_t *wl_egl_buffer)
3340 	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3341 	struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
3342 	struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
3345 	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
3346 								   "wl_egl_buffer sould be not NULL");
/* The wl_buffer is created once per wl_egl_buffer and cached; creation
 * failure is fatal for this surface. */
3348 	if (wl_egl_buffer->wl_buffer == NULL) {
3349 		wl_egl_buffer->wl_buffer =
3350 			(struct wl_proxy *)wayland_tbm_client_create_buffer(
3351 				wl_egl_display->wl_tbm_client,
3352 				wl_egl_buffer->tbm_surface);
3354 		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
3355 									   "[FATAL] Failed to create wl_buffer");
3357 		TPL_INFO("[WL_BUFFER_CREATE]",
3358 				 "wl_egl_surface(%p) wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
3359 				 wl_egl_surface, wl_egl_buffer, wl_egl_buffer->wl_buffer,
3360 				 wl_egl_buffer->tbm_surface);
/* With explicit sync active (and an acquire fence present) the release
 * event arrives via zwp_linux_buffer_release_v1, so the wl_buffer release
 * listener is only added for the fallback path. */
3362 #if TIZEN_FEATURE_ENABLE
3363 		if (!wl_egl_display->use_explicit_sync ||
3364 			wl_egl_buffer->acquire_fence_fd == -1)
3367 			wl_buffer_add_listener((struct wl_buffer *)wl_egl_buffer->wl_buffer,
3368 								   &wl_buffer_release_listener,
3373 	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
3375 #if TIZEN_FEATURE_ENABLE
3376 	/* create presentation feedback and add listener */
3377 	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
3378 	if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
3380 		struct pst_feedback *pst_feedback = NULL;
3381 		pst_feedback = (struct pst_feedback *) calloc(1, sizeof(struct pst_feedback));
3383 			pst_feedback->presentation_feedback =
3384 				wp_presentation_feedback(wl_egl_display->presentation,
/* Ownership of the presentation sync fd moves from the buffer to the
 * pst_feedback; the feedback callbacks signal and consume it. */
3387 			pst_feedback->wl_egl_surface = wl_egl_surface;
3388 			pst_feedback->bo_name = wl_egl_buffer->bo_name;
3390 			pst_feedback->pst_sync_fd = wl_egl_buffer->presentation_sync_fd;
3391 			wl_egl_buffer->presentation_sync_fd = -1;
3393 			wp_presentation_feedback_add_listener(pst_feedback->presentation_feedback,
3394 												  &feedback_listener, pst_feedback);
3395 			__tpl_list_push_back(wl_egl_surface->presentation_feedbacks, pst_feedback);
3396 			TRACE_ASYNC_BEGIN(pst_feedback->pst_sync_fd,
3397 							  "[PRESENTATION_SYNC] bo(%d)",
3398 							  pst_feedback->bo_name);
/* On failure, signal the sync fd anyway so no consumer blocks forever. */
3400 			TPL_ERR("Failed to create presentation feedback. wl_egl_buffer(%p)",
3402 			send_signal(wl_egl_buffer->presentation_sync_fd, "PST_SYNC");
3403 			wl_egl_buffer->presentation_sync_fd = -1;
3406 	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* Window-rotation transform is applied per-buffer via wayland-tbm and
 * reset once delivered. */
3409 	if (wl_egl_buffer->w_rotated == TPL_TRUE) {
3411 		wayland_tbm_client_set_buffer_transform(
3412 			wl_egl_display->wl_tbm_client,
3413 			(void *)wl_egl_buffer->wl_buffer,
3414 			wl_egl_buffer->w_transform);
3415 		TPL_INFO("[W_TRANSFORM]",
3416 				 "wl_egl_surface(%p) wl_egl_buffer(%p) w_transform(%d)",
3417 				 wl_egl_surface, wl_egl_buffer, wl_egl_buffer->w_transform);
3419 		wl_egl_buffer->w_rotated = TPL_FALSE;
/* Surface-level transform is only re-sent when it changed. */
3422 	if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
3424 		wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
3425 		TPL_INFO("[TRANSFORM]",
3426 				 "wl_egl_surface(%p) wl_egl_buffer(%p) transform(%d -> %d)",
3427 				 wl_egl_surface, wl_egl_buffer,
3428 				 wl_egl_surface->latest_transform, wl_egl_buffer->transform);
3430 		wl_egl_surface->latest_transform = wl_egl_buffer->transform;
3433 	if (wl_egl_window) {
3434 		wl_egl_window->attached_width = wl_egl_buffer->width;
3435 		wl_egl_window->attached_height = wl_egl_buffer->height;
3438 	wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer,
3439 					  wl_egl_buffer->dx, wl_egl_buffer->dy);
/* No damage rects supplied: damage the whole buffer. Which request is
 * used (wl_surface_damage vs damage_buffer) appears to depend on the
 * wl_surface version obtained above — the check line is not visible
 * here; TODO(review) confirm. */
3441 	if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) {
3443 			wl_surface_damage(wl_surface,
3444 							  wl_egl_buffer->dx, wl_egl_buffer->dy,
3445 							  wl_egl_buffer->width, wl_egl_buffer->height);
3447 			wl_surface_damage_buffer(wl_surface,
3449 									 wl_egl_buffer->width, wl_egl_buffer->height);
/* EGL damage rects are bottom-left origin; flip Y for the wayland
 * top-left origin coordinate space. Each rect is {x, y, w, h}. */
3453 		for (i = 0; i < wl_egl_buffer->num_rects; i++) {
3455 							  wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] +
3456 									  wl_egl_buffer->rects[i * 4 + 3]);
3458 				wl_surface_damage(wl_surface,
3459 								  wl_egl_buffer->rects[i * 4 + 0],
3461 								  wl_egl_buffer->rects[i * 4 + 2],
3462 								  wl_egl_buffer->rects[i * 4 + 3]);
3464 				wl_surface_damage_buffer(wl_surface,
3465 										 wl_egl_buffer->rects[i * 4 + 0],
3467 										 wl_egl_buffer->rects[i * 4 + 2],
3468 										 wl_egl_buffer->rects[i * 4 + 3]);
3473 	wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
3474 										 (void *)wl_egl_buffer->wl_buffer,
3475 										 wl_egl_buffer->serial);
3476 #if TIZEN_FEATURE_ENABLE
/* Explicit sync: hand the acquire fence to the compositor (it takes a
 * dup; our fd is closed immediately) and register for the matching
 * per-commit buffer_release object. */
3477 	if (wl_egl_display->use_explicit_sync &&
3478 		wl_egl_buffer->acquire_fence_fd != -1) {
3480 		zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
3481 															   wl_egl_buffer->acquire_fence_fd);
3482 		TPL_LOG_D("[SET_ACQUIRE_FENCE][1/2]", "wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)",
3483 				  wl_egl_surface, wl_egl_buffer->tbm_surface, wl_egl_buffer->acquire_fence_fd);
3484 		close(wl_egl_buffer->acquire_fence_fd);
3485 		wl_egl_buffer->acquire_fence_fd = -1;
3487 		wl_egl_buffer->buffer_release =
3488 			zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync);
3489 		if (!wl_egl_buffer->buffer_release) {
3490 			TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface);
3492 			zwp_linux_buffer_release_v1_add_listener(
3493 				wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer);
3494 			TPL_LOG_D("[SET_ACQUIRE_FENCE][2/2]", "add explicit_sync_release_listener.");
3499 	wl_surface_commit(wl_surface);
3501 	wl_display_flush(wl_egl_display->wl_display);
3503 	TRACE_ASYNC_BEGIN((intptr_t)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3504 					  wl_egl_buffer->bo_name);
3506 	wl_egl_buffer->need_to_commit = TPL_FALSE;
3507 	wl_egl_buffer->status = COMMITTED;
3508 	if (wl_egl_surface->last_enq_buffer == wl_egl_buffer->tbm_surface)
3509 		wl_egl_surface->last_enq_buffer = NULL;
3512 			  "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
3513 			  wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,
3514 			  wl_egl_buffer->bo_name);
/* Frame pacing: only arm a vblank wait when an interval is requested. A
 * failed wait is logged but does not fail the commit. */
3516 	if (wl_egl_surface->post_interval > 0 && wl_egl_surface->vblank != NULL) {
3517 		wl_egl_surface->vblank_enable = TPL_TRUE;
3518 		if (_thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
3519 			TPL_ERR("Failed to set wait vblank.");
3522 	tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
/* Unblock anyone waiting on this buffer's commit sync fd, then mark it
 * consumed. */
3524 	if (wl_egl_buffer->commit_sync_fd != -1) {
3525 		TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
3526 						wl_egl_buffer->bo_name);
3527 		TPL_LOG_D("[COMMIT_SYNC][SEND]", "wl_egl_surface(%p) commit_sync_fd(%d)",
3528 				  wl_egl_surface, wl_egl_buffer->commit_sync_fd);
3529 		send_signal(wl_egl_buffer->commit_sync_fd, "COMMIT_SYNC");
3530 		wl_egl_buffer->commit_sync_fd = -1;
3533 	tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
/* Write a 64-bit counter value to an eventfd, waking its readers.
 * Logs on short/failed write; the visible fragment does not show the
 * declared return type — presumably a tpl_result_t error code. */
3537 _write_to_eventfd(int eventfd, uint64_t value)
3541 	ret = write(eventfd, &value, sizeof(uint64_t));
3543 		TPL_ERR("failed to write to fd(%d)", eventfd);
/* Signal a sync fd by writing one event to it; "type" is used only for
 * the error message. Negative fds are silently ignored (callers pass -1
 * for already-consumed fds). NOTE(review): the initial value of ret is
 * declared on a line not visible in this fragment. */
3550 static int send_signal(int fd, const char *type)
3553 	if (fd < 0) return ret;
3555 	ret = _write_to_eventfd(fd, 1);
3557 		TPL_ERR("Failed to send %s signal to fd(%d)", type, fd);
/* Populate the display backend vtable with the wayland-egl-thread
 * implementations. Called once when TPL selects this backend. */
3565 __tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend)
3567 	TPL_ASSERT(backend);
3569 	backend->type = TPL_BACKEND_WAYLAND_THREAD;
3570 	backend->data = NULL;
3572 	backend->init = __tpl_wl_egl_display_init;
3573 	backend->fini = __tpl_wl_egl_display_fini;
3574 	backend->query_config = __tpl_wl_egl_display_query_config;
3575 	backend->filter_config = __tpl_wl_egl_display_filter_config;
3576 	backend->get_window_info = __tpl_wl_egl_display_get_window_info;
3577 	backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
3578 	backend->get_buffer_from_native_pixmap =
3579 		__tpl_wl_egl_display_get_buffer_from_native_pixmap;
/* Populate the surface backend vtable with the wayland-egl-thread
 * implementations (dequeue/enqueue/cancel, rotation, post interval,
 * size query, fence availability). */
3583 __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
3585 	TPL_ASSERT(backend);
3587 	backend->type = TPL_BACKEND_WAYLAND_THREAD;
3588 	backend->data = NULL;
3590 	backend->init = __tpl_wl_egl_surface_init;
3591 	backend->fini = __tpl_wl_egl_surface_fini;
3592 	backend->validate = __tpl_wl_egl_surface_validate;
3593 	backend->cancel_dequeued_buffer =
3594 		__tpl_wl_egl_surface_cancel_buffer;
3595 	backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
3596 	backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
3597 	backend->set_rotation_capability =
3598 		__tpl_wl_egl_surface_set_rotation_capability;
3599 	backend->set_post_interval =
3600 		__tpl_wl_egl_surface_set_post_interval;
3602 		__tpl_wl_egl_surface_get_size;
3603 	backend->fence_sync_is_available =
3604 		__tpl_wl_egl_surface_fence_sync_is_available;
/* Final destructor for a wl_egl_buffer (invoked when its refcount drops
 * / tbm user-data is destroyed). Unlinks the buffer from the surface's
 * buffer and vblank-waiting lists, destroys the wl_buffer proxy and any
 * explicit-sync objects, closes/ signals outstanding sync fds so waiters
 * never block on a dead buffer, then frees the struct. */
3608 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
3610 	tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3611 	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3613 	TPL_INFO("[BUFFER_FREE]", "wl_egl_surface(%p) wl_egl_buffer(%p)",
3614 			 wl_egl_surface, wl_egl_buffer);
3615 	TPL_INFO("[BUFFER_FREE]", "tbm_surface(%p) bo(%d)",
3616 			 wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name);
3618 	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3619 	if (wl_egl_surface->buffers) {
3620 		__tpl_list_remove_data(wl_egl_surface->buffers, (void *)wl_egl_buffer,
3623 	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
/* The buffer may still be queued for a vblank-paced commit; drop it from
 * that list too. */
3625 	if (wl_egl_surface->vblank) {
3626 		tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
3627 		if (wl_egl_surface->vblank->waiting_buffers)
3628 			__tpl_list_remove_data(wl_egl_surface->vblank->waiting_buffers, (void *)wl_egl_buffer,
3630 		tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
3633 	tpl_gmutex_lock(&wl_egl_buffer->mutex);
3635 	if (wl_egl_display) {
3636 		if (wl_egl_display->wl_tbm_client && wl_egl_buffer->wl_buffer) {
3637 			wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
3638 											  (void *)wl_egl_buffer->wl_buffer);
3639 			wl_egl_buffer->wl_buffer = NULL;
3642 		wl_display_flush(wl_egl_display->wl_display);
3646 #if TIZEN_FEATURE_ENABLE
3647 	if (wl_egl_buffer->buffer_release) {
3648 		zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
3649 		wl_egl_buffer->buffer_release = NULL;
3652 	if (wl_egl_buffer->release_fence_fd != -1) {
3653 		close(wl_egl_buffer->release_fence_fd);
3654 		wl_egl_buffer->release_fence_fd = -1;
3658 	if (wl_egl_buffer->waiting_source) {
3659 		tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
3660 		wl_egl_buffer->waiting_source = NULL;
/* send_signal() ignores fds that are already -1, so these are safe to
 * call unconditionally. */
3663 	send_signal(wl_egl_buffer->commit_sync_fd, "COMMIT_SYNC");
3664 	wl_egl_buffer->commit_sync_fd = -1;
3666 	send_signal(wl_egl_buffer->presentation_sync_fd, "PST_SYNC");
3667 	wl_egl_buffer->presentation_sync_fd = -1;
3669 	if (wl_egl_buffer->rects) {
3670 		free(wl_egl_buffer->rects);
3671 		wl_egl_buffer->rects = NULL;
3672 		wl_egl_buffer->num_rects = 0;
3675 	wl_egl_buffer->wl_egl_surface = NULL;
3676 	wl_egl_buffer->tbm_surface = NULL;
3677 	wl_egl_buffer->bo_name = -1;
3678 	wl_egl_buffer->status = RELEASED;
3680 	tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3681 	tpl_gmutex_clear(&wl_egl_buffer->mutex);
3682 	tpl_gcond_clear(&wl_egl_buffer->cond);
3683 	free(wl_egl_buffer);
/* Return the global (exported) name of the surface's first bo — used
 * purely as a stable identifier in logs and traces. */
3687 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
3689 	return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
/* Debug helper: dump every wl_egl_buffer tracked by the surface with its
 * bo name and lifecycle status. Holds buffers_mutex for the whole walk. */
3693 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
3695 	tpl_list_node_t *node = NULL;
3699 	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3700 	buffer_cnt = __tpl_list_get_count(wl_egl_surface->buffers);
3702 	node = __tpl_list_get_front_node(wl_egl_surface->buffers);
3705 		tpl_wl_egl_buffer_t wl_egl_buffer(__tpl_list_node_get_data(node));
3706 		TPL_INFO("[BUFFERS_INFO]",
3707 				 "[%d/%d] wl_egl_surface(%p), wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
3708 				 ++idx, buffer_cnt, wl_egl_surface, wl_egl_buffer,
3709 				 wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name,
3710 				 status_to_string[wl_egl_buffer->status]);
3711 	} while ((node = __tpl_list_node_next(node)));
3712 	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
3716 _check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface)
3718 tpl_list_node_t *node = NULL;
3719 tpl_bool_t ret = TPL_FALSE;
3722 if (!wl_egl_surface || !tbm_surface)
3725 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3726 node = __tpl_list_get_front_node(wl_egl_surface->buffers);
3729 tpl_wl_egl_buffer_t wl_egl_buffer(__tpl_list_node_get_data(node));
3730 if (wl_egl_buffer->tbm_surface == tbm_surface) {
3734 } while ((node = __tpl_list_node_next(node)));
3736 if (ret == TPL_FALSE) {
3737 TPL_ERR("tbm_surface(%p) is not owned by wl_egl_surface(%p)",
3738 tbm_surface, wl_egl_surface);
3741 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);