2 #include "tpl_internal.h"
7 #include <sys/eventfd.h>
9 #include <tbm_bufmgr.h>
10 #include <tbm_surface.h>
11 #include <tbm_surface_internal.h>
12 #include <tbm_surface_queue.h>
14 #include <wayland-client.h>
15 #include <wayland-tbm-server.h>
16 #include <wayland-tbm-client.h>
17 #include <wayland-egl-backend.h>
19 #include <tdm_client.h>
21 #include "wayland-egl-tizen/wayland-egl-tizen.h"
22 #include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
24 #include <tizen-surface-client-protocol.h>
25 #include <presentation-time-client-protocol.h>
26 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
28 #include "tpl_utils_gthread.h"
30 static int wl_egl_buffer_key;
31 #define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key)
33 /* In wayland, application and compositor create their own drawing buffers. Recommended size is more than 2. */
34 #define CLIENT_QUEUE_SIZE 3
35 #define BUFFER_ARRAY_SIZE (CLIENT_QUEUE_SIZE * 2)
37 typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
38 typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
39 typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t;
41 struct _tpl_wl_egl_display {
42 tpl_gsource *disp_source;
44 tpl_gmutex wl_event_mutex;
46 struct wl_display *wl_display;
47 struct wl_event_queue *ev_queue;
48 struct wayland_tbm_client *wl_tbm_client;
49 int last_error; /* errno of the last wl_display error*/
51 tpl_bool_t wl_initialized;
52 tpl_bool_t tdm_initialized;
54 tdm_client *tdm_client;
55 tpl_gsource *tdm_source;
58 tpl_bool_t use_wait_vblank;
59 tpl_bool_t use_explicit_sync;
62 struct tizen_surface_shm *tss; /* used for surface buffer_flush */
63 struct wp_presentation *presentation; /* for presentation feedback */
64 struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
67 struct _tpl_wl_egl_surface {
68 tpl_gsource *surf_source;
70 tbm_surface_queue_h tbm_queue;
72 struct wl_egl_window *wl_egl_window;
73 struct wl_surface *wl_surface;
74 struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
75 struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */
77 tdm_client_vblank *vblank;
79 /* surface information */
90 tpl_wl_egl_display_t *wl_egl_display;
91 tpl_surface_t *tpl_surface;
93 /* wl_egl_buffer array for buffer tracing */
94 tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE];
95 int buffer_cnt; /* the number of using wl_egl_buffers */
96 tpl_gmutex buffers_mutex;
98 tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
99 tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
111 tpl_gmutex surf_mutex;
114 /* for waiting draw done */
115 tpl_bool_t use_render_done_fence;
116 tpl_bool_t is_activated;
117 tpl_bool_t reset; /* TRUE if queue was reset by external */
118 tpl_bool_t need_to_enqueue;
119 tpl_bool_t prerotation_capability;
120 tpl_bool_t vblank_done;
121 tpl_bool_t set_serial_is_used;
124 typedef enum buffer_status {
129 WAITING_SIGNALED, // 4
134 static const char *status_to_string[7] = {
139 "WAITING_SIGNALED", // 4
140 "WAITING_VBLANK", // 5
144 struct _tpl_wl_egl_buffer {
145 tbm_surface_h tbm_surface;
148 struct wl_proxy *wl_buffer;
149 int dx, dy; /* position to attach to wl_surface */
150 int width, height; /* size to attach to wl_surface */
152 buffer_status_t status; /* for tracing buffer status */
153 int idx; /* position index in buffers array of wl_egl_surface */
155 /* for damage region */
159 /* for wayland_tbm_client_set_buffer_transform */
161 tpl_bool_t w_rotated;
163 /* for wl_surface_set_buffer_transform */
166 /* for wayland_tbm_client_set_buffer_serial */
169 /* for checking need_to_commit (frontbuffer mode) */
170 tpl_bool_t need_to_commit;
172 /* for checking draw done */
173 tpl_bool_t draw_done;
176 /* to get release event via zwp_linux_buffer_release_v1 */
177 struct zwp_linux_buffer_release_v1 *buffer_release;
179 /* each buffer owns its release_fence_fd, until it passes ownership
181 int32_t release_fence_fd;
183 /* each buffer owns its acquire_fence_fd.
184 * If it uses zwp_linux_buffer_release_v1, the ownership of this fd
185 * will be passed to the display server.
186 * Otherwise it will be used as a fence waiting for render done
188 int32_t acquire_fence_fd;
190 /* Fd to send a signal when wl_surface_commit with this buffer */
191 int32_t commit_sync_fd;
193 /* Fd to send a signal when receiving the
194 * presentation feedback from the display server */
195 int32_t presentation_sync_fd;
197 tpl_gsource *waiting_source;
202 tpl_wl_egl_surface_t *wl_egl_surface;
205 struct pst_feedback {
206 /* to get presentation feedback from display server */
207 struct wp_presentation_feedback *presentation_feedback;
212 tpl_wl_egl_surface_t *wl_egl_surface;
217 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
219 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
221 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
222 static tpl_wl_egl_buffer_t *
223 _get_wl_egl_buffer(tbm_surface_h tbm_surface);
225 _write_to_eventfd(int eventfd);
227 _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface);
229 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
231 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
232 tpl_wl_egl_buffer_t *wl_egl_buffer);
/* Heuristic check that the opaque native display handle is a wl_display.
 * Dereferences the first pointer-sized value of the handle and compares it
 * against wl_display_interface — first by address, then by interface name
 * (the same magic check used by __tpl_display_choose_backend_wl_egl_thread2
 * later in this file).
 * NOTE(review): this extraction is lossy — the return-type line, braces and
 * return statements are missing from view; only comments were added here. */
235 _check_native_handle_is_wl_display(tpl_handle_t display)
/* Read the first pointer stored at the handle; for a genuine wl_display
 * this is the wl_display_interface descriptor. */
237 struct wl_interface *wl_egl_native_dpy = *(void **) display;
239 if (!wl_egl_native_dpy) {
240 TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy);
244 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
245 is a memory address pointing to the structure of wl_display_interface. */
246 if (wl_egl_native_dpy == &wl_display_interface)
/* Fallback: the interface struct may be a different copy (e.g. across
 * shared-library boundaries), so also accept a matching interface name. */
249 if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
250 strlen(wl_display_interface.name)) == 0) {
258 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
260 tpl_wl_egl_display_t *wl_egl_display = NULL;
261 tdm_error tdm_err = TDM_ERROR_NONE;
265 wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
266 if (!wl_egl_display) {
267 TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource);
268 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
272 tdm_err = tdm_client_handle_events(wl_egl_display->tdm_client);
274 /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
275 * When tdm_source is no longer available due to an unexpected situation,
276 * wl_egl_thread must remove it from the thread and destroy it.
277 * In that case, tdm_vblank can no longer be used for surfaces and displays
278 * that used this tdm_source. */
279 if (tdm_err != TDM_ERROR_NONE) {
280 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
282 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
284 tpl_gsource_destroy(gsource, TPL_FALSE);
286 wl_egl_display->tdm_source = NULL;
295 __thread_func_tdm_finalize(tpl_gsource *gsource)
297 tpl_wl_egl_display_t *wl_egl_display = NULL;
299 wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
302 "tdm_destroy| wl_egl_display(%p) tdm_client(%p) tpl_gsource(%p)",
303 wl_egl_display, wl_egl_display->tdm_client, gsource);
305 if (wl_egl_display->tdm_client) {
306 tdm_client_destroy(wl_egl_display->tdm_client);
307 wl_egl_display->tdm_client = NULL;
308 wl_egl_display->tdm_display_fd = -1;
311 wl_egl_display->tdm_initialized = TPL_FALSE;
314 static tpl_gsource_functions tdm_funcs = {
317 .dispatch = __thread_func_tdm_dispatch,
318 .finalize = __thread_func_tdm_finalize,
/* Initializes the tdm_client used for vblank waiting, running on the
 * wl_egl_thread (called from _thread_init).
 * On success stores the tdm_client handle and its event fd in
 * wl_egl_display and sets tdm_initialized = TPL_TRUE; the fd is later
 * wrapped in a gsource by __tpl_wl_egl_display_init (tdm_source stays
 * NULL here).
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_OPERATION on any
 * tdm failure (the partially created tdm_client is destroyed on the
 * get-fd failure path).
 * NOTE(review): extraction is lossy — braces/blank lines are missing. */
322 _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display)
324 tdm_client *tdm_client = NULL;
325 int tdm_display_fd = -1;
326 tdm_error tdm_err = TDM_ERROR_NONE;
328 tdm_client = tdm_client_create(&tdm_err);
329 if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
330 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
331 return TPL_ERROR_INVALID_OPERATION;
334 tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
335 if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
336 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
/* Don't leak the client when its fd cannot be obtained. */
337 tdm_client_destroy(tdm_client);
338 return TPL_ERROR_INVALID_OPERATION;
341 wl_egl_display->tdm_display_fd = tdm_display_fd;
342 wl_egl_display->tdm_client = tdm_client;
/* The gsource for tdm_display_fd is created later by the caller. */
343 wl_egl_display->tdm_source = NULL;
344 wl_egl_display->tdm_initialized = TPL_TRUE;
346 TPL_INFO("[TDM_CLIENT_INIT]",
347 "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
348 wl_egl_display, tdm_client, tdm_display_fd);
350 return TPL_ERROR_NONE;
353 #define IMPL_TIZEN_SURFACE_SHM_VERSION 2
356 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
357 uint32_t name, const char *interface,
360 tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
362 if (!strcmp(interface, "tizen_surface_shm")) {
363 wl_egl_display->tss =
364 wl_registry_bind(wl_registry,
366 &tizen_surface_shm_interface,
367 ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
368 version : IMPL_TIZEN_SURFACE_SHM_VERSION));
369 } else if (!strcmp(interface, wp_presentation_interface.name)) {
370 wl_egl_display->presentation =
371 wl_registry_bind(wl_registry,
372 name, &wp_presentation_interface, 1);
373 TPL_DEBUG("bind wp_presentation_interface");
374 } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
375 char *env = tpl_getenv("TPL_EFS");
376 if (env && !atoi(env)) {
377 wl_egl_display->use_explicit_sync = TPL_FALSE;
379 wl_egl_display->explicit_sync =
380 wl_registry_bind(wl_registry, name,
381 &zwp_linux_explicit_synchronization_v1_interface, 1);
382 wl_egl_display->use_explicit_sync = TPL_TRUE;
383 TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
389 __cb_wl_resistry_global_remove_callback(void *data,
390 struct wl_registry *wl_registry,
395 static const struct wl_registry_listener registry_listener = {
396 __cb_wl_resistry_global_callback,
397 __cb_wl_resistry_global_remove_callback
401 _wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display,
402 const char *func_name)
406 strerror_r(errno, buf, sizeof(buf));
408 if (wl_egl_display->last_error == errno)
411 TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
413 dpy_err = wl_display_get_error(wl_egl_display->wl_display);
414 if (dpy_err == EPROTO) {
415 const struct wl_interface *err_interface;
416 uint32_t err_proxy_id, err_code;
417 err_code = wl_display_get_protocol_error(wl_egl_display->wl_display,
420 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
421 err_interface->name, err_code, err_proxy_id);
424 wl_egl_display->last_error = errno;
428 _thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display)
430 struct wl_registry *registry = NULL;
431 struct wl_event_queue *queue = NULL;
432 struct wl_display *display_wrapper = NULL;
433 struct wl_proxy *wl_tbm = NULL;
434 struct wayland_tbm_client *wl_tbm_client = NULL;
436 tpl_result_t result = TPL_ERROR_NONE;
438 queue = wl_display_create_queue(wl_egl_display->wl_display);
440 TPL_ERR("Failed to create wl_queue wl_display(%p)",
441 wl_egl_display->wl_display);
442 result = TPL_ERROR_INVALID_OPERATION;
446 wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display);
447 if (!wl_egl_display->ev_queue) {
448 TPL_ERR("Failed to create wl_queue wl_display(%p)",
449 wl_egl_display->wl_display);
450 result = TPL_ERROR_INVALID_OPERATION;
454 display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display);
455 if (!display_wrapper) {
456 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
457 wl_egl_display->wl_display);
458 result = TPL_ERROR_INVALID_OPERATION;
462 wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
464 registry = wl_display_get_registry(display_wrapper);
466 TPL_ERR("Failed to create wl_registry");
467 result = TPL_ERROR_INVALID_OPERATION;
471 wl_proxy_wrapper_destroy(display_wrapper);
472 display_wrapper = NULL;
474 wl_tbm_client = wayland_tbm_client_init(wl_egl_display->wl_display);
475 if (!wl_tbm_client) {
476 TPL_ERR("Failed to initialize wl_tbm_client.");
477 result = TPL_ERROR_INVALID_CONNECTION;
481 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
483 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
484 result = TPL_ERROR_INVALID_CONNECTION;
488 wl_proxy_set_queue(wl_tbm, wl_egl_display->ev_queue);
489 wl_egl_display->wl_tbm_client = wl_tbm_client;
491 if (wl_registry_add_listener(registry, ®istry_listener,
493 TPL_ERR("Failed to wl_registry_add_listener");
494 result = TPL_ERROR_INVALID_OPERATION;
498 ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue);
500 _wl_display_print_err(wl_egl_display, "roundtrip_queue");
501 result = TPL_ERROR_INVALID_OPERATION;
505 /* set tizen_surface_shm's queue as client's private queue */
506 if (wl_egl_display->tss) {
507 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss,
508 wl_egl_display->ev_queue);
509 TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss);
512 if (wl_egl_display->presentation) {
513 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation,
514 wl_egl_display->ev_queue);
515 TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.",
516 wl_egl_display->presentation);
519 if (wl_egl_display->explicit_sync) {
520 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync,
521 wl_egl_display->ev_queue);
522 TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.",
523 wl_egl_display->explicit_sync);
526 wl_egl_display->wl_initialized = TPL_TRUE;
528 TPL_INFO("[WAYLAND_INIT]",
529 "wl_egl_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
530 wl_egl_display, wl_egl_display->wl_display,
531 wl_egl_display->wl_tbm_client, wl_egl_display->ev_queue);
532 TPL_INFO("[WAYLAND_INIT]",
533 "tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)",
534 wl_egl_display->tss, wl_egl_display->presentation,
535 wl_egl_display->explicit_sync);
539 wl_proxy_wrapper_destroy(display_wrapper);
541 wl_registry_destroy(registry);
543 wl_event_queue_destroy(queue);
549 _thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display)
551 /* If wl_egl_display is in prepared state, cancel it */
552 if (wl_egl_display->prepared) {
553 wl_display_cancel_read(wl_egl_display->wl_display);
554 wl_egl_display->prepared = TPL_FALSE;
557 if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
558 wl_egl_display->ev_queue) == -1) {
559 _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
562 if (wl_egl_display->tss) {
563 TPL_INFO("[TIZEN_SURFACE_SHM_DESTROY]",
564 "wl_egl_display(%p) tizen_surface_shm(%p) fini.",
565 wl_egl_display, wl_egl_display->tss);
566 tizen_surface_shm_destroy(wl_egl_display->tss);
567 wl_egl_display->tss = NULL;
570 if (wl_egl_display->presentation) {
571 TPL_INFO("[WP_PRESENTATION_DESTROY]",
572 "wl_egl_display(%p) wp_presentation(%p) fini.",
573 wl_egl_display, wl_egl_display->presentation);
574 wp_presentation_destroy(wl_egl_display->presentation);
575 wl_egl_display->presentation = NULL;
578 if (wl_egl_display->explicit_sync) {
579 TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
580 "wl_egl_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
581 wl_egl_display, wl_egl_display->explicit_sync);
582 zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync);
583 wl_egl_display->explicit_sync = NULL;
586 if (wl_egl_display->wl_tbm_client) {
587 struct wl_proxy *wl_tbm = NULL;
589 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
590 wl_egl_display->wl_tbm_client);
592 wl_proxy_set_queue(wl_tbm, NULL);
595 TPL_INFO("[WL_TBM_DEINIT]",
596 "wl_egl_display(%p) wl_tbm_client(%p)",
597 wl_egl_display, wl_egl_display->wl_tbm_client);
598 wayland_tbm_client_deinit(wl_egl_display->wl_tbm_client);
599 wl_egl_display->wl_tbm_client = NULL;
602 wl_event_queue_destroy(wl_egl_display->ev_queue);
604 wl_egl_display->wl_initialized = TPL_FALSE;
606 TPL_INFO("[DISPLAY_FINI]", "wl_egl_display(%p) wl_display(%p)",
607 wl_egl_display, wl_egl_display->wl_display);
611 _thread_init(void *data)
613 tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
615 if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) {
616 TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)",
617 wl_egl_display, wl_egl_display->wl_display);
620 if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
621 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
624 return wl_egl_display;
628 __thread_func_disp_prepare(tpl_gsource *gsource)
630 tpl_wl_egl_display_t *wl_egl_display =
631 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
633 /* If this wl_egl_display is already prepared,
634 * do nothing in this function. */
635 if (wl_egl_display->prepared)
638 /* If there is a last_error, there is no need to poll,
639 * so skip directly to dispatch.
640 * prepare -> dispatch */
641 if (wl_egl_display->last_error)
644 while (wl_display_prepare_read_queue(wl_egl_display->wl_display,
645 wl_egl_display->ev_queue) != 0) {
646 if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
647 wl_egl_display->ev_queue) == -1) {
648 _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
652 wl_egl_display->prepared = TPL_TRUE;
654 wl_display_flush(wl_egl_display->wl_display);
660 __thread_func_disp_check(tpl_gsource *gsource)
662 tpl_wl_egl_display_t *wl_egl_display =
663 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
664 tpl_bool_t ret = TPL_FALSE;
666 if (!wl_egl_display->prepared)
669 /* If prepared, but last_error is set,
670 * cancel_read is executed and FALSE is returned.
671 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
672 * and skipping disp_check from prepare to disp_dispatch.
673 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
674 if (wl_egl_display->prepared && wl_egl_display->last_error) {
675 wl_display_cancel_read(wl_egl_display->wl_display);
679 if (tpl_gsource_check_io_condition(gsource)) {
680 if (wl_display_read_events(wl_egl_display->wl_display) == -1)
681 _wl_display_print_err(wl_egl_display, "read_event");
684 wl_display_cancel_read(wl_egl_display->wl_display);
688 wl_egl_display->prepared = TPL_FALSE;
694 __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
696 tpl_wl_egl_display_t *wl_egl_display =
697 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
701 /* If there is last_error, SOURCE_REMOVE should be returned
702 * to remove the gsource from the main loop.
703 * This is because wl_egl_display is not valid since last_error was set.*/
704 if (wl_egl_display->last_error) {
708 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
709 if (tpl_gsource_check_io_condition(gsource)) {
710 if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
711 wl_egl_display->ev_queue) == -1) {
712 _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
716 wl_display_flush(wl_egl_display->wl_display);
717 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
723 __thread_func_disp_finalize(tpl_gsource *gsource)
725 tpl_wl_egl_display_t *wl_egl_display =
726 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
728 if (wl_egl_display->wl_initialized)
729 _thread_wl_display_fini(wl_egl_display);
731 TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)",
732 wl_egl_display, gsource);
738 static tpl_gsource_functions disp_funcs = {
739 .prepare = __thread_func_disp_prepare,
740 .check = __thread_func_disp_check,
741 .dispatch = __thread_func_disp_dispatch,
742 .finalize = __thread_func_disp_finalize,
746 __tpl_wl_egl_display_init(tpl_display_t *display)
748 tpl_wl_egl_display_t *wl_egl_display = NULL;
752 /* Do not allow default display in wayland. */
753 if (!display->native_handle) {
754 TPL_ERR("Invalid native handle for display.");
755 return TPL_ERROR_INVALID_PARAMETER;
758 if (!_check_native_handle_is_wl_display(display->native_handle)) {
759 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
760 return TPL_ERROR_INVALID_PARAMETER;
763 wl_egl_display = (tpl_wl_egl_display_t *) calloc(1,
764 sizeof(tpl_wl_egl_display_t));
765 if (!wl_egl_display) {
766 TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t.");
767 return TPL_ERROR_OUT_OF_MEMORY;
770 display->backend.data = wl_egl_display;
771 display->bufmgr_fd = -1;
773 wl_egl_display->tdm_initialized = TPL_FALSE;
774 wl_egl_display->wl_initialized = TPL_FALSE;
776 wl_egl_display->ev_queue = NULL;
777 wl_egl_display->wl_display = (struct wl_display *)display->native_handle;
778 wl_egl_display->last_error = 0;
779 wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
780 wl_egl_display->prepared = TPL_FALSE;
782 /* Wayland Interfaces */
783 wl_egl_display->tss = NULL;
784 wl_egl_display->presentation = NULL;
785 wl_egl_display->explicit_sync = NULL;
786 wl_egl_display->wl_tbm_client = NULL;
788 wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled
790 char *env = tpl_getenv("TPL_WAIT_VBLANK");
791 if (env && !atoi(env)) {
792 wl_egl_display->use_wait_vblank = TPL_FALSE;
796 tpl_gmutex_init(&wl_egl_display->wl_event_mutex);
799 wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
800 (tpl_gthread_func)_thread_init,
801 (void *)wl_egl_display);
802 if (!wl_egl_display->thread) {
803 TPL_ERR("Failed to create wl_egl_thread");
807 wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread,
808 (void *)wl_egl_display,
809 wl_display_get_fd(wl_egl_display->wl_display),
810 &disp_funcs, SOURCE_TYPE_NORMAL);
811 if (!wl_egl_display->disp_source) {
812 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
813 display->native_handle,
814 wl_egl_display->thread);
818 wl_egl_display->tdm_source = tpl_gsource_create(wl_egl_display->thread,
819 (void *)wl_egl_display,
820 wl_egl_display->tdm_display_fd,
821 &tdm_funcs, SOURCE_TYPE_NORMAL);
822 if (!wl_egl_display->tdm_source) {
823 TPL_ERR("Failed to create tdm_gsource\n");
827 TPL_INFO("[DISPLAY_INIT]",
828 "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
830 wl_egl_display->thread,
831 wl_egl_display->wl_display);
833 TPL_INFO("[DISPLAY_INIT]",
834 "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)",
835 wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
836 wl_egl_display->tss ? "TRUE" : "FALSE",
837 wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");
839 return TPL_ERROR_NONE;
842 if (wl_egl_display->thread) {
843 if (wl_egl_display->tdm_source)
844 tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
845 if (wl_egl_display->disp_source)
846 tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
848 tpl_gthread_destroy(wl_egl_display->thread);
851 wl_egl_display->thread = NULL;
852 free(wl_egl_display);
854 display->backend.data = NULL;
855 return TPL_ERROR_INVALID_OPERATION;
859 __tpl_wl_egl_display_fini(tpl_display_t *display)
861 tpl_wl_egl_display_t *wl_egl_display;
865 wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data;
866 if (wl_egl_display) {
867 TPL_INFO("[DISPLAY_FINI]",
868 "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
870 wl_egl_display->thread,
871 wl_egl_display->wl_display);
873 if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) {
874 tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
875 wl_egl_display->tdm_source = NULL;
878 if (wl_egl_display->disp_source) {
879 tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
880 wl_egl_display->disp_source = NULL;
883 if (wl_egl_display->thread) {
884 tpl_gthread_destroy(wl_egl_display->thread);
885 wl_egl_display->thread = NULL;
888 tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);
890 free(wl_egl_display);
893 display->backend.data = NULL;
/* Backend query_config entry point: reports whether the requested EGL-style
 * config is supported and maps it to a TBM format.
 * Only window surfaces with 8-bit R/G/B channels and 24- or 32-bit color
 * depth are accepted:
 *   alpha_size == 8 -> TBM_FORMAT_ARGB8888
 *   alpha_size == 0 -> TBM_FORMAT_XRGB8888
 * Both are reported as not slow. Any other combination returns
 * TPL_ERROR_INVALID_PARAMETER.
 * NOTE(review): extraction is lossy — the return-type line, the trailing
 * `is_slow` parameter declaration and braces are missing from view. */
897 __tpl_wl_egl_display_query_config(tpl_display_t *display,
898 tpl_surface_type_t surface_type,
899 int red_size, int green_size,
900 int blue_size, int alpha_size,
901 int color_depth, int *native_visual_id,
906 if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
907 green_size == 8 && blue_size == 8 &&
908 (color_depth == 32 || color_depth == 24)) {
910 if (alpha_size == 8) {
/* Out-parameters are optional; only written when non-NULL. */
911 if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
912 if (is_slow) *is_slow = TPL_FALSE;
913 return TPL_ERROR_NONE;
915 if (alpha_size == 0) {
916 if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
917 if (is_slow) *is_slow = TPL_FALSE;
918 return TPL_ERROR_NONE;
922 return TPL_ERROR_INVALID_PARAMETER;
926 __tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
930 TPL_IGNORE(visual_id);
931 TPL_IGNORE(alpha_size);
932 return TPL_ERROR_NONE;
936 __tpl_wl_egl_display_get_window_info(tpl_display_t *display,
937 tpl_handle_t window, int *width,
938 int *height, tbm_format *format,
939 int depth, int a_size)
941 tpl_result_t ret = TPL_ERROR_NONE;
942 struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window;
947 if (!wl_egl_window) {
948 TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window);
949 return TPL_ERROR_INVALID_PARAMETER;
952 if (width) *width = wl_egl_window->width;
953 if (height) *height = wl_egl_window->height;
955 struct tizen_private *tizen_private =
956 (struct tizen_private *)wl_egl_window->driver_private;
957 if (tizen_private && tizen_private->data) {
958 tpl_wl_egl_surface_t *wl_egl_surface =
959 (tpl_wl_egl_surface_t *)tizen_private->data;
960 *format = wl_egl_surface->format;
963 *format = TBM_FORMAT_ARGB8888;
965 *format = TBM_FORMAT_XRGB8888;
973 __tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
974 tpl_handle_t pixmap, int *width,
975 int *height, tbm_format *format)
977 tbm_surface_h tbm_surface = NULL;
980 TPL_ERR("Invalid parameter. tpl_handle_t(%p)", pixmap);
981 return TPL_ERROR_INVALID_PARAMETER;
984 tbm_surface = wayland_tbm_server_get_surface(NULL,
985 (struct wl_resource *)pixmap);
987 TPL_ERR("Failed to get tbm_surface from wayland_tbm.");
988 return TPL_ERROR_INVALID_PARAMETER;
991 if (width) *width = tbm_surface_get_width(tbm_surface);
992 if (height) *height = tbm_surface_get_height(tbm_surface);
993 if (format) *format = tbm_surface_get_format(tbm_surface);
995 return TPL_ERROR_NONE;
999 __tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
1001 tbm_surface_h tbm_surface = NULL;
1005 tbm_surface = wayland_tbm_server_get_surface(NULL,
1006 (struct wl_resource *)pixmap);
1008 TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
1016 __tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy)
1018 struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
1020 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
1022 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
1023 is a memory address pointing to the structure of wl_display_interface. */
1024 if (wl_egl_native_dpy == &wl_display_interface)
1027 if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
1028 strlen(wl_display_interface.name)) == 0) {
1035 /* -- BEGIN -- wl_egl_window callback functions */
1037 __cb_destroy_callback(void *private)
1039 struct tizen_private *tizen_private = (struct tizen_private *)private;
1040 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1042 if (!tizen_private) {
1043 TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface");
1047 wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1048 if (wl_egl_surface) {
1049 TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.",
1050 wl_egl_surface->wl_egl_window);
1051 TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface.");
1053 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1054 wl_egl_surface->wl_egl_window->destroy_window_callback = NULL;
1055 wl_egl_surface->wl_egl_window->resize_callback = NULL;
1056 wl_egl_surface->wl_egl_window->driver_private = NULL;
1057 wl_egl_surface->wl_egl_window = NULL;
1058 wl_egl_surface->wl_surface = NULL;
1060 tizen_private->set_window_serial_callback = NULL;
1061 tizen_private->rotate_callback = NULL;
1062 tizen_private->get_rotation_capability = NULL;
1063 tizen_private->set_frontbuffer_callback = NULL;
1064 tizen_private->create_commit_sync_fd = NULL;
1065 tizen_private->create_presentation_sync_fd = NULL;
1066 tizen_private->data = NULL;
1068 free(tizen_private);
1069 tizen_private = NULL;
1070 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1075 __cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
1077 TPL_ASSERT(private);
1078 TPL_ASSERT(wl_egl_window);
1080 struct tizen_private *tizen_private = (struct tizen_private *)private;
1081 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1082 int cur_w, cur_h, req_w, req_h, format;
1084 if (!wl_egl_surface) {
1085 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1090 format = wl_egl_surface->format;
1091 cur_w = wl_egl_surface->width;
1092 cur_h = wl_egl_surface->height;
1093 req_w = wl_egl_window->width;
1094 req_h = wl_egl_window->height;
1096 TPL_INFO("[WINDOW_RESIZE]",
1097 "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
1098 wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
1100 if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
1101 != TBM_SURFACE_QUEUE_ERROR_NONE) {
1102 TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
1106 /* -- END -- wl_egl_window callback functions */
1108 /* -- BEGIN -- wl_egl_window tizen private callback functions */
1110 /* There is no usecase for using prerotation callback below */
1112 __cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
1114 TPL_ASSERT(private);
1115 TPL_ASSERT(wl_egl_window);
1117 struct tizen_private *tizen_private = (struct tizen_private *)private;
1118 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1119 int rotation = tizen_private->rotation;
1121 if (!wl_egl_surface) {
1122 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1127 TPL_INFO("[WINDOW_ROTATE]",
1128 "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)",
1129 wl_egl_surface, wl_egl_window,
1130 wl_egl_surface->rotation, rotation);
1132 wl_egl_surface->rotation = rotation;
1135 /* There is no usecase for using prerotation callback below */
/* wl_egl_window tizen-private callback: reports whether this surface
 * supports pre-rotation.
 * Recovers the tpl_wl_egl_surface_t from tizen_private->data and translates
 * its prerotation_capability flag into the WL_EGL_WINDOW_TIZEN_CAPABILITY_*
 * enum. Returns CAPABILITY_NONE when the surface pointer is missing
 * (window already detached from its tpl surface).
 * NOTE(review): extraction is lossy — the `void *private` parameter line,
 * braces and the `else` keyword are missing from view. */
1137 __cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
1140 TPL_ASSERT(private);
1141 TPL_ASSERT(wl_egl_window);
1143 int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
1144 struct tizen_private *tizen_private = (struct tizen_private *)private;
1145 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1147 if (!wl_egl_surface) {
1148 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
/* Unknown surface -> report no capability rather than guessing. */
1150 return rotation_capability;
1153 if (wl_egl_surface->prerotation_capability == TPL_TRUE)
1154 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
1156 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
1159 return rotation_capability;
1163 __cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
1164 void *private, unsigned int serial)
1166 TPL_ASSERT(private);
1167 TPL_ASSERT(wl_egl_window);
1169 struct tizen_private *tizen_private = (struct tizen_private *)private;
1170 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1172 if (!wl_egl_surface) {
1173 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1178 wl_egl_surface->set_serial_is_used = TPL_TRUE;
1179 wl_egl_surface->serial = serial;
/* wl_egl_window tizen-private callback: returns a dup()'d fd of the
 * per-surface commit-sync eventfd, creating the eventfd (EFD_CLOEXEC) on
 * first use. The caller owns (and must close) the returned fd. Returns -1
 * when the surface is missing or creation fails. Guarded by
 * wl_egl_surface->commit_sync.mutex. */
1183 __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1185 TPL_ASSERT(private);
1186 TPL_ASSERT(wl_egl_window);
1188 int commit_sync_fd = -1;
1190 struct tizen_private *tizen_private = (struct tizen_private *)private;
1191 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1193 if (!wl_egl_surface) {
1194 TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface);
1198 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
/* fast path: eventfd already exists, hand back a duplicate */
1200 if (wl_egl_surface->commit_sync.fd != -1) {
1201 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1202 TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
1203 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1204 TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)",
1205 wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd);
1206 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1207 return commit_sync_fd;
/* first call: lazily create the eventfd backing commit synchronization */
1210 wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
1211 if (wl_egl_surface->commit_sync.fd == -1) {
1212 TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)",
1214 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1218 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1220 TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
1221 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1222 TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)",
1223 wl_egl_surface, commit_sync_fd);
1225 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1227 return commit_sync_fd;
/* wl_egl_window tizen-private callback: same contract as
 * __cb_create_commit_sync_fd but for the presentation-sync eventfd.
 * Returns a dup()'d fd owned by the caller, or -1 on failure; guarded by
 * wl_egl_surface->presentation_sync.mutex. */
1231 __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1233 TPL_ASSERT(private);
1234 TPL_ASSERT(wl_egl_window);
1236 int presentation_sync_fd = -1;
1238 struct tizen_private *tizen_private = (struct tizen_private *)private;
1239 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1241 if (!wl_egl_surface) {
1242 TPL_ERR("Invalid parameter. wl_egl_surface is NULL");
1246 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
/* fast path: already created, return a duplicate */
1247 if (wl_egl_surface->presentation_sync.fd != -1) {
1248 presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1249 TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
1250 wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1251 TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1252 wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1253 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1254 return presentation_sync_fd;
/* lazily create the eventfd on first request */
1257 wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
1258 if (wl_egl_surface->presentation_sync.fd == -1) {
1259 TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)",
1261 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1265 presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1266 TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
1267 wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1268 TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1269 wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1271 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1273 return presentation_sync_fd;
1275 /* -- END -- wl_egl_window tizen private callback functions */
1277 /* -- BEGIN -- tizen_surface_shm_flusher_listener */
/* tizen_surface_shm_flusher listener: compositor asked the client to flush
 * its buffers; flush the whole tbm_surface_queue. */
1278 static void __cb_tss_flusher_flush_callback(void *data,
1279 struct tizen_surface_shm_flusher *tss_flusher)
1281 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1282 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1284 TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1285 wl_egl_surface, wl_egl_surface->tbm_queue);
1287 _print_buffer_lists(wl_egl_surface);
1289 tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
1290 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1291 TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
/* tizen_surface_shm_flusher listener: like the flush callback above, but
 * only frees currently-unused (free) buffers in the queue. */
1296 static void __cb_tss_flusher_free_flush_callback(void *data,
1297 struct tizen_surface_shm_flusher *tss_flusher)
1299 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1300 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1302 TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1303 wl_egl_surface, wl_egl_surface->tbm_queue);
1305 _print_buffer_lists(wl_egl_surface);
1307 tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
1308 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1309 TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
/* Listener vtable registered in _thread_wl_egl_surface_init() via
 * tizen_surface_shm_flusher_add_listener(). */
1314 static const struct tizen_surface_shm_flusher_listener
1315 tss_flusher_listener = {
1316 __cb_tss_flusher_flush_callback,
1317 __cb_tss_flusher_free_flush_callback
1319 /* -- END -- tizen_surface_shm_flusher_listener */
1322 /* -- BEGIN -- tbm_surface_queue callback funstions */
/* tbm_surface_queue reset callback: runs when the queue is reset (resize or
 * activation-state change). Marks wl_egl_surface->reset so the next frame
 * picks up the new size/state, and forwards to the user's reset_cb.
 * NOTE(review): declarations of `width`/`height` and the queue-size update
 * lines are not visible in this extraction -- confirm against full file. */
1324 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1327 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1328 tpl_wl_egl_display_t *wl_egl_display = NULL;
1329 tpl_surface_t *surface = NULL;
1330 tpl_bool_t is_activated = TPL_FALSE;
1333 wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1334 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1336 wl_egl_display = wl_egl_surface->wl_egl_display;
1337 TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
1339 surface = wl_egl_surface->tpl_surface;
1340 TPL_CHECK_ON_NULL_RETURN(surface);
1342 /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1343 * the changed window size at the next frame. */
1344 width = tbm_surface_queue_get_width(tbm_queue);
1345 height = tbm_surface_queue_get_height(tbm_queue);
1346 if (surface->width != width || surface->height != height) {
1347 TPL_INFO("[QUEUE_RESIZE]",
1348 "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1349 wl_egl_surface, tbm_queue,
1350 surface->width, surface->height, width, height);
1353 /* When queue_reset_callback is called, if is_activated is different from
1354 * its previous state change the reset flag to TPL_TRUE to get a new buffer
1355 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1356 is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client,
1357 wl_egl_surface->tbm_queue);
1358 if (wl_egl_surface->is_activated != is_activated) {
1360 TPL_INFO("[ACTIVATED]",
1361 "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1362 wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
1364 TPL_LOG_T("[DEACTIVATED]",
1365 " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1366 wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
/* force next dequeue to re-validate size/activation state */
1370 wl_egl_surface->reset = TPL_TRUE;
1372 if (surface->reset_cb)
1373 surface->reset_cb(surface->reset_data);
/* tbm_surface_queue acquirable callback: a buffer became acquirable.
 * Wakes the surface's gsource thread with message 2, which dispatches to
 * _thread_surface_queue_acquire() (see __thread_func_surf_dispatch). */
1377 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1380 TPL_IGNORE(tbm_queue);
1382 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1383 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1385 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
/* message 2 == "acquirable" (see dispatch handler) */
1387 tpl_gsource_send_message(wl_egl_surface->surf_source, 2);
1389 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1391 /* -- END -- tbm_surface_queue callback funstions */
/* Runs on the wayland-egl thread (via __thread_func_surf_finalize): tears
 * down all per-surface thread-side resources under surf_mutex --
 * outstanding presentation feedbacks, the presentation-sync eventfd, the
 * vblank-waiting list, surface_sync, tss_flusher, vblank and tbm_queue. */
1394 _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
1396 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1398 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1400 TPL_INFO("[SURFACE_FINI]",
1401 "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
1402 wl_egl_surface, wl_egl_surface->wl_egl_window,
1403 wl_egl_surface->wl_surface);
1405 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
/* Drain pending presentation feedbacks: signal each waiter's eventfd so
 * nobody blocks forever, then destroy the wl proxy. */
1407 if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) {
1408 while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) {
1409 struct pst_feedback *pst_feedback =
1410 (struct pst_feedback *)__tpl_list_pop_front(
1411 wl_egl_surface->presentation_feedbacks, NULL);
1413 _write_to_eventfd(pst_feedback->pst_sync_fd);
1414 close(pst_feedback->pst_sync_fd);
1415 pst_feedback->pst_sync_fd = -1;
1417 wp_presentation_feedback_destroy(pst_feedback->presentation_feedback);
1418 pst_feedback->presentation_feedback = NULL;
1424 __tpl_list_free(wl_egl_surface->presentation_feedbacks, NULL);
1425 wl_egl_surface->presentation_feedbacks = NULL;
/* Signal and close the surface-wide presentation-sync eventfd, if any. */
1428 if (wl_egl_surface->presentation_sync.fd != -1) {
1429 _write_to_eventfd(wl_egl_surface->presentation_sync.fd);
1430 close(wl_egl_surface->presentation_sync.fd);
1431 wl_egl_surface->presentation_sync.fd = -1;
1434 if (wl_egl_surface->vblank_waiting_buffers) {
1435 __tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL);
1436 wl_egl_surface->vblank_waiting_buffers = NULL;
1439 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1441 if (wl_egl_surface->surface_sync) {
1442 TPL_INFO("[SURFACE_SYNC_DESTROY]",
1443 "wl_egl_surface(%p) surface_sync(%p)",
1444 wl_egl_surface, wl_egl_surface->surface_sync);
1445 zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync);
1446 wl_egl_surface->surface_sync = NULL;
1449 if (wl_egl_surface->tss_flusher) {
1450 TPL_INFO("[FLUSHER_DESTROY]",
1451 "wl_egl_surface(%p) tss_flusher(%p)",
1452 wl_egl_surface, wl_egl_surface->tss_flusher);
1453 tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher);
1454 wl_egl_surface->tss_flusher = NULL;
1457 if (wl_egl_surface->vblank) {
1458 TPL_INFO("[VBLANK_DESTROY]",
1459 "wl_egl_surface(%p) vblank(%p)",
1460 wl_egl_surface, wl_egl_surface->vblank);
1461 tdm_client_vblank_destroy(wl_egl_surface->vblank);
1462 wl_egl_surface->vblank = NULL;
1465 if (wl_egl_surface->tbm_queue) {
1466 TPL_INFO("[TBM_QUEUE_DESTROY]",
1467 "wl_egl_surface(%p) tbm_queue(%p)",
1468 wl_egl_surface, wl_egl_surface->tbm_queue);
1469 tbm_surface_queue_destroy(wl_egl_surface->tbm_queue);
1470 wl_egl_surface->tbm_queue = NULL;
1473 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* gsource dispatch for the per-surface source.
 * message 1: initialize the surface on the thread, then signal surf_cond so
 *            __tpl_wl_egl_surface_init() (waiting on it) can continue.
 * message 2: a buffer became acquirable; run _thread_surface_queue_acquire.
 * NOTE(review): the `if (message == 1)` line is not visible in this
 * extraction but is implied by the matching `} else if (message == 2)`. */
1477 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1479 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1481 wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
1483 /* Initialize surface */
1485 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1486 TPL_DEBUG("wl_egl_surface(%p) initialize message received!",
1488 _thread_wl_egl_surface_init(wl_egl_surface);
1489 tpl_gcond_signal(&wl_egl_surface->surf_cond);
1490 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1491 } else if (message == 2) {
1492 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1493 TPL_DEBUG("wl_egl_surface(%p) acquirable message received!",
1495 _thread_surface_queue_acquire(wl_egl_surface);
1496 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* gsource finalize hook: releases thread-side surface resources when the
 * source is destroyed (see tpl_gsource_destroy in __tpl_wl_egl_surface_fini). */
1503 __thread_func_surf_finalize(tpl_gsource *gsource)
1505 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1507 wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
1508 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1510 _thread_wl_egl_surface_fini(wl_egl_surface);
1512 TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)",
1513 wl_egl_surface, gsource);
/* vtable for the per-surface gsource (prepare/check members, if any, are
 * not visible in this extraction). */
1516 static tpl_gsource_functions surf_funcs = {
1519 .dispatch = __thread_func_surf_dispatch,
1520 .finalize = __thread_func_surf_finalize,
/* Backend entry point: initializes the wl_egl surface backend for a window
 * tpl_surface. Allocates tpl_wl_egl_surface_t, creates its gsource on the
 * display thread, wires the wl_egl_window tizen-private callbacks, then
 * sends message 1 and blocks on surf_cond until the thread-side init
 * (_thread_wl_egl_surface_init) completes. Returns TPL_ERROR_NONE on
 * success; INVALID_PARAMETER / OUT_OF_MEMORY / INVALID_OPERATION on
 * the respective failures. */
1524 __tpl_wl_egl_surface_init(tpl_surface_t *surface)
1526 tpl_wl_egl_display_t *wl_egl_display = NULL;
1527 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1528 tpl_gsource *surf_source = NULL;
1530 struct wl_egl_window *wl_egl_window =
1531 (struct wl_egl_window *)surface->native_handle;
1533 TPL_ASSERT(surface);
1534 TPL_ASSERT(surface->display);
1535 TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1536 TPL_ASSERT(surface->native_handle);
1539 (tpl_wl_egl_display_t *)surface->display->backend.data;
1540 if (!wl_egl_display) {
1541 TPL_ERR("Invalid parameter. wl_egl_display(%p)",
1543 return TPL_ERROR_INVALID_PARAMETER;
1546 wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
1547 sizeof(tpl_wl_egl_surface_t));
1548 if (!wl_egl_surface) {
1549 TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
1550 return TPL_ERROR_OUT_OF_MEMORY;
/* gsource lives on the shared display thread; fd -1 => message-driven */
1553 surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface,
1554 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
1556 TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)",
1558 goto surf_source_create_fail;
1561 surface->backend.data = (void *)wl_egl_surface;
1562 surface->width = wl_egl_window->width;
1563 surface->height = wl_egl_window->height;
1564 surface->rotation = 0;
1566 wl_egl_surface->tpl_surface = surface;
1567 wl_egl_surface->width = wl_egl_window->width;
1568 wl_egl_surface->height = wl_egl_window->height;
1569 wl_egl_surface->format = surface->format;
1571 wl_egl_surface->surf_source = surf_source;
1572 wl_egl_surface->wl_egl_window = wl_egl_window;
1573 wl_egl_surface->wl_surface = wl_egl_window->surface;
1575 wl_egl_surface->wl_egl_display = wl_egl_display;
1577 wl_egl_surface->reset = TPL_FALSE;
1578 wl_egl_surface->is_activated = TPL_FALSE;
1579 wl_egl_surface->need_to_enqueue = TPL_TRUE;
1580 wl_egl_surface->prerotation_capability = TPL_FALSE;
1581 wl_egl_surface->vblank_done = TPL_TRUE;
1582 wl_egl_surface->use_render_done_fence = TPL_FALSE;
1583 wl_egl_surface->set_serial_is_used = TPL_FALSE;
1585 wl_egl_surface->latest_transform = 0;
1586 wl_egl_surface->render_done_cnt = 0;
1587 wl_egl_surface->serial = 0;
1589 wl_egl_surface->vblank = NULL;
1590 wl_egl_surface->tss_flusher = NULL;
1591 wl_egl_surface->surface_sync = NULL;
1593 wl_egl_surface->post_interval = surface->post_interval;
/* -1 marks the lazily-created eventfds as "not created yet" */
1595 wl_egl_surface->commit_sync.fd = -1;
1596 wl_egl_surface->presentation_sync.fd = -1;
1600 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1601 wl_egl_surface->buffers[i] = NULL;
1602 wl_egl_surface->buffer_cnt = 0;
/* Attach (or create) the tizen-private block on the wl_egl_window and hook
 * all window-side callbacks to this surface. */
1606 struct tizen_private *tizen_private = NULL;
1608 if (wl_egl_window->driver_private)
1609 tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
1611 tizen_private = tizen_private_create();
1612 wl_egl_window->driver_private = (void *)tizen_private;
1615 if (tizen_private) {
1616 tizen_private->data = (void *)wl_egl_surface;
1617 tizen_private->rotate_callback = (void *)__cb_rotate_callback;
1618 tizen_private->get_rotation_capability = (void *)
1619 __cb_get_rotation_capability;
1620 tizen_private->set_window_serial_callback = (void *)
1621 __cb_set_window_serial_callback;
1622 tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
1623 tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
1625 wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
1626 wl_egl_window->resize_callback = (void *)__cb_resize_callback;
1630 tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
1631 tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
1633 tpl_gmutex_init(&wl_egl_surface->buffers_mutex);
1635 tpl_gmutex_init(&wl_egl_surface->surf_mutex);
1636 tpl_gcond_init(&wl_egl_surface->surf_cond);
1638 /* Initialize in thread */
/* hand off to the display thread and wait for init to finish */
1639 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1640 tpl_gsource_send_message(wl_egl_surface->surf_source, 1);
1641 tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
1642 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1644 TPL_ASSERT(wl_egl_surface->tbm_queue);
1646 TPL_INFO("[SURFACE_INIT]",
1647 "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)",
1648 surface, wl_egl_surface, wl_egl_surface->surf_source);
1650 return TPL_ERROR_NONE;
1652 surf_source_create_fail:
1653 free(wl_egl_surface);
1654 surface->backend.data = NULL;
1655 return TPL_ERROR_INVALID_OPERATION;
/* Thread-side helper: creates the surface's tbm_surface_queue through the
 * wayland-tbm client, choosing the tiled variant when the buffer manager
 * reports TILED_MEMORY capability. Sets GUARANTEE_CYCLE mode and registers
 * the reset/acquirable callbacks. Returns NULL on any failure (the queue is
 * destroyed on partial-setup failure). */
1658 static tbm_surface_queue_h
1659 _thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface,
1660 struct wayland_tbm_client *wl_tbm_client,
1663 tbm_surface_queue_h tbm_queue = NULL;
1664 tbm_bufmgr bufmgr = NULL;
1665 unsigned int capability;
1667 struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
1668 int width = wl_egl_surface->width;
1669 int height = wl_egl_surface->height;
1670 int format = wl_egl_surface->format;
1672 if (!wl_tbm_client || !wl_surface) {
1673 TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)",
1674 wl_tbm_client, wl_surface);
/* briefly open the bufmgr just to query its capability bits */
1678 bufmgr = tbm_bufmgr_init(-1);
1679 capability = tbm_bufmgr_get_capability(bufmgr);
1680 tbm_bufmgr_deinit(bufmgr);
1682 if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1683 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1691 tbm_queue = wayland_tbm_client_create_surface_queue(
1701 TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)",
/* GUARANTEE_CYCLE: buffers are handed out in a fixed rotation order */
1706 if (tbm_surface_queue_set_modes(
1707 tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1708 TBM_SURFACE_QUEUE_ERROR_NONE) {
1709 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1711 tbm_surface_queue_destroy(tbm_queue);
1715 if (tbm_surface_queue_add_reset_cb(
1717 __cb_tbm_queue_reset_callback,
1718 (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1719 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1721 tbm_surface_queue_destroy(tbm_queue);
1725 if (tbm_surface_queue_add_acquirable_cb(
1727 __cb_tbm_queue_acquirable_callback,
1728 (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1729 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1731 tbm_surface_queue_destroy(tbm_queue);
/* Thread-side helper: creates a tdm_client_vblank on the "primary" output.
 * Fake vblank is enabled (events still delivered when the output is off)
 * and sync is disabled (asynchronous wait). Returns NULL on failure. */
1738 static tdm_client_vblank*
1739 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1741 tdm_client_vblank *vblank = NULL;
1742 tdm_client_output *tdm_output = NULL;
1743 tdm_error tdm_err = TDM_ERROR_NONE;
1746 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1750 tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1751 if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1752 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1756 vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1757 if (!vblank || tdm_err != TDM_ERROR_NONE) {
1758 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1762 tdm_client_vblank_set_enable_fake(vblank, 1);
1763 tdm_client_vblank_set_sync(vblank, 0);
/* Runs on the display thread in response to message 1: creates the
 * tbm_queue, a per-surface vblank object, the tizen_surface_shm flusher
 * (when the display exposes tss) and the zwp_linux surface_sync object
 * (when explicit sync is available and enabled), then allocates the
 * vblank-waiting and presentation-feedback lists. */
1769 _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
1771 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1773 wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
1775 wl_egl_display->wl_tbm_client,
1777 if (!wl_egl_surface->tbm_queue) {
1778 TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
1779 wl_egl_surface, wl_egl_display->wl_tbm_client);
1783 TPL_INFO("[QUEUE_CREATION]",
1784 "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)",
1785 wl_egl_surface, wl_egl_surface->wl_surface,
1786 wl_egl_display->wl_tbm_client);
1787 TPL_INFO("[QUEUE_CREATION]",
1788 "tbm_queue(%p) size(%d x %d) X %d format(%d)",
1789 wl_egl_surface->tbm_queue,
1790 wl_egl_surface->width,
1791 wl_egl_surface->height,
1793 wl_egl_surface->format);
1795 wl_egl_surface->vblank = _thread_create_tdm_client_vblank(
1796 wl_egl_display->tdm_client);
1797 if (wl_egl_surface->vblank) {
1798 TPL_INFO("[VBLANK_INIT]",
1799 "wl_egl_surface(%p) tdm_client(%p) vblank(%p)",
1800 wl_egl_surface, wl_egl_display->tdm_client,
1801 wl_egl_surface->vblank);
/* optional: buffer-flush protocol (see tss_flusher_listener callbacks) */
1804 if (wl_egl_display->tss) {
1805 wl_egl_surface->tss_flusher =
1806 tizen_surface_shm_get_flusher(wl_egl_display->tss,
1807 wl_egl_surface->wl_surface);
1810 if (wl_egl_surface->tss_flusher) {
1811 tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
1812 &tss_flusher_listener,
1814 TPL_INFO("[FLUSHER_INIT]",
1815 "wl_egl_surface(%p) tss_flusher(%p)",
1816 wl_egl_surface, wl_egl_surface->tss_flusher);
/* optional: explicit fence sync; disabled display-wide on failure */
1819 if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) {
1820 wl_egl_surface->surface_sync =
1821 zwp_linux_explicit_synchronization_v1_get_synchronization(
1822 wl_egl_display->explicit_sync, wl_egl_surface->wl_surface);
1823 if (wl_egl_surface->surface_sync) {
1824 TPL_INFO("[EXPLICIT_SYNC_INIT]",
1825 "wl_egl_surface(%p) surface_sync(%p)",
1826 wl_egl_surface, wl_egl_surface->surface_sync);
1828 TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)",
1830 wl_egl_display->use_explicit_sync = TPL_FALSE;
1834 wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
1835 wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
/* Drains every tracked buffer of the surface before teardown. For each slot
 * still occupied: if the buffer is past ENQUEUED but not yet signaled /
 * committed (depending on explicit-sync usage), wait on its condvar with a
 * timeout; then release acquired buffers back to the queue and
 * cancel-dequeue buffers that were dequeued but never enqueued.
 * Lock order: wl_event_mutex -> buffers_mutex -> per-buffer mutex.
 * NOTE(review): the `idx` declaration/increment and loop framing are not
 * fully visible in this extraction -- confirm against the full file. */
1839 _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
1841 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1842 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1843 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
1844 tpl_bool_t need_to_release = TPL_FALSE;
1845 tpl_bool_t need_to_cancel = TPL_FALSE;
1846 buffer_status_t status = RELEASED;
1849 while (wl_egl_surface->buffer_cnt) {
1850 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
1851 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
1852 wl_egl_buffer = wl_egl_surface->buffers[idx];
/* detach the buffer from the tracking array before processing it */
1854 if (wl_egl_buffer) {
1855 wl_egl_surface->buffers[idx] = NULL;
1856 wl_egl_surface->buffer_cnt--;
1858 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
1859 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
1864 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
1866 tpl_gmutex_lock(&wl_egl_buffer->mutex);
1868 status = wl_egl_buffer->status;
1870 TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
1872 wl_egl_buffer->tbm_surface,
1873 status_to_string[status]);
1875 if (status >= ENQUEUED) {
1876 tpl_bool_t need_to_wait = TPL_FALSE;
1877 tpl_result_t wait_result = TPL_ERROR_NONE;
/* without explicit sync we must wait until the buffer reaches
 * WAITING_VBLANK; with it, until the (not-visible) later state */
1879 if (!wl_egl_display->use_explicit_sync &&
1880 status < WAITING_VBLANK)
1881 need_to_wait = TPL_TRUE;
1883 if (wl_egl_display->use_explicit_sync &&
1885 need_to_wait = TPL_TRUE;
/* drop wl_event_mutex while blocking so the event thread can progress */
1888 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
1889 wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
1890 &wl_egl_buffer->mutex,
1892 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
1894 status = wl_egl_buffer->status;
1896 if (wait_result == TPL_ERROR_TIME_OUT)
1897 TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
1902 /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
1903 /* It has been acquired but has not yet been released, so this
1904 * buffer must be released. */
1905 need_to_release = (status >= ACQUIRED && status <= COMMITTED);
1907 /* After dequeue, it has not been enqueued yet
1908 * so cancel_dequeue must be performed. */
1909 need_to_cancel = (status == DEQUEUED);
1911 if (need_to_release) {
1912 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
1913 wl_egl_buffer->tbm_surface);
1914 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1915 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
1916 wl_egl_buffer->tbm_surface, tsq_err);
1919 if (need_to_cancel) {
1920 tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
1921 wl_egl_buffer->tbm_surface);
1922 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1923 TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
1924 wl_egl_buffer->tbm_surface, tsq_err);
1927 wl_egl_buffer->status = RELEASED;
1929 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* drop the internal ref taken when the buffer was tracked */
1931 if (need_to_release || need_to_cancel)
1932 tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
1934 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Backend teardown counterpart of __tpl_wl_egl_surface_init(): clears all
 * tracked buffers, destroys the per-surface gsource (which runs
 * _thread_wl_egl_surface_fini on the thread), detaches every callback from
 * the wl_egl_window and frees its tizen_private block, then clears the
 * surface's mutexes/condvar and frees the backend struct. */
1941 __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
1943 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1944 tpl_wl_egl_display_t *wl_egl_display = NULL;
1946 TPL_ASSERT(surface);
1947 TPL_ASSERT(surface->display);
1949 TPL_CHECK_ON_FALSE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW);
1951 wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data;
1952 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1954 wl_egl_display = wl_egl_surface->wl_egl_display;
1955 TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
1957 TPL_INFO("[SURFACE_FINI][BEGIN]",
1958 "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1960 wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
1962 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1963 _tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
1964 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* TPL_TRUE: destroy synchronously -- finalize runs before this returns */
1966 if (wl_egl_surface->surf_source)
1967 tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
1968 wl_egl_surface->surf_source = NULL;
1970 _print_buffer_lists(wl_egl_surface);
1972 if (wl_egl_surface->wl_egl_window) {
1973 struct tizen_private *tizen_private = NULL;
1974 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
1975 TPL_INFO("[WL_EGL_WINDOW_FINI]",
1976 "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
1977 wl_egl_surface, wl_egl_window,
1978 wl_egl_surface->wl_surface);
1979 tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
/* unhook every callback installed at init so a stale window cannot
 * call back into freed surface state */
1980 if (tizen_private) {
1981 tizen_private->set_window_serial_callback = NULL;
1982 tizen_private->rotate_callback = NULL;
1983 tizen_private->get_rotation_capability = NULL;
1984 tizen_private->create_presentation_sync_fd = NULL;
1985 tizen_private->create_commit_sync_fd = NULL;
1986 tizen_private->set_frontbuffer_callback = NULL;
1987 tizen_private->merge_sync_fds = NULL;
1988 tizen_private->data = NULL;
1989 free(tizen_private);
1991 wl_egl_window->driver_private = NULL;
1994 wl_egl_window->destroy_window_callback = NULL;
1995 wl_egl_window->resize_callback = NULL;
1997 wl_egl_surface->wl_egl_window = NULL;
2000 wl_egl_surface->wl_surface = NULL;
2001 wl_egl_surface->wl_egl_display = NULL;
2002 wl_egl_surface->tpl_surface = NULL;
/* lock/unlock before clear: ensure no other thread still holds the mutex */
2004 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
2005 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
2006 tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
2008 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2009 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
2010 tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex);
2012 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2013 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2014 tpl_gmutex_clear(&wl_egl_surface->surf_mutex);
2015 tpl_gcond_clear(&wl_egl_surface->surf_cond);
2017 TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface);
2019 free(wl_egl_surface);
2020 surface->backend.data = NULL;
/* Backend setter: records whether the surface supports pre-rotation; the
 * value is reported back through __cb_get_rotation_capability(). */
2024 __tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
2027 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
2029 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2031 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
2033 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2035 TPL_INFO("[SET_PREROTATION_CAPABILITY]",
2036 "wl_egl_surface(%p) prerotation capability set to [%s]",
2037 wl_egl_surface, (set ? "TRUE" : "FALSE"));
2039 wl_egl_surface->prerotation_capability = set;
2040 return TPL_ERROR_NONE;
/* Backend setter: updates the surface's swap/post interval. */
2044 __tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
2047 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
2049 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2051 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
2053 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2055 TPL_INFO("[SET_POST_INTERVAL]",
2056 "wl_egl_surface(%p) post_interval(%d -> %d)",
2057 wl_egl_surface, wl_egl_surface->post_interval, post_interval);
2059 wl_egl_surface->post_interval = post_interval;
2061 return TPL_ERROR_NONE;
/* Backend validate: the surface is valid only while its reset flag is
 * clear (the flag is raised by the queue reset callback). */
2065 __tpl_wl_egl_surface_validate(tpl_surface_t *surface)
2067 tpl_bool_t retval = TPL_TRUE;
2069 TPL_ASSERT(surface);
2070 TPL_ASSERT(surface->backend.data);
2072 tpl_wl_egl_surface_t *wl_egl_surface =
2073 (tpl_wl_egl_surface_t *)surface->backend.data;
2075 retval = !(wl_egl_surface->reset);
/* Backend getter: reports the current tbm_queue dimensions. Output
 * pointers are written only when non-NULL (guard lines not visible here). */
2081 __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
2083 tpl_wl_egl_surface_t *wl_egl_surface =
2084 (tpl_wl_egl_surface_t *)surface->backend.data;
2087 *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2089 *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
2092 #define CAN_DEQUEUE_TIMEOUT_MS 10000
/* Recovery path used when can-dequeue times out: flushes the tbm_queue,
 * then force-releases every tracked buffer stuck in COMMITTED state so the
 * queue can hand out buffers again. Returns TPL_ERROR_NONE or
 * TPL_ERROR_INVALID_OPERATION when the flush itself fails. */
2095 _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
2097 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2099 _print_buffer_lists(wl_egl_surface);
2101 if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
2102 != TBM_SURFACE_QUEUE_ERROR_NONE) {
2103 TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
2104 wl_egl_surface->tbm_queue, tsq_err);
2105 return TPL_ERROR_INVALID_OPERATION;
2110 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2111 for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
2112 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
2113 wl_egl_buffer = wl_egl_surface->buffers[i];
2114 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
/* only COMMITTED buffers are stuck waiting for a release the
 * compositor never sent; release them ourselves */
2115 if (wl_egl_buffer && wl_egl_buffer->status == COMMITTED) {
2116 wl_egl_buffer->status = RELEASED;
2117 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2118 wl_egl_buffer->tbm_surface);
2119 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2120 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
2121 wl_egl_buffer->tbm_surface, tsq_err);
2122 tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
2127 TPL_INFO("[FORCE_FLUSH]",
2128 "wl_egl_surface(%p) tbm_queue(%p)",
2129 wl_egl_surface, wl_egl_surface->tbm_queue);
2131 return TPL_ERROR_NONE;
/* Per-frame (re)initialization of a wl_egl_buffer: resets the draw/commit
 * state, captures the current transform and window transform from
 * tizen_private, picks the frame serial, and drops any stale damage rects
 * from the previous use of this buffer. */
2135 _wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer,
2136 tpl_wl_egl_surface_t *wl_egl_surface)
2138 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2139 struct tizen_private *tizen_private =
2140 (struct tizen_private *)wl_egl_window->driver_private;
2142 TPL_ASSERT(tizen_private);
2144 wl_egl_buffer->draw_done = TPL_FALSE;
2145 wl_egl_buffer->need_to_commit = TPL_TRUE;
2146 wl_egl_buffer->buffer_release = NULL;
2147 wl_egl_buffer->transform = tizen_private->transform;
/* track window-transform changes so the commit path can flag rotation */
2149 if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
2150 wl_egl_buffer->w_transform = tizen_private->window_transform;
2151 wl_egl_buffer->w_rotated = TPL_TRUE;
/* serial: application-provided (set_window_serial) wins over the
 * monotonically increasing tizen_private counter */
2154 if (wl_egl_surface->set_serial_is_used) {
2155 wl_egl_buffer->serial = wl_egl_surface->serial;
2157 wl_egl_buffer->serial = ++tizen_private->serial;
2160 if (wl_egl_buffer->rects) {
2161 free(wl_egl_buffer->rects);
2162 wl_egl_buffer->rects = NULL;
2163 wl_egl_buffer->num_rects = 0;
/* Fetches the tpl_wl_egl_buffer_t attached to a tbm_surface via the
 * KEY_WL_EGL_BUFFER user-data slot; NULL if none was attached yet. */
2167 static tpl_wl_egl_buffer_t *
2168 _get_wl_egl_buffer(tbm_surface_h tbm_surface)
2170 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2171 tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2172 (void **)&wl_egl_buffer);
2173 return wl_egl_buffer;
/* Returns the wl_egl_buffer wrapper for a tbm_surface, creating and
 * registering one on first sight: attaches it as tbm user data, fills the
 * static fields (fds start at -1 == unset), and records it in the
 * surface's fixed-size buffers[] array. If the array is full (potential
 * leak situation) the frontmost slot is evicted with a warning. Always
 * finishes with per-frame re-init via _wl_egl_buffer_init(). Returns NULL
 * only when allocation fails. */
2176 static tpl_wl_egl_buffer_t *
2177 _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
2178 tbm_surface_h tbm_surface)
2180 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2181 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2183 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2185 if (!wl_egl_buffer) {
2186 wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t));
2187 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL);
/* attach wrapper to the tbm_surface; __cb_wl_egl_buffer_free runs
 * when the tbm_surface's user data is destroyed */
2189 tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2190 (tbm_data_free)__cb_wl_egl_buffer_free);
2191 tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2194 wl_egl_buffer->wl_buffer = NULL;
2195 wl_egl_buffer->tbm_surface = tbm_surface;
2196 wl_egl_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
2197 wl_egl_buffer->wl_egl_surface = wl_egl_surface;
2199 wl_egl_buffer->status = RELEASED;
/* -1 == "no fence/fd yet" for all sync-related descriptors */
2201 wl_egl_buffer->acquire_fence_fd = -1;
2202 wl_egl_buffer->commit_sync_fd = -1;
2203 wl_egl_buffer->presentation_sync_fd = -1;
2204 wl_egl_buffer->release_fence_fd = -1;
2206 wl_egl_buffer->dx = wl_egl_window->dx;
2207 wl_egl_buffer->dy = wl_egl_window->dy;
2208 wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
2209 wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
2211 tpl_gmutex_init(&wl_egl_buffer->mutex);
2212 tpl_gcond_init(&wl_egl_buffer->cond);
2214 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
/* find the first free slot in the tracking array */
2217 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
2218 if (wl_egl_surface->buffers[i] == NULL) break;
2220 /* If this exception is reached,
2221 * it may be a critical memory leak problem. */
2222 if (i == BUFFER_ARRAY_SIZE) {
2223 tpl_wl_egl_buffer_t *evicted_buffer = NULL;
2224 int evicted_idx = 0; /* evict the frontmost buffer */
2226 evicted_buffer = wl_egl_surface->buffers[evicted_idx];
2228 TPL_WARN("wl_egl_surface(%p) buffers array is full. evict one.",
2230 TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
2231 evicted_buffer, evicted_buffer->tbm_surface,
2232 status_to_string[evicted_buffer->status]);
2234 /* [TODO] need to think about whether there will be
2235 * better modifications */
2236 wl_egl_surface->buffer_cnt--;
2237 wl_egl_surface->buffers[evicted_idx] = NULL;
2242 wl_egl_surface->buffer_cnt++;
2243 wl_egl_surface->buffers[i] = wl_egl_buffer;
2244 wl_egl_buffer->idx = i;
2246 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
2248 TPL_INFO("[WL_EGL_BUFFER_CREATE]",
2249 "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2250 wl_egl_surface, wl_egl_buffer, tbm_surface,
2251 wl_egl_buffer->bo_name);
/* reset per-frame state whether the wrapper is new or reused */
2254 _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface);
2256 return wl_egl_buffer;
/* Backend dequeue entry point: fetch the next render target from the
 * tbm_surface_queue.
 * - Waits (with the TPL object unlocked) until a buffer can be dequeued or
 *   CAN_DEQUEUE_TIMEOUT_MS elapses; a timeout forces a queue flush via
 *   _tbm_queue_force_flush().
 * - Queries ACTIVATED/DEACTIVATED state before dequeue (see the comment
 *   below) and refreshes the cached surface size.
 * - In frontbuffer mode, returns the existing frontbuffer directly while
 *   still activated and not reset.
 * - When surface_sync (explicit sync) is in use and release_fence is
 *   non-NULL, hands the buffer's release fence fd to the caller. */
2259 static tbm_surface_h
2260 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
2261 int32_t *release_fence)
2263 TPL_ASSERT(surface);
2264 TPL_ASSERT(surface->backend.data);
2265 TPL_ASSERT(surface->display);
2266 TPL_ASSERT(surface->display->backend.data);
2267 TPL_OBJECT_CHECK_RETURN(surface, NULL);
2269 tpl_wl_egl_surface_t *wl_egl_surface =
2270 (tpl_wl_egl_surface_t *)surface->backend.data;
2271 tpl_wl_egl_display_t *wl_egl_display =
2272 (tpl_wl_egl_display_t *)surface->display->backend.data;
2273 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2275 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2276 tpl_bool_t is_activated = 0;
2278 tbm_surface_h tbm_surface = NULL;
/* drop the TPL object lock while blocking so other threads can progress */
2280 TPL_OBJECT_UNLOCK(surface);
2281 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
2282 wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
2283 TPL_OBJECT_LOCK(surface);
2285 /* After the can dequeue state, lock the wl_event_mutex to prevent other
2286 * events from being processed in wayland_egl_thread
2287 * during below dequeue procedure. */
2288 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
2290 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
2291 TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
2292 wl_egl_surface->tbm_queue, surface);
2293 if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
2294 TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
2295 wl_egl_surface->tbm_queue, surface);
2296 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* flush succeeded: clear the timeout and continue with a normal dequeue */
2299 tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2303 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2304 TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
2305 wl_egl_surface->tbm_queue, surface);
2306 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2310 /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
2311 * below function [wayland_tbm_client_queue_check_activate()].
2312 * This function has to be called before tbm_surface_queue_dequeue()
2313 * in order to know what state the buffer will be dequeued next.
2315 * ACTIVATED state means non-composite mode. Client can get buffers which
2316 can be displayed directly(without compositing).
2317 * DEACTIVATED state means composite mode. Client's buffer will be displayed
2318 by compositor(E20) with compositing.
2320 is_activated = wayland_tbm_client_queue_check_activate(
2321 wl_egl_display->wl_tbm_client,
2322 wl_egl_surface->tbm_queue);
2324 wl_egl_surface->is_activated = is_activated;
/* keep surface and backend size caches in sync with the queue */
2326 surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2327 surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
2328 wl_egl_surface->width = surface->width;
2329 wl_egl_surface->height = surface->height;
2331 if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
2332 /* If surface->frontbuffer is already set in frontbuffer mode,
2333 * it will return that frontbuffer if it is still activated,
2334 * otherwise dequeue the new buffer after initializing
2335 * surface->frontbuffer to NULL. */
2336 if (is_activated && !wl_egl_surface->reset) {
2337 bo_name = _get_tbm_surface_bo_name(surface->frontbuffer);
2340 "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
2341 surface->frontbuffer, bo_name);
2342 TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
2343 "[DEQ]~[ENQ] BO_NAME:%d",
2345 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2346 return surface->frontbuffer;
2348 surface->frontbuffer = NULL;
2349 wl_egl_surface->need_to_enqueue = TPL_TRUE;
2352 surface->frontbuffer = NULL;
2355 tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
2358 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d",
2359 wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err);
2360 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* hold a ref for the dequeued buffer; dropped on cancel/enqueue paths */
2364 tbm_surface_internal_ref(tbm_surface);
2366 wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
2367 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
2369 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2370 wl_egl_buffer->status = DEQUEUED;
2372 /* If wl_egl_buffer->release_fence_fd is -1,
2373 * the tbm_surface can be used immediately.
2374 * If not, user(EGL) have to wait until signaled. */
2375 if (release_fence) {
2376 if (wl_egl_surface->surface_sync) {
2377 *release_fence = wl_egl_buffer->release_fence_fd;
2378 TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
2379 wl_egl_surface, wl_egl_buffer, *release_fence);
/* ownership of the fence fd moves to the caller */
2381 wl_egl_buffer->release_fence_fd = -1;
2383 *release_fence = -1;
2387 if (surface->is_frontbuffer_mode && is_activated)
2388 surface->frontbuffer = tbm_surface;
2390 wl_egl_surface->reset = TPL_FALSE;
2392 TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name);
2393 TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d",
2394 wl_egl_buffer->bo_name);
2395 TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2396 wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name,
2397 release_fence ? *release_fence : -1);
2399 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2400 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Backend cancel entry point: return a dequeued (but never enqueued)
 * tbm_surface to the queue. Marks the associated wl_egl_buffer RELEASED,
 * drops the ref taken at dequeue time, then cancels the dequeue on the
 * tbm_surface_queue. */
2406 __tpl_wl_egl_surface_cancel_buffer(tpl_surface_t *surface,
2407 tbm_surface_h tbm_surface)
2409 TPL_ASSERT(surface);
2410 TPL_ASSERT(surface->backend.data);
2412 tpl_wl_egl_surface_t *wl_egl_surface =
2413 (tpl_wl_egl_surface_t *)surface->backend.data;
2414 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2415 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2417 if (!tbm_surface_internal_is_valid(tbm_surface)) {
2418 TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
2419 return TPL_ERROR_INVALID_PARAMETER;
2422 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2423 if (wl_egl_buffer) {
2424 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2425 wl_egl_buffer->status = RELEASED;
2426 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* drop the reference taken in __tpl_wl_egl_surface_dequeue_buffer() */
2429 tbm_surface_internal_unref(tbm_surface);
2431 tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
2433 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2434 TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
2435 tbm_surface, surface);
2436 return TPL_ERROR_INVALID_OPERATION;
2439 TPL_INFO("[CANCEL_BUFFER]", "wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
2440 wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
2442 return TPL_ERROR_NONE;
/* Backend enqueue entry point: submit a rendered tbm_surface.
 * - Copies damage rects (4 ints per rect) onto the wl_egl_buffer.
 * - Takes ownership of acquire_fence (closed in frontbuffer skip path).
 * - Hands over pending presentation_sync / commit_sync eventfds from the
 *   surface to the buffer under their respective mutexes.
 * - Skips the actual tbm enqueue when need_to_enqueue/need_to_commit say
 *   the frame is already displayed (frontbuffer mode). */
2446 __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
2447 tbm_surface_h tbm_surface,
2448 int num_rects, const int *rects, int32_t acquire_fence)
2450 TPL_ASSERT(surface);
2451 TPL_ASSERT(surface->display);
2452 TPL_ASSERT(surface->backend.data);
2453 TPL_ASSERT(tbm_surface);
2454 TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2456 tpl_wl_egl_surface_t *wl_egl_surface =
2457 (tpl_wl_egl_surface_t *) surface->backend.data;
2458 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2459 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2462 if (!tbm_surface_internal_is_valid(tbm_surface)) {
2463 TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
2465 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2466 return TPL_ERROR_INVALID_PARAMETER;
2469 bo_name = _get_tbm_surface_bo_name(tbm_surface);
2471 TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
2473 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2475 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2477 /* If there are received region information, save it to wl_egl_buffer */
2478 if (num_rects && rects) {
/* replace any damage rects left over from a previous enqueue */
2479 if (wl_egl_buffer->rects != NULL) {
2480 free(wl_egl_buffer->rects);
2481 wl_egl_buffer->rects = NULL;
2482 wl_egl_buffer->num_rects = 0;
2485 wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2486 wl_egl_buffer->num_rects = num_rects;
2488 if (!wl_egl_buffer->rects) {
2489 TPL_ERR("Failed to allocate memory fo damage rects info.");
2490 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2491 return TPL_ERROR_OUT_OF_MEMORY;
2494 memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects);
2497 if (!wl_egl_surface->need_to_enqueue ||
2498 !wl_egl_buffer->need_to_commit) {
2499 TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
2500 ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
2501 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2502 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2503 return TPL_ERROR_NONE;
2506 /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
2507 * commit if surface->frontbuffer that is already set and the tbm_surface
2508 * client want to enqueue are the same.
2510 if (surface->is_frontbuffer_mode) {
2511 /* The first buffer to be activated in frontbuffer mode must be
2512 * committed. Subsequence frames do not need to be committed because
2513 * the buffer is already displayed.
2515 if (surface->frontbuffer == tbm_surface)
2516 wl_egl_surface->need_to_enqueue = TPL_FALSE;
/* fence not forwarded in this path; close to avoid an fd leak */
2518 if (acquire_fence != -1) {
2519 close(acquire_fence);
/* take ownership of the new acquire fence, dropping any stale one */
2524 if (wl_egl_buffer->acquire_fence_fd != -1)
2525 close(wl_egl_buffer->acquire_fence_fd);
2527 wl_egl_buffer->acquire_fence_fd = acquire_fence;
2529 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2530 if (wl_egl_surface->presentation_sync.fd != -1) {
2531 wl_egl_buffer->presentation_sync_fd = wl_egl_surface->presentation_sync.fd;
2532 wl_egl_surface->presentation_sync.fd = -1;
2534 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
2536 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
2537 if (wl_egl_surface->commit_sync.fd != -1) {
2538 wl_egl_buffer->commit_sync_fd = wl_egl_surface->commit_sync.fd;
2539 wl_egl_surface->commit_sync.fd = -1;
2540 TRACE_ASYNC_BEGIN(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
2541 _get_tbm_surface_bo_name(tbm_surface));
2543 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
2545 wl_egl_buffer->status = ENQUEUED;
2547 "[ENQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2548 wl_egl_buffer, tbm_surface, bo_name, acquire_fence);
2550 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2552 tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
2554 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2555 tbm_surface_internal_unref(tbm_surface);
2556 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
2557 tbm_surface, wl_egl_surface, tsq_err);
2558 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2559 return TPL_ERROR_INVALID_OPERATION;
/* drop the dequeue-time reference now that the queue owns the surface */
2562 tbm_surface_internal_unref(tbm_surface);
2564 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2566 return TPL_ERROR_NONE;
/* gsource dispatch for a buffer's acquire-fence wait source: the fence
 * signaled, i.e. rendering finished. Moves the buffer to WAITING_VBLANK,
 * wakes any waiter on its cond, then either commits immediately (no
 * vblank throttling needed) or queues the buffer for the vblank callback. */
2570 __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message)
2572 tpl_wl_egl_buffer_t *wl_egl_buffer =
2573 (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
2574 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2575 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2576 tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
2578 wl_egl_surface->render_done_cnt++;
2580 TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2581 wl_egl_buffer->acquire_fence_fd);
2583 TPL_DEBUG("[RENDER DONE] wl_egl_buffer(%p) tbm_surface(%p)",
2584 wl_egl_buffer, tbm_surface);
2586 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2587 wl_egl_buffer->status = WAITING_VBLANK;
2588 tpl_gcond_signal(&wl_egl_buffer->cond);
2589 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2591 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
/* commit now if no vblank gating, else defer to __cb_tdm_client_vblank */
2593 if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done)
2594 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2596 __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers,
2599 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* gsource finalize for the acquire-fence wait source: close the fence fd
 * and clear the buffer's waiting_source pointer. */
2605 __thread_func_waiting_source_finalize(tpl_gsource *gsource)
2607 tpl_wl_egl_buffer_t *wl_egl_buffer =
2608 (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
2610 TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
2611 wl_egl_buffer, wl_egl_buffer->waiting_source,
2612 wl_egl_buffer->acquire_fence_fd);
2614 close(wl_egl_buffer->acquire_fence_fd);
2615 wl_egl_buffer->acquire_fence_fd = -1;
2616 wl_egl_buffer->waiting_source = NULL;
/* gsource ops for the per-buffer acquire-fence wait source
 * (created in _thread_surface_queue_acquire with SOURCE_TYPE_DISPOSABLE). */
2619 static tpl_gsource_functions buffer_funcs = {
2622 .dispatch = __thread_func_waiting_source_dispatch,
2623 .finalize = __thread_func_waiting_source_finalize,
/* Drain the tbm_surface_queue: acquire every ready buffer and decide its
 * path to the compositor. For each acquired buffer:
 * - lazily create its wl_buffer via wayland-tbm,
 * - with an unsignaled acquire fence and no surface_sync, park it on a
 *   disposable fence-wait gsource (commit happens in its dispatch),
 * - otherwise commit now, or push onto vblank_waiting_buffers when vblank
 *   throttling is active. */
2627 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
2629 tbm_surface_h tbm_surface = NULL;
2630 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2631 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2632 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2633 tpl_bool_t ready_to_commit = TPL_FALSE;
2635 while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) {
2636 tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue,
2638 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2639 TPL_ERR("Failed to acquire from tbm_queue(%p)",
2640 wl_egl_surface->tbm_queue);
2641 return TPL_ERROR_INVALID_OPERATION;
/* ref held until the buffer is released back by the compositor */
2644 tbm_surface_internal_ref(tbm_surface);
2646 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2647 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
2648 "wl_egl_buffer sould be not NULL");
2650 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2652 wl_egl_buffer->status = ACQUIRED;
2654 TPL_LOG_T("WL_EGL", "[ACQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2655 wl_egl_buffer, tbm_surface,
2656 _get_tbm_surface_bo_name(tbm_surface));
2658 if (wl_egl_buffer->wl_buffer == NULL) {
2659 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2660 wl_egl_buffer->wl_buffer =
2661 (struct wl_proxy *)wayland_tbm_client_create_buffer(
2662 wl_egl_display->wl_tbm_client, tbm_surface);
2664 if (!wl_egl_buffer->wl_buffer) {
2665 TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
2666 wl_egl_display->wl_tbm_client, tbm_surface);
2669 "[WL_BUFFER_CREATE] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
2670 wl_egl_buffer, wl_egl_buffer->wl_buffer, tbm_surface);
2674 if (wl_egl_buffer->acquire_fence_fd != -1) {
/* with explicit sync the compositor waits on the fence itself */
2675 if (wl_egl_surface->surface_sync)
2676 ready_to_commit = TPL_TRUE;
/* replace any stale wait source before creating a new one */
2678 if (wl_egl_buffer->waiting_source) {
2679 tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
2680 wl_egl_buffer->waiting_source = NULL;
2683 wl_egl_buffer->waiting_source =
2684 tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
2685 wl_egl_buffer->acquire_fence_fd, &buffer_funcs,
2686 SOURCE_TYPE_DISPOSABLE);
2687 wl_egl_buffer->status = WAITING_SIGNALED;
2689 TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2690 wl_egl_buffer->acquire_fence_fd);
2692 ready_to_commit = TPL_FALSE;
2695 ready_to_commit = TPL_TRUE;
2698 if (ready_to_commit) {
2699 if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done)
2700 ready_to_commit = TPL_TRUE;
2702 wl_egl_buffer->status = WAITING_VBLANK;
2703 __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, wl_egl_buffer);
2704 ready_to_commit = TPL_FALSE;
2708 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2710 if (ready_to_commit)
2711 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2714 return TPL_ERROR_NONE;
2717 /* -- BEGIN -- tdm_client vblank callback function */
/* Vblank arrived (or timed out): mark vblank_done and commit the oldest
 * buffer waiting in vblank_waiting_buffers, if any. A TDM timeout is
 * logged but treated the same as a normal vblank. */
2719 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2720 unsigned int sequence, unsigned int tv_sec,
2721 unsigned int tv_usec, void *user_data)
2723 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data;
2724 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2726 TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK");
2727 TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface);
2729 if (error == TDM_ERROR_TIMEOUT)
2730 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)",
2733 wl_egl_surface->vblank_done = TPL_TRUE;
2735 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2736 wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(
2737 wl_egl_surface->vblank_waiting_buffers,
2740 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2741 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2743 /* -- END -- tdm_client vblank callback function */
/* zwp_linux_buffer_release_v1 "fenced_release": the compositor released
 * the buffer and supplied a release fence fd. Store the fence, mark the
 * buffer RELEASED, return it to the tbm queue and drop the acquire-time
 * ref. Only acts on buffers currently in COMMITTED state. */
2746 __cb_buffer_fenced_release(void *data,
2747 struct zwp_linux_buffer_release_v1 *release, int32_t fence)
2749 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
2750 tbm_surface_h tbm_surface = NULL;
2752 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
2754 tbm_surface = wl_egl_buffer->tbm_surface;
2756 if (tbm_surface_internal_is_valid(tbm_surface)) {
2758 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2759 if (wl_egl_buffer->status == COMMITTED) {
2760 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2761 tbm_surface_queue_error_e tsq_err;
/* the release object is single-shot; destroy after the event fires */
2763 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
2764 wl_egl_buffer->buffer_release = NULL;
2766 wl_egl_buffer->release_fence_fd = fence;
2767 wl_egl_buffer->status = RELEASED;
2769 TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2770 _get_tbm_surface_bo_name(tbm_surface),
2772 TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
2773 _get_tbm_surface_bo_name(tbm_surface));
2776 "[FENCED_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2777 wl_egl_buffer, tbm_surface,
2778 _get_tbm_surface_bo_name(tbm_surface),
2781 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2783 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2784 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2786 tbm_surface_internal_unref(tbm_surface);
2789 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2792 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* zwp_linux_buffer_release_v1 "immediate_release": like fenced_release
 * but with no fence — the buffer is immediately reusable, so
 * release_fence_fd is reset to -1. Only acts on COMMITTED buffers. */
2797 __cb_buffer_immediate_release(void *data,
2798 struct zwp_linux_buffer_release_v1 *release)
2800 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
2801 tbm_surface_h tbm_surface = NULL;
2803 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
2805 tbm_surface = wl_egl_buffer->tbm_surface;
2807 if (tbm_surface_internal_is_valid(tbm_surface)) {
2809 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2810 if (wl_egl_buffer->status == COMMITTED) {
2811 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2812 tbm_surface_queue_error_e tsq_err;
/* the release object is single-shot; destroy after the event fires */
2814 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
2815 wl_egl_buffer->buffer_release = NULL;
2817 wl_egl_buffer->release_fence_fd = -1;
2818 wl_egl_buffer->status = RELEASED;
2820 TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
2821 _get_tbm_surface_bo_name(tbm_surface));
2822 TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
2823 _get_tbm_surface_bo_name(tbm_surface));
2826 "[IMMEDIATE_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2827 wl_egl_buffer, tbm_surface,
2828 _get_tbm_surface_bo_name(tbm_surface));
2830 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2832 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2833 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2835 tbm_surface_internal_unref(tbm_surface);
2838 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2841 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* Listener for zwp_linux_buffer_release_v1: fenced_release then
 * immediate_release, matching the protocol's event order.
 * (NOTE: "listner" spelling kept — identifier is referenced elsewhere.) */
2845 static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
2846 __cb_buffer_fenced_release,
2847 __cb_buffer_immediate_release,
/* wl_buffer.release (non-explicit-sync path): the compositor is done with
 * the buffer. Returns it to the tbm queue, marks it RELEASED, and unrefs
 * the tbm_surface only when the queue release succeeded. */
2851 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
2853 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
2854 tbm_surface_h tbm_surface = NULL;
2856 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer)
2858 tbm_surface = wl_egl_buffer->tbm_surface;
2860 if (tbm_surface_internal_is_valid(tbm_surface)) {
2861 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
/* initialized to an error so a non-COMMITTED buffer skips the unref below */
2862 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
2864 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2866 if (wl_egl_buffer->status == COMMITTED) {
2868 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2870 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2871 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2873 wl_egl_buffer->status = RELEASED;
2875 TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
2876 TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
2877 _get_tbm_surface_bo_name(tbm_surface));
2879 TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2880 wl_egl_buffer->wl_buffer, tbm_surface,
2881 _get_tbm_surface_bo_name(tbm_surface));
2884 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2886 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
2887 tbm_surface_internal_unref(tbm_surface);
2889 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* wl_buffer listener; only the release event is handled. */
2893 static const struct wl_buffer_listener wl_buffer_release_listener = {
2894 (void *)__cb_wl_buffer_release,
/* wp_presentation_feedback.sync_output: intentionally a no-op; all
 * arguments are ignored. */
2898 __cb_presentation_feedback_sync_output(void *data,
2899 struct wp_presentation_feedback *presentation_feedback,
2900 struct wl_output *output)
2903 TPL_IGNORE(presentation_feedback);
/* wp_presentation_feedback.presented: the frame reached the screen.
 * Signals (then closes) the per-frame presentation sync eventfd if one is
 * pending, destroys the feedback proxy and removes the pst_feedback node
 * from the surface's pending list. Timing arguments are ignored. */
2909 __cb_presentation_feedback_presented(void *data,
2910 struct wp_presentation_feedback *presentation_feedback,
2914 uint32_t refresh_nsec,
2919 TPL_IGNORE(tv_sec_hi);
2920 TPL_IGNORE(tv_sec_lo);
2921 TPL_IGNORE(tv_nsec);
2922 TPL_IGNORE(refresh_nsec);
2927 struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
2928 tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
2930 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2932 TPL_DEBUG("[FEEDBACK][PRESENTED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
2933 pst_feedback, presentation_feedback, pst_feedback->bo_name);
2935 if (pst_feedback->pst_sync_fd != -1) {
2936 int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
2938 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
2939 pst_feedback->pst_sync_fd);
2942 TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
2943 "[PRESENTATION_SYNC] bo(%d)",
2944 pst_feedback->bo_name);
2946 close(pst_feedback->pst_sync_fd);
2947 pst_feedback->pst_sync_fd = -1;
2950 wp_presentation_feedback_destroy(presentation_feedback);
2952 pst_feedback->presentation_feedback = NULL;
2953 pst_feedback->wl_egl_surface = NULL;
2954 pst_feedback->bo_name = 0;
2956 __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
2961 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* wp_presentation_feedback.discarded: the frame was never shown. Cleanup
 * mirrors __cb_presentation_feedback_presented — the sync eventfd is still
 * signalled so waiters do not hang on a discarded frame. */
2965 __cb_presentation_feedback_discarded(void *data,
2966 struct wp_presentation_feedback *presentation_feedback)
2968 struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
2969 tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
2971 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2973 TPL_DEBUG("[FEEDBACK][DISCARDED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
2974 pst_feedback, presentation_feedback, pst_feedback->bo_name);
2976 if (pst_feedback->pst_sync_fd != -1) {
2977 int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
2979 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
2980 pst_feedback->pst_sync_fd);
2983 TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
2984 "[PRESENTATION_SYNC] bo(%d)",
2985 pst_feedback->bo_name);
2987 close(pst_feedback->pst_sync_fd);
2988 pst_feedback->pst_sync_fd = -1;
2991 wp_presentation_feedback_destroy(presentation_feedback);
2993 pst_feedback->presentation_feedback = NULL;
2994 pst_feedback->wl_egl_surface = NULL;
2995 pst_feedback->bo_name = 0;
2997 __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
3002 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* wp_presentation_feedback listener: sync_output, presented, discarded. */
3005 static const struct wp_presentation_feedback_listener feedback_listener = {
3006 __cb_presentation_feedback_sync_output, /* sync_output feedback -*/
3007 __cb_presentation_feedback_presented,
3008 __cb_presentation_feedback_discarded
/* Arm a tdm_client vblank wait for this surface, lazily creating the
 * per-surface vblank object on first use. On success, clears vblank_done
 * until __cb_tdm_client_vblank runs; the wait interval comes from
 * post_interval. */
3012 _thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
3014 tdm_error tdm_err = TDM_ERROR_NONE;
3015 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3017 if (wl_egl_surface->vblank == NULL) {
3018 wl_egl_surface->vblank =
3019 _thread_create_tdm_client_vblank(wl_egl_display->tdm_client);
3020 if (!wl_egl_surface->vblank) {
3021 TPL_WARN("Failed to create vblank. wl_egl_surface(%p)",
3023 return TPL_ERROR_OUT_OF_MEMORY;
3027 tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank,
3028 wl_egl_surface->post_interval,
3029 __cb_tdm_client_vblank,
3030 (void *)wl_egl_surface);
3032 if (tdm_err == TDM_ERROR_NONE) {
3033 wl_egl_surface->vblank_done = TPL_FALSE;
3034 TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK");
3036 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
3037 return TPL_ERROR_INVALID_OPERATION;
3040 return TPL_ERROR_NONE;
/* Commit one acquired buffer to the compositor. Steps, in order:
 * 1. lazily create the wl_buffer via wayland-tbm,
 * 2. set up wp_presentation feedback when a presentation_sync fd is
 *    pending (falls back to signalling + closing the fd on failure),
 * 3. apply buffer transform and update wl_egl_window attached size,
 * 4. wl_surface_attach + per-rect damage (y-flipped for wl_surface_damage),
 * 5. set the wayland-tbm buffer serial,
 * 6. explicit sync: set the acquire fence and register a buffer_release
 *    listener; otherwise register the plain wl_buffer.release listener,
 * 7. wl_surface_commit + wl_display_flush, mark the buffer COMMITTED,
 * 8. optionally arm the next vblank wait and signal commit_sync eventfd. */
3044 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
3045 tpl_wl_egl_buffer_t *wl_egl_buffer)
3047 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3048 struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
3049 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
3052 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
3053 "wl_egl_buffer sould be not NULL");
3055 if (wl_egl_buffer->wl_buffer == NULL) {
3056 wl_egl_buffer->wl_buffer =
3057 (struct wl_proxy *)wayland_tbm_client_create_buffer(
3058 wl_egl_display->wl_tbm_client,
3059 wl_egl_buffer->tbm_surface);
3061 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
3062 "[FATAL] Failed to create wl_buffer");
/* version gates wl_surface_damage vs wl_surface_damage_buffer below */
3064 version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
3066 /* create presentation feedback and add listener */
3067 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
3068 if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
3070 struct pst_feedback *pst_feedback = NULL;
3071 pst_feedback = (struct pst_feedback *) calloc(1, sizeof(struct pst_feedback));
3073 pst_feedback->presentation_feedback =
3074 wp_presentation_feedback(wl_egl_display->presentation,
3077 pst_feedback->wl_egl_surface = wl_egl_surface;
3078 pst_feedback->bo_name = wl_egl_buffer->bo_name;
/* pst_feedback takes ownership of the presentation sync fd */
3080 pst_feedback->pst_sync_fd = wl_egl_buffer->presentation_sync_fd;
3081 wl_egl_buffer->presentation_sync_fd = -1;
3083 wp_presentation_feedback_add_listener(pst_feedback->presentation_feedback,
3084 &feedback_listener, pst_feedback);
3085 __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, pst_feedback);
3086 TRACE_ASYNC_BEGIN(pst_feedback->pst_sync_fd,
3087 "[PRESENTATION_SYNC] bo(%d)",
3088 pst_feedback->bo_name);
3090 TPL_ERR("Failed to create presentation feedback. wl_egl_buffer(%p)",
/* no feedback possible: signal waiters now so they don't block forever */
3092 _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
3093 close(wl_egl_buffer->presentation_sync_fd);
3094 wl_egl_buffer->presentation_sync_fd = -1;
3097 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
3099 if (wl_egl_buffer->w_rotated == TPL_TRUE) {
3100 wayland_tbm_client_set_buffer_transform(
3101 wl_egl_display->wl_tbm_client,
3102 (void *)wl_egl_buffer->wl_buffer,
3103 wl_egl_buffer->w_transform);
3104 wl_egl_buffer->w_rotated = TPL_FALSE;
/* only send set_buffer_transform when the transform actually changed */
3107 if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
3108 wl_egl_surface->latest_transform = wl_egl_buffer->transform;
3109 wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
3112 if (wl_egl_window) {
3113 wl_egl_window->attached_width = wl_egl_buffer->width;
3114 wl_egl_window->attached_height = wl_egl_buffer->height;
3117 wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer,
3118 wl_egl_buffer->dx, wl_egl_buffer->dy);
3120 if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) {
/* no damage rects supplied: damage the whole buffer */
3122 wl_surface_damage(wl_surface,
3123 wl_egl_buffer->dx, wl_egl_buffer->dy,
3124 wl_egl_buffer->width, wl_egl_buffer->height);
3126 wl_surface_damage_buffer(wl_surface,
3128 wl_egl_buffer->width, wl_egl_buffer->height);
3132 for (i = 0; i < wl_egl_buffer->num_rects; i++) {
/* flip y: client rects are bottom-up, wl_surface_damage is top-down */
3134 wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] +
3135 wl_egl_buffer->rects[i * 4 + 3]);
3137 wl_surface_damage(wl_surface,
3138 wl_egl_buffer->rects[i * 4 + 0],
3140 wl_egl_buffer->rects[i * 4 + 2],
3141 wl_egl_buffer->rects[i * 4 + 3]);
3143 wl_surface_damage_buffer(wl_surface,
3144 wl_egl_buffer->rects[i * 4 + 0],
3146 wl_egl_buffer->rects[i * 4 + 2],
3147 wl_egl_buffer->rects[i * 4 + 3]);
3152 wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
3153 (void *)wl_egl_buffer->wl_buffer,
3154 wl_egl_buffer->serial);
3156 if (wl_egl_display->use_explicit_sync &&
3157 wl_egl_surface->surface_sync) {
3159 zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
3160 wl_egl_buffer->acquire_fence_fd);
3161 TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)",
3162 wl_egl_surface, wl_egl_buffer->tbm_surface, wl_egl_buffer->acquire_fence_fd);
/* compositor duped the fence; our copy can be closed now */
3163 close(wl_egl_buffer->acquire_fence_fd);
3164 wl_egl_buffer->acquire_fence_fd = -1;
3166 wl_egl_buffer->buffer_release =
3167 zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync);
3168 if (!wl_egl_buffer->buffer_release) {
3169 TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface);
3171 zwp_linux_buffer_release_v1_add_listener(
3172 wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer);
3173 TPL_DEBUG("add explicit_sync_release_listener.");
3176 wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer,
3177 &wl_buffer_release_listener, wl_egl_buffer);
3180 wl_surface_commit(wl_surface);
3182 wl_display_flush(wl_egl_display->wl_display);
3184 TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3185 wl_egl_buffer->bo_name);
3187 tpl_gmutex_lock(&wl_egl_buffer->mutex);
3189 wl_egl_buffer->need_to_commit = TPL_FALSE;
3190 wl_egl_buffer->status = COMMITTED;
/* wake anyone blocked waiting for this buffer to be committed */
3192 tpl_gcond_signal(&wl_egl_buffer->cond);
3194 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3197 "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
3198 wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,
3199 wl_egl_buffer->bo_name);
3201 if (wl_egl_display->use_wait_vblank &&
3202 _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
3203 TPL_ERR("Failed to set wait vblank.");
3205 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
3207 if (wl_egl_buffer->commit_sync_fd != -1) {
3208 int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
3210 TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd);
3213 TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
3214 wl_egl_buffer->bo_name);
3215 TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)",
3216 wl_egl_surface, wl_egl_buffer->commit_sync_fd);
3218 close(wl_egl_buffer->commit_sync_fd);
3219 wl_egl_buffer->commit_sync_fd = -1;
3222 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
/* Signal an eventfd by writing an 8-byte counter value to it.
 * Errors (fd == -1 or a failed write()) are logged. */
3226 _write_to_eventfd(int eventfd)
3231 if (eventfd == -1) {
3232 TPL_ERR("Invalid fd(-1)");
3236 ret = write(eventfd, &value, sizeof(uint64_t));
3238 TPL_ERR("failed to write to fd(%d)", eventfd);
/* Populate the display backend vtable for the wayland-egl-thread backend.
 * backend->data stays NULL until __tpl_wl_egl_display_init allocates it. */
3246 __tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend)
3248 TPL_ASSERT(backend);
3250 backend->type = TPL_BACKEND_WAYLAND_THREAD;
3251 backend->data = NULL;
3253 backend->init = __tpl_wl_egl_display_init;
3254 backend->fini = __tpl_wl_egl_display_fini;
3255 backend->query_config = __tpl_wl_egl_display_query_config;
3256 backend->filter_config = __tpl_wl_egl_display_filter_config;
3257 backend->get_window_info = __tpl_wl_egl_display_get_window_info;
3258 backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
3259 backend->get_buffer_from_native_pixmap =
3260 __tpl_wl_egl_display_get_buffer_from_native_pixmap;
3264 __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
3266 TPL_ASSERT(backend);
3268 backend->type = TPL_BACKEND_WAYLAND_THREAD;
3269 backend->data = NULL;
3271 backend->init = __tpl_wl_egl_surface_init;
3272 backend->fini = __tpl_wl_egl_surface_fini;
3273 backend->validate = __tpl_wl_egl_surface_validate;
3274 backend->cancel_dequeued_buffer =
3275 __tpl_wl_egl_surface_cancel_buffer;
3276 backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
3277 backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
3278 backend->set_rotation_capability =
3279 __tpl_wl_egl_surface_set_rotation_capability;
3280 backend->set_post_interval =
3281 __tpl_wl_egl_surface_set_post_interval;
3283 __tpl_wl_egl_surface_get_size;
3287 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
3289 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3290 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3292 TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
3293 wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
3295 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3296 if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) {
3297 wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL;
3298 wl_egl_surface->buffer_cnt--;
3300 wl_egl_buffer->idx = -1;
3302 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
3304 wl_display_flush(wl_egl_display->wl_display);
3306 if (wl_egl_buffer->wl_buffer) {
3307 wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
3308 (void *)wl_egl_buffer->wl_buffer);
3309 wl_egl_buffer->wl_buffer = NULL;
3312 if (wl_egl_buffer->buffer_release) {
3313 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
3314 wl_egl_buffer->buffer_release = NULL;
3317 if (wl_egl_buffer->release_fence_fd != -1) {
3318 close(wl_egl_buffer->release_fence_fd);
3319 wl_egl_buffer->release_fence_fd = -1;
3322 if (wl_egl_buffer->waiting_source) {
3323 tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
3324 wl_egl_buffer->waiting_source = NULL;
3327 if (wl_egl_buffer->commit_sync_fd != -1) {
3328 int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
3330 TPL_ERR("Failed to send commit_sync signal to fd(%d)",
3331 wl_egl_buffer->commit_sync_fd);
3332 close(wl_egl_buffer->commit_sync_fd);
3333 wl_egl_buffer->commit_sync_fd = -1;
3336 if (wl_egl_buffer->presentation_sync_fd != -1) {
3337 int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
3339 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
3340 wl_egl_buffer->presentation_sync_fd);
3341 close(wl_egl_buffer->presentation_sync_fd);
3342 wl_egl_buffer->presentation_sync_fd = -1;
3345 if (wl_egl_buffer->rects) {
3346 free(wl_egl_buffer->rects);
3347 wl_egl_buffer->rects = NULL;
3348 wl_egl_buffer->num_rects = 0;
3351 wl_egl_buffer->tbm_surface = NULL;
3352 wl_egl_buffer->bo_name = -1;
3354 free(wl_egl_buffer);
3358 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
3360 return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
3364 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
3368 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3369 TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)",
3370 wl_egl_surface, wl_egl_surface->buffer_cnt);
3371 for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
3372 tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
3373 if (wl_egl_buffer) {
3375 "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
3376 idx, wl_egl_buffer, wl_egl_buffer->tbm_surface,
3377 wl_egl_buffer->bo_name,
3378 status_to_string[wl_egl_buffer->status]);
3381 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);