2 #include "tpl_internal.h"
7 #include <sys/eventfd.h>
9 #include <tbm_bufmgr.h>
10 #include <tbm_surface.h>
11 #include <tbm_surface_internal.h>
12 #include <tbm_surface_queue.h>
14 #include <wayland-client.h>
15 #include <wayland-tbm-server.h>
16 #include <wayland-tbm-client.h>
17 #include <wayland-egl-backend.h>
19 #include <tdm_client.h>
21 #include "wayland-egl-tizen/wayland-egl-tizen.h"
22 #include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
24 #include <tizen-surface-client-protocol.h>
25 #include <presentation-time-client-protocol.h>
26 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
28 #include "tpl_utils_gthread.h"
/* Address of this object (not its value) serves as a unique per-process key
 * for attaching a tpl_wl_egl_buffer_t to a tbm_surface as user data. */
30 static int wl_egl_buffer_key;
31 #define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key)
33 /* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
34 #define CLIENT_QUEUE_SIZE 3
/* Capacity of the per-surface buffer tracking array (buffers[] in
 * _tpl_wl_egl_surface); sized to twice the queue to cover in-flight buffers. */
35 #define BUFFER_ARRAY_SIZE (CLIENT_QUEUE_SIZE * 2)
/* Forward typedefs so the three structs below can reference each other. */
37 typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
38 typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
39 typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t;
/* Per-display backend state. Owns the wayland event queue, the
 * wayland-tbm client and the tdm_client used for vblank waits.
 * NOTE(review): some members of this struct are elided from this listing. */
41 struct _tpl_wl_egl_display {
42 tpl_gsource *disp_source;
/* Serializes wayland event-queue dispatch against other threads. */
44 tpl_gmutex wl_event_mutex;
46 struct wl_display *wl_display;
/* Private event queue for this backend's wayland proxies. */
47 struct wl_event_queue *ev_queue;
48 struct wayland_tbm_client *wl_tbm_client;
49 int last_error; /* errno of the last wl_display error*/
51 tpl_bool_t wl_initialized;
52 tpl_bool_t tdm_initialized;
/* tdm client and its gsource; used for vblank-synchronized commits. */
54 tdm_client *tdm_client;
55 tpl_gsource *tdm_source;
/* Feature toggles; both can be overridden via environment variables. */
58 tpl_bool_t use_wait_vblank;
59 tpl_bool_t use_explicit_sync;
62 struct tizen_surface_shm *tss; /* used for surface buffer_flush */
63 struct wp_presentation *presentation; /* for presentation feedback */
64 struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
/* Per-surface backend state, bridging a wl_egl_window to a
 * tbm_surface_queue and tracking buffers through their lifecycle.
 * NOTE(review): some members of this struct are elided from this listing. */
67 struct _tpl_wl_egl_surface {
68 tpl_gsource *surf_source;
70 tbm_surface_queue_h tbm_queue;
/* Native window handles; cleared when the window is destroyed (see
 * __cb_destroy_callback). */
72 struct wl_egl_window *wl_egl_window;
73 struct wl_surface *wl_surface;
74 struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
75 struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */
/* Per-surface vblank object from the display's tdm_client. */
77 tdm_client_vblank *vblank;
79 /* surface information */
/* Back-pointers to the owning display and the public tpl_surface. */
90 tpl_wl_egl_display_t *wl_egl_display;
91 tpl_surface_t *tpl_surface;
93 /* wl_egl_buffer array for buffer tracing */
94 tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE];
95 int buffer_cnt; /* the number of using wl_egl_buffers */
96 tpl_gmutex buffers_mutex;
/* Lists consumed by the worker thread. */
98 tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
99 tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
/* Protects surface state shared with the wl_egl_thread. */
111 tpl_gmutex surf_mutex;
114 /* for waiting draw done */
115 tpl_bool_t use_render_done_fence;
116 tpl_bool_t is_activated;
117 tpl_bool_t reset; /* TRUE if queue was reset by external request */
118 tpl_bool_t need_to_enqueue;
119 tpl_bool_t prerotation_capability;
120 tpl_bool_t vblank_done;
121 tpl_bool_t set_serial_is_used;
/* Buffer lifecycle states used for tracing.
 * NOTE(review): most enumerators are elided from this listing; the trailing
 * "// N" comments record each enumerator's numeric value. */
124 typedef enum buffer_status {
129 WAITING_SIGNALED, // 4
/* Must stay index-aligned with the buffer_status enum above. */
134 static const char *status_to_string[7] = {
139 "WAITING_SIGNALED", // 4
140 "WAITING_VBLANK", // 5
/* Per-buffer state attached to a tbm_surface via KEY_WL_EGL_BUFFER.
 * Tracks the wl_buffer proxy, geometry, sync fds and lifecycle status.
 * NOTE(review): some members of this struct are elided from this listing. */
144 struct _tpl_wl_egl_buffer {
145 tbm_surface_h tbm_surface;
/* wl_buffer proxy exported for this tbm_surface. */
148 struct wl_proxy *wl_buffer;
149 int dx, dy; /* position to attach to wl_surface */
150 int width, height; /* size to attach to wl_surface */
152 buffer_status_t status; /* for tracing buffer status */
153 int idx; /* position index in buffers array of wl_egl_surface */
155 /* for damage region */
159 /* for wayland_tbm_client_set_buffer_transform */
161 tpl_bool_t w_rotated;
163 /* for wl_surface_set_buffer_transform */
166 /* for wayland_tbm_client_set_buffer_serial */
169 /* for checking need_to_commit (frontbuffer mode) */
170 tpl_bool_t need_to_commit;
172 /* for checking draw done */
173 tpl_bool_t draw_done;
176 /* to get release event via zwp_linux_buffer_release_v1 */
177 struct zwp_linux_buffer_release_v1 *buffer_release;
179 /* each buffers own its release_fence_fd, until it passes ownership
181 int32_t release_fence_fd;
183 /* each buffers own its acquire_fence_fd.
184 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
185 * will be passed to display server
186 * Otherwise it will be used as a fence waiting for render done
188 int32_t acquire_fence_fd;
190 /* Fd to send a signal when wl_surface_commit with this buffer */
191 int32_t commit_sync_fd;
193 /* Fd to send a signal when receiving the
194 * presentation feedback from display server */
195 int32_t presentation_sync_fd;
/* gsource used while waiting for the render-done fence. */
197 tpl_gsource *waiting_source;
/* Back-pointer to the owning surface. */
202 tpl_wl_egl_surface_t *wl_egl_surface;
/* Tracks one outstanding wp_presentation_feedback request; queued on
 * wl_egl_surface->presentation_feedbacks.
 * NOTE(review): some members of this struct are elided from this listing. */
205 struct pst_feedback {
206 /* to get presentation feedback from display server */
207 struct wp_presentation_feedback *presentation_feedback;
/* Back-pointer to the owning surface. */
212 tpl_wl_egl_surface_t *wl_egl_surface;
/* Forward declarations of static helpers defined later in this file.
 * NOTE(review): the return-type lines of several declarations are elided
 * from this listing. */
217 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
219 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
221 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
222 static tpl_wl_egl_buffer_t *
223 _get_wl_egl_buffer(tbm_surface_h tbm_surface);
225 _write_to_eventfd(int eventfd);
227 _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface);
229 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
231 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
232 tpl_wl_egl_buffer_t *wl_egl_buffer);
/* Returns whether the native display handle is a wl_display: the first
 * pointer-sized value of a wl_display points at wl_display_interface (or an
 * interface with the same name). Mirrors
 * __tpl_display_choose_backend_wl_egl_thread2 below. */
235 _check_native_handle_is_wl_display(tpl_handle_t display)
237 struct wl_interface *wl_egl_native_dpy = *(void **) display;
239 if (!wl_egl_native_dpy) {
240 TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy);
244 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
245 is a memory address pointing the structure of wl_display_interface. */
246 if (wl_egl_native_dpy == &wl_display_interface)
/* Fallback: compare interface names in case of multiple wl_display_interface
 * copies across shared objects. */
249 if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
250 strlen(wl_display_interface.name)) == 0) {
/* gsource dispatch callback for the tdm fd: pumps pending tdm client events.
 * On unrecoverable errors the tdm gsource destroys itself and is detached
 * from wl_egl_display. */
258 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
260 tpl_wl_egl_display_t *wl_egl_display = NULL;
261 tdm_error tdm_err = TDM_ERROR_NONE;
265 wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
266 if (!wl_egl_display) {
267 TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource);
268 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
272 tdm_err = tdm_client_handle_events(wl_egl_display->tdm_client);
274 /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
275 * When tdm_source is no longer available due to an unexpected situation,
276 * wl_egl_thread must remove it from the thread and destroy it.
277 * In that case, tdm_vblank can no longer be used for surfaces and displays
278 * that used this tdm_source. */
279 if (tdm_err != TDM_ERROR_NONE) {
280 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
282 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
/* TPL_FALSE: do not wait for the destroy to complete (we are inside the
 * source's own dispatch). */
284 tpl_gsource_destroy(gsource, TPL_FALSE);
286 wl_egl_display->tdm_source = NULL;
/* gsource finalize callback for the tdm fd: destroys the tdm_client and
 * clears the display's tdm bookkeeping. */
295 __thread_func_tdm_finalize(tpl_gsource *gsource)
297 tpl_wl_egl_display_t *wl_egl_display = NULL;
299 wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
302 "tdm_destroy| wl_egl_display(%p) tdm_client(%p) tpl_gsource(%p)",
303 wl_egl_display, wl_egl_display->tdm_client, gsource);
305 if (wl_egl_display->tdm_client) {
306 tdm_client_destroy(wl_egl_display->tdm_client);
307 wl_egl_display->tdm_client = NULL;
308 wl_egl_display->tdm_display_fd = -1;
311 wl_egl_display->tdm_initialized = TPL_FALSE;
/* gsource vtable for the tdm fd source (no prepare/check needed: tdm events
 * are consumed entirely in dispatch). */
314 static tpl_gsource_functions tdm_funcs = {
317 .dispatch = __thread_func_tdm_dispatch,
318 .finalize = __thread_func_tdm_finalize,
/* Creates a tdm_client for vblank waits and stores it (with its fd) on
 * wl_egl_display. Runs on the wl_egl_thread.
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_OPERATION on any
 * tdm failure (client creation or fd query). */
322 _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display)
324 tdm_client *tdm_client = NULL;
325 int tdm_display_fd = -1;
326 tdm_error tdm_err = TDM_ERROR_NONE;
328 tdm_client = tdm_client_create(&tdm_err);
329 if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
330 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
331 return TPL_ERROR_INVALID_OPERATION;
334 tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
335 if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
336 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
337 tdm_client_destroy(tdm_client);
338 return TPL_ERROR_INVALID_OPERATION;
/* The tdm gsource itself is created later by the caller
 * (__tpl_wl_egl_display_init). */
341 wl_egl_display->tdm_display_fd = tdm_display_fd;
342 wl_egl_display->tdm_client = tdm_client;
343 wl_egl_display->tdm_source = NULL;
344 wl_egl_display->tdm_initialized = TPL_TRUE;
346 TPL_INFO("[TDM_CLIENT_INIT]",
347 "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
348 wl_egl_display, tdm_client, tdm_display_fd);
350 return TPL_ERROR_NONE;
/* Highest tizen_surface_shm protocol version this backend implements;
 * the bind below clamps the advertised version to this. */
353 #define IMPL_TIZEN_SURFACE_SHM_VERSION 2
/* wl_registry global listener: binds the optional protocol extensions this
 * backend uses (tizen_surface_shm, wp_presentation,
 * zwp_linux_explicit_synchronization_v1). Explicit sync can be disabled via
 * the TPL_EFS environment variable ("0" disables). */
356 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
357 uint32_t name, const char *interface,
360 tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
362 if (!strcmp(interface, "tizen_surface_shm")) {
363 wl_egl_display->tss =
364 wl_registry_bind(wl_registry,
366 &tizen_surface_shm_interface,
367 ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
368 version : IMPL_TIZEN_SURFACE_SHM_VERSION));
369 } else if (!strcmp(interface, wp_presentation_interface.name)) {
370 wl_egl_display->presentation =
371 wl_registry_bind(wl_registry,
372 name, &wp_presentation_interface, 1);
373 TPL_DEBUG("bind wp_presentation_interface");
374 } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
375 char *env = tpl_getenv("TPL_EFS");
376 if (env && !atoi(env)) {
/* TPL_EFS=0: advertised but explicitly disabled; do not bind. */
377 wl_egl_display->use_explicit_sync = TPL_FALSE;
379 wl_egl_display->explicit_sync =
380 wl_registry_bind(wl_registry, name,
381 &zwp_linux_explicit_synchronization_v1_interface, 1);
382 wl_egl_display->use_explicit_sync = TPL_TRUE;
383 TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
/* wl_registry global_remove handler: intentionally a no-op (body elided in
 * this listing; bound globals are torn down in _thread_wl_display_fini). */
389 __cb_wl_resistry_global_remove_callback(void *data,
390 struct wl_registry *wl_registry,
/* Listener pair passed to wl_registry_add_listener in
 * _thread_wl_display_init. */
395 static const struct wl_registry_listener registry_listener = {
396 __cb_wl_resistry_global_callback,
397 __cb_wl_resistry_global_remove_callback
/* Logs a wayland display error (errno plus, for EPROTO, the offending
 * interface/code/proxy) and records errno in last_error so repeated failures
 * of the same kind are reported only once. */
401 _wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display,
402 const char *func_name)
406 strerror_r(errno, buf, sizeof(buf));
/* Suppress duplicate reports of the same errno. */
408 if (wl_egl_display->last_error == errno)
411 TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
413 dpy_err = wl_display_get_error(wl_egl_display->wl_display);
414 if (dpy_err == EPROTO) {
415 const struct wl_interface *err_interface;
416 uint32_t err_proxy_id, err_code;
417 err_code = wl_display_get_protocol_error(wl_egl_display->wl_display,
420 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
421 err_interface->name, err_code, err_proxy_id);
/* Remember the error; once set, the display is treated as unusable by the
 * disp_* gsource callbacks. */
424 wl_egl_display->last_error = errno;
/* Thread-side wayland initialization: creates the private event queue,
 * binds registry globals via a display wrapper + temporary queue, initializes
 * wayland-tbm, and moves all bound proxies onto ev_queue.
 * Returns TPL_ERROR_NONE on success; on failure returns an error code after
 * goto-style cleanup of the wrapper/registry/queue (cleanup labels are
 * partially elided in this listing). */
428 _thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display)
430 struct wl_registry *registry = NULL;
431 struct wl_event_queue *queue = NULL;
432 struct wl_display *display_wrapper = NULL;
433 struct wl_proxy *wl_tbm = NULL;
434 struct wayland_tbm_client *wl_tbm_client = NULL;
436 tpl_result_t result = TPL_ERROR_NONE;
/* Temporary queue used only for the registry roundtrip below. */
438 queue = wl_display_create_queue(wl_egl_display->wl_display);
440 TPL_ERR("Failed to create wl_queue wl_display(%p)",
441 wl_egl_display->wl_display);
442 result = TPL_ERROR_INVALID_OPERATION;
/* Long-lived private queue for this backend's proxies. */
446 wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display);
447 if (!wl_egl_display->ev_queue) {
448 TPL_ERR("Failed to create wl_queue wl_display(%p)",
449 wl_egl_display->wl_display);
450 result = TPL_ERROR_INVALID_OPERATION;
/* Wrapper lets us retarget the registry onto our queue without affecting
 * the application's wl_display queue. */
454 display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display);
455 if (!display_wrapper) {
456 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
457 wl_egl_display->wl_display);
458 result = TPL_ERROR_INVALID_OPERATION;
462 wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
464 registry = wl_display_get_registry(display_wrapper);
466 TPL_ERR("Failed to create wl_registry");
467 result = TPL_ERROR_INVALID_OPERATION;
/* The wrapper is only needed to create the registry. */
471 wl_proxy_wrapper_destroy(display_wrapper);
472 display_wrapper = NULL;
474 wl_tbm_client = wayland_tbm_client_init(wl_egl_display->wl_display);
475 if (!wl_tbm_client) {
476 TPL_ERR("Failed to initialize wl_tbm_client.");
477 result = TPL_ERROR_INVALID_CONNECTION;
481 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
483 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
484 result = TPL_ERROR_INVALID_CONNECTION;
488 wl_proxy_set_queue(wl_tbm, wl_egl_display->ev_queue);
489 wl_egl_display->wl_tbm_client = wl_tbm_client;
491 if (wl_registry_add_listener(registry, &registry_listener,
493 TPL_ERR("Failed to wl_registry_add_listener");
494 result = TPL_ERROR_INVALID_OPERATION;
/* Blocks until the registry listener has bound all advertised globals. */
498 ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue);
500 _wl_display_print_err(wl_egl_display, "roundtrip_queue");
501 result = TPL_ERROR_INVALID_OPERATION;
505 /* set tizen_surface_shm's queue as client's private queue */
506 if (wl_egl_display->tss) {
507 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss,
508 wl_egl_display->ev_queue);
509 TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss);
512 if (wl_egl_display->presentation) {
513 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation,
514 wl_egl_display->ev_queue);
515 TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.",
516 wl_egl_display->presentation);
519 if (wl_egl_display->explicit_sync) {
520 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync,
521 wl_egl_display->ev_queue);
522 TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.",
523 wl_egl_display->explicit_sync);
526 wl_egl_display->wl_initialized = TPL_TRUE;
528 TPL_INFO("[WAYLAND_INIT]",
529 "wl_egl_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
530 wl_egl_display, wl_egl_display->wl_display,
531 wl_egl_display->wl_tbm_client, wl_egl_display->ev_queue);
532 TPL_INFO("[WAYLAND_INIT]",
533 "tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)",
534 wl_egl_display->tss, wl_egl_display->presentation,
535 wl_egl_display->explicit_sync);
/* Error-path cleanup (labels elided in this listing). */
539 wl_proxy_wrapper_destroy(display_wrapper);
541 wl_registry_destroy(registry);
543 wl_event_queue_destroy(queue);
/* Thread-side wayland teardown: cancels any pending read, flushes pending
 * events, destroys bound protocol objects, deinitializes wayland-tbm and
 * destroys the private event queue. Counterpart of _thread_wl_display_init. */
549 _thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display)
551 /* If wl_egl_display is in prepared state, cancel it */
552 if (wl_egl_display->prepared) {
553 wl_display_cancel_read(wl_egl_display->wl_display);
554 wl_egl_display->prepared = TPL_FALSE;
557 if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
558 wl_egl_display->ev_queue) == -1) {
559 _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
562 if (wl_egl_display->tss) {
563 TPL_INFO("[TIZEN_SURFACE_SHM_DESTROY]",
564 "wl_egl_display(%p) tizen_surface_shm(%p) fini.",
565 wl_egl_display, wl_egl_display->tss);
566 tizen_surface_shm_destroy(wl_egl_display->tss);
567 wl_egl_display->tss = NULL;
570 if (wl_egl_display->presentation) {
571 TPL_INFO("[WP_PRESENTATION_DESTROY]",
572 "wl_egl_display(%p) wp_presentation(%p) fini.",
573 wl_egl_display, wl_egl_display->presentation);
574 wp_presentation_destroy(wl_egl_display->presentation);
575 wl_egl_display->presentation = NULL;
578 if (wl_egl_display->explicit_sync) {
579 TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
580 "wl_egl_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
581 wl_egl_display, wl_egl_display->explicit_sync);
582 zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync);
583 wl_egl_display->explicit_sync = NULL;
586 if (wl_egl_display->wl_tbm_client) {
587 struct wl_proxy *wl_tbm = NULL;
589 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
590 wl_egl_display->wl_tbm_client);
/* Detach wl_tbm from ev_queue before the queue is destroyed below. */
592 wl_proxy_set_queue(wl_tbm, NULL);
595 TPL_INFO("[WL_TBM_DEINIT]",
596 "wl_egl_display(%p) wl_tbm_client(%p)",
597 wl_egl_display, wl_egl_display->wl_tbm_client);
598 wayland_tbm_client_deinit(wl_egl_display->wl_tbm_client);
599 wl_egl_display->wl_tbm_client = NULL;
602 wl_event_queue_destroy(wl_egl_display->ev_queue);
604 wl_egl_display->wl_initialized = TPL_FALSE;
606 TPL_INFO("[DISPLAY_FINI]", "wl_egl_display(%p) wl_display(%p)",
607 wl_egl_display, wl_egl_display->wl_display);
/* Entry point run on the newly created wl_egl_thread: performs wayland and
 * tdm initialization. tdm failure is non-fatal (vblank wait is disabled);
 * returns the wl_egl_display pointer as the thread init result. */
611 _thread_init(void *data)
613 tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
615 if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) {
616 TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)",
617 wl_egl_display, wl_egl_display->wl_display);
620 if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
621 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
624 return wl_egl_display;
/* gsource prepare callback: enters wayland's prepare-read state for
 * ev_queue (dispatching any already-pending events first), then flushes
 * outgoing requests. Skips straight to dispatch if last_error is set. */
628 __thread_func_disp_prepare(tpl_gsource *gsource)
630 tpl_wl_egl_display_t *wl_egl_display =
631 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
633 /* If this wl_egl_display is already prepared,
634 * do nothing in this function. */
635 if (wl_egl_display->prepared)
638 /* If there is a last_error, there is no need to poll,
639 * so skip directly to dispatch.
640 * prepare -> dispatch */
641 if (wl_egl_display->last_error)
/* Standard wayland prepare-read loop: drain pending events until we can
 * safely poll the display fd. */
644 while (wl_display_prepare_read_queue(wl_egl_display->wl_display,
645 wl_egl_display->ev_queue) != 0) {
646 if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
647 wl_egl_display->ev_queue) == -1) {
648 _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
652 wl_egl_display->prepared = TPL_TRUE;
654 wl_display_flush(wl_egl_display->wl_display);
/* gsource check callback: completes the prepare/read pair started in
 * disp_prepare — reads events if the fd is readable, otherwise cancels the
 * read. Always clears the prepared flag before returning. */
660 __thread_func_disp_check(tpl_gsource *gsource)
662 tpl_wl_egl_display_t *wl_egl_display =
663 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
664 tpl_bool_t ret = TPL_FALSE;
666 if (!wl_egl_display->prepared)
669 /* If prepared, but last_error is set,
670 * cancel_read is executed and FALSE is returned.
671 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
672 * and skipping disp_check from prepare to disp_dispatch.
673 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
674 if (wl_egl_display->prepared && wl_egl_display->last_error) {
675 wl_display_cancel_read(wl_egl_display->wl_display);
679 if (tpl_gsource_check_io_condition(gsource)) {
680 if (wl_display_read_events(wl_egl_display->wl_display) == -1)
681 _wl_display_print_err(wl_egl_display, "read_event");
/* fd not readable: a prepared read must be explicitly cancelled. */
684 wl_display_cancel_read(wl_egl_display->wl_display);
688 wl_egl_display->prepared = TPL_FALSE;
/* gsource dispatch callback: dispatches pending ev_queue events under
 * wl_event_mutex and flushes. When last_error is set the source is removed
 * (the display is no longer usable). */
694 __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
696 tpl_wl_egl_display_t *wl_egl_display =
697 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
701 /* If there is last_error, SOURCE_REMOVE should be returned
702 * to remove the gsource from the main loop.
703 * This is because wl_egl_display is not valid since last_error was set.*/
704 if (wl_egl_display->last_error) {
/* Serialize against other threads that dispatch on ev_queue. */
708 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
709 if (tpl_gsource_check_io_condition(gsource)) {
710 if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
711 wl_egl_display->ev_queue) == -1) {
712 _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
716 wl_display_flush(wl_egl_display->wl_display);
717 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* gsource finalize callback: tears down thread-side wayland state if it was
 * initialized. */
723 __thread_func_disp_finalize(tpl_gsource *gsource)
725 tpl_wl_egl_display_t *wl_egl_display =
726 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
728 if (wl_egl_display->wl_initialized)
729 _thread_wl_display_fini(wl_egl_display);
731 TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)",
732 wl_egl_display, gsource);
/* gsource vtable for the wayland display fd source. */
738 static tpl_gsource_functions disp_funcs = {
739 .prepare = __thread_func_disp_prepare,
740 .check = __thread_func_disp_check,
741 .dispatch = __thread_func_disp_dispatch,
742 .finalize = __thread_func_disp_finalize,
/* Backend entry point: validates the native wl_display handle, allocates
 * and default-initializes tpl_wl_egl_display_t, spawns the wl_egl_thread
 * (which runs _thread_init), and attaches the display-fd and tdm-fd
 * gsources to it.
 * Returns TPL_ERROR_NONE on success; on failure tears down whatever was
 * created and returns an error code. */
746 __tpl_wl_egl_display_init(tpl_display_t *display)
748 tpl_wl_egl_display_t *wl_egl_display = NULL;
752 /* Do not allow default display in wayland. */
753 if (!display->native_handle) {
754 TPL_ERR("Invalid native handle for display.");
755 return TPL_ERROR_INVALID_PARAMETER;
758 if (!_check_native_handle_is_wl_display(display->native_handle)) {
759 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
760 return TPL_ERROR_INVALID_PARAMETER;
763 wl_egl_display = (tpl_wl_egl_display_t *) calloc(1,
764 sizeof(tpl_wl_egl_display_t));
765 if (!wl_egl_display) {
766 TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t.");
767 return TPL_ERROR_OUT_OF_MEMORY;
770 display->backend.data = wl_egl_display;
771 display->bufmgr_fd = -1;
/* Explicit defaults (calloc already zeroed the struct; these document the
 * intended initial state). */
773 wl_egl_display->tdm_initialized = TPL_FALSE;
774 wl_egl_display->wl_initialized = TPL_FALSE;
776 wl_egl_display->ev_queue = NULL;
777 wl_egl_display->wl_display = (struct wl_display *)display->native_handle;
778 wl_egl_display->last_error = 0;
779 wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
780 wl_egl_display->prepared = TPL_FALSE;
782 /* Wayland Interfaces */
783 wl_egl_display->tss = NULL;
784 wl_egl_display->presentation = NULL;
785 wl_egl_display->explicit_sync = NULL;
786 wl_egl_display->wl_tbm_client = NULL;
788 wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled
/* TPL_WAIT_VBLANK=0 disables vblank-synchronized commits. */
790 char *env = tpl_getenv("TPL_WAIT_VBLANK");
791 if (env && !atoi(env)) {
792 wl_egl_display->use_wait_vblank = TPL_FALSE;
796 tpl_gmutex_init(&wl_egl_display->wl_event_mutex);
/* _thread_init runs on the new thread and performs wayland/tdm setup. */
799 wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
800 (tpl_gthread_func)_thread_init,
801 (void *)wl_egl_display);
802 if (!wl_egl_display->thread) {
803 TPL_ERR("Failed to create wl_egl_thread");
807 wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread,
808 (void *)wl_egl_display,
809 wl_display_get_fd(wl_egl_display->wl_display),
810 &disp_funcs, SOURCE_TYPE_NORMAL);
811 if (!wl_egl_display->disp_source) {
812 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
813 display->native_handle,
814 wl_egl_display->thread);
818 wl_egl_display->tdm_source = tpl_gsource_create(wl_egl_display->thread,
819 (void *)wl_egl_display,
820 wl_egl_display->tdm_display_fd,
821 &tdm_funcs, SOURCE_TYPE_NORMAL);
822 if (!wl_egl_display->tdm_source) {
823 TPL_ERR("Failed to create tdm_gsource\n");
827 TPL_INFO("[DISPLAY_INIT]",
828 "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
830 wl_egl_display->thread,
831 wl_egl_display->wl_display);
833 TPL_INFO("[DISPLAY_INIT]",
834 "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)",
835 wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
836 wl_egl_display->tss ? "TRUE" : "FALSE",
837 wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");
839 return TPL_ERROR_NONE;
/* Error path: destroy sources (TPL_TRUE waits for the destroy to finish on
 * the thread), stop the thread, free the struct. */
842 if (wl_egl_display->thread) {
843 if (wl_egl_display->tdm_source)
844 tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
845 if (wl_egl_display->disp_source)
846 tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
848 tpl_gthread_destroy(wl_egl_display->thread);
851 wl_egl_display->thread = NULL;
852 free(wl_egl_display);
854 display->backend.data = NULL;
855 return TPL_ERROR_INVALID_OPERATION;
/* Backend display teardown: synchronously destroys the tdm and display
 * gsources (their finalize callbacks release tdm/wayland state on the
 * thread), stops the thread, and frees the backend struct. */
859 __tpl_wl_egl_display_fini(tpl_display_t *display)
861 tpl_wl_egl_display_t *wl_egl_display;
865 wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data;
866 if (wl_egl_display) {
867 TPL_INFO("[DISPLAY_FINI]",
868 "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
870 wl_egl_display->thread,
871 wl_egl_display->wl_display);
873 if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) {
/* TPL_TRUE: block until finalize has run on the thread. */
874 tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
875 wl_egl_display->tdm_source = NULL;
878 if (wl_egl_display->disp_source) {
879 tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
880 wl_egl_display->disp_source = NULL;
883 if (wl_egl_display->thread) {
884 tpl_gthread_destroy(wl_egl_display->thread);
885 wl_egl_display->thread = NULL;
888 tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);
890 free(wl_egl_display);
893 display->backend.data = NULL;
/* Maps an EGL-style config query to a tbm format: 8/8/8 RGB window configs
 * with 24/32-bit depth yield ARGB8888 (alpha 8) or XRGB8888 (alpha 0).
 * Anything else is rejected with TPL_ERROR_INVALID_PARAMETER. */
897 __tpl_wl_egl_display_query_config(tpl_display_t *display,
898 tpl_surface_type_t surface_type,
899 int red_size, int green_size,
900 int blue_size, int alpha_size,
901 int color_depth, int *native_visual_id,
906 if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
907 green_size == 8 && blue_size == 8 &&
908 (color_depth == 32 || color_depth == 24)) {
910 if (alpha_size == 8) {
911 if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
912 if (is_slow) *is_slow = TPL_FALSE;
913 return TPL_ERROR_NONE;
915 if (alpha_size == 0) {
916 if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
917 if (is_slow) *is_slow = TPL_FALSE;
918 return TPL_ERROR_NONE;
922 return TPL_ERROR_INVALID_PARAMETER;
/* No-op config filter: this backend accepts every config unchanged. */
926 __tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
930 TPL_IGNORE(visual_id);
931 TPL_IGNORE(alpha_size);
932 return TPL_ERROR_NONE;
/* Reports size and tbm format of a native wl_egl_window. If the window is
 * already bound to a tpl surface, the surface's stored format is returned;
 * otherwise the format is derived from a_size (8 -> ARGB8888, else
 * XRGB8888; some branches elided in this listing). */
936 __tpl_wl_egl_display_get_window_info(tpl_display_t *display,
937 tpl_handle_t window, int *width,
938 int *height, tbm_format *format,
939 int depth, int a_size)
941 tpl_result_t ret = TPL_ERROR_NONE;
942 struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window;
947 if (!wl_egl_window) {
948 TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window);
949 return TPL_ERROR_INVALID_PARAMETER;
952 if (width) *width = wl_egl_window->width;
953 if (height) *height = wl_egl_window->height;
/* driver_private carries the tizen_private whose ->data points back at the
 * tpl_wl_egl_surface_t once a surface was created for this window. */
955 struct tizen_private *tizen_private =
956 (struct tizen_private *)wl_egl_window->driver_private;
957 if (tizen_private && tizen_private->data) {
958 tpl_wl_egl_surface_t *wl_egl_surface =
959 (tpl_wl_egl_surface_t *)tizen_private->data;
960 *format = wl_egl_surface->format;
963 *format = TBM_FORMAT_ARGB8888;
965 *format = TBM_FORMAT_XRGB8888;
/* Reports size and format of a native pixmap by resolving it to a
 * tbm_surface through wayland-tbm-server. */
973 __tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
974 tpl_handle_t pixmap, int *width,
975 int *height, tbm_format *format)
977 tbm_surface_h tbm_surface = NULL;
980 TPL_ERR("Invalid parameter. tpl_handle_t(%p)", pixmap);
981 return TPL_ERROR_INVALID_PARAMETER;
984 tbm_surface = wayland_tbm_server_get_surface(NULL,
985 (struct wl_resource *)pixmap);
987 TPL_ERR("Failed to get tbm_surface from wayland_tbm.");
988 return TPL_ERROR_INVALID_PARAMETER;
991 if (width) *width = tbm_surface_get_width(tbm_surface);
992 if (height) *height = tbm_surface_get_height(tbm_surface);
993 if (format) *format = tbm_surface_get_format(tbm_surface);
995 return TPL_ERROR_NONE;
/* Resolves a native pixmap handle to its backing tbm_surface via
 * wayland-tbm-server (return statements elided in this listing). */
999 __tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
1001 tbm_surface_h tbm_surface = NULL;
1005 tbm_surface = wayland_tbm_server_get_surface(NULL,
1006 (struct wl_resource *)pixmap);
1008 TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
/* Backend selection predicate: same wl_display magic check as
 * _check_native_handle_is_wl_display (pointer identity against
 * wl_display_interface, with a name comparison as fallback). */
1016 __tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy)
1018 struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
1020 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
1022 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
1023 is a memory address pointing the structure of wl_display_interface. */
1024 if (wl_egl_native_dpy == &wl_display_interface)
1027 if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
1028 strlen(wl_display_interface.name)) == 0) {
1035 /* -- BEGIN -- wl_egl_window callback functions */
/* Called when the application destroys the wl_egl_window. Destroying the
 * window before eglDestroySurface is abnormal; in that case all window
 * pointers and tizen_private callbacks are detached under surf_mutex so the
 * still-live surface cannot touch the freed window. */
1037 __cb_destroy_callback(void *private)
1039 struct tizen_private *tizen_private = (struct tizen_private *)private;
1040 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1042 if (!tizen_private) {
1043 TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface");
1047 wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1048 if (wl_egl_surface) {
1049 TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.",
1050 wl_egl_surface->wl_egl_window);
1051 TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface.");
1053 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1054 wl_egl_surface->wl_egl_window->destroy_window_callback = NULL;
1055 wl_egl_surface->wl_egl_window->resize_callback = NULL;
1056 wl_egl_surface->wl_egl_window->driver_private = NULL;
1057 wl_egl_surface->wl_egl_window = NULL;
1058 wl_egl_surface->wl_surface = NULL;
1060 tizen_private->set_window_serial_callback = NULL;
1061 tizen_private->rotate_callback = NULL;
1062 tizen_private->get_rotation_capability = NULL;
1063 tizen_private->set_frontbuffer_callback = NULL;
1064 tizen_private->create_commit_sync_fd = NULL;
1065 tizen_private->create_presentation_sync_fd = NULL;
1066 tizen_private->data = NULL;
1068 free(tizen_private);
1069 tizen_private = NULL;
1070 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* wl_egl_window resize callback: resets the tbm_surface_queue to the
 * window's newly requested size, keeping the surface's current format. */
1075 __cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
1077 TPL_ASSERT(private);
1078 TPL_ASSERT(wl_egl_window);
1080 struct tizen_private *tizen_private = (struct tizen_private *)private;
1081 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1082 int cur_w, cur_h, req_w, req_h, format;
1084 if (!wl_egl_surface) {
1085 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1090 format = wl_egl_surface->format;
1091 cur_w = wl_egl_surface->width;
1092 cur_h = wl_egl_surface->height;
1093 req_w = wl_egl_window->width;
1094 req_h = wl_egl_window->height;
1096 TPL_INFO("[WINDOW_RESIZE]",
1097 "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
1098 wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
1100 if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
1101 != TBM_SURFACE_QUEUE_ERROR_NONE) {
1102 TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
1106 /* -- END -- wl_egl_window callback functions */
1108 /* -- BEGIN -- wl_egl_window tizen private callback functions */
1110 /* There is no usecase for using prerotation callback below */
/* wl_egl_window rotate callback: records the window's new rotation on the
 * surface for later prerotation handling. */
1112 __cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
1114 TPL_ASSERT(private);
1115 TPL_ASSERT(wl_egl_window);
1117 struct tizen_private *tizen_private = (struct tizen_private *)private;
1118 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1119 int rotation = tizen_private->rotation;
1121 if (!wl_egl_surface) {
1122 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1127 TPL_INFO("[WINDOW_ROTATE]",
1128 "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)",
1129 wl_egl_surface, wl_egl_window,
1130 wl_egl_surface->rotation, rotation);
1132 wl_egl_surface->rotation = rotation;
1135 /* There is no usecase for using prerotation callback below */
1137 __cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
1140 TPL_ASSERT(private);
1141 TPL_ASSERT(wl_egl_window);
1143 int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
1144 struct tizen_private *tizen_private = (struct tizen_private *)private;
1145 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1147 if (!wl_egl_surface) {
1148 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1150 return rotation_capability;
1153 if (wl_egl_surface->prerotation_capability == TPL_TRUE)
1154 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
1156 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
1159 return rotation_capability;
/* Stores an application-provided buffer serial on the surface and marks it
 * in use, so subsequent buffers are committed with this serial. */
1163 __cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
1164 void *private, unsigned int serial)
1166 TPL_ASSERT(private);
1167 TPL_ASSERT(wl_egl_window);
1169 struct tizen_private *tizen_private = (struct tizen_private *)private;
1170 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1172 if (!wl_egl_surface) {
1173 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1178 wl_egl_surface->set_serial_is_used = TPL_TRUE;
1179 wl_egl_surface->serial = serial;
/* Returns a dup of the surface's commit-sync eventfd, creating the eventfd
 * lazily on first call. The fd is signaled when wl_surface_commit is issued
 * for a buffer of this surface. The caller owns the returned (dup'd) fd;
 * returns -1 on failure. */
1183 __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1185 TPL_ASSERT(private);
1186 TPL_ASSERT(wl_egl_window);
1188 int commit_sync_fd = -1;
1190 struct tizen_private *tizen_private = (struct tizen_private *)private;
1191 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1193 if (!wl_egl_surface) {
1194 TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface);
1198 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
/* Already created: hand out another dup of the same eventfd. */
1200 if (wl_egl_surface->commit_sync.fd != -1) {
1201 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1202 TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
1203 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1204 TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)",
1205 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1206 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1207 return commit_sync_fd;
1210 wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
1211 if (wl_egl_surface->commit_sync.fd == -1) {
1212 TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)",
1214 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1218 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1220 TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
1221 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1222 TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)",
1223 wl_egl_surface, commit_sync_fd);
1225 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1227 return commit_sync_fd;
1231 __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1233 TPL_ASSERT(private);
1234 TPL_ASSERT(wl_egl_window);
1236 int presentation_sync_fd = -1;
1238 struct tizen_private *tizen_private = (struct tizen_private *)private;
1239 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1241 if (!wl_egl_surface) {
1242 TPL_ERR("Invalid parameter. wl_egl_surface is NULL");
1246 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
1247 if (wl_egl_surface->presentation_sync.fd != -1) {
1248 presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1249 TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
1250 wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1251 TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1252 wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1253 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1254 return presentation_sync_fd;
1257 wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
1258 if (wl_egl_surface->presentation_sync.fd == -1) {
1259 TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)",
1261 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1265 presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1266 TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
1267 wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1268 TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1269 wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1271 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1273 return presentation_sync_fd;
1275 /* -- END -- wl_egl_window tizen private callback functions */
1277 /* -- BEGIN -- tizen_surface_shm_flusher_listener */
1278 static void __cb_tss_flusher_flush_callback(void *data,
1279 struct tizen_surface_shm_flusher *tss_flusher)
1281 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1282 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1284 TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1285 wl_egl_surface, wl_egl_surface->tbm_queue);
1287 _print_buffer_lists(wl_egl_surface);
1289 tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
1290 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1291 TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
1296 static void __cb_tss_flusher_free_flush_callback(void *data,
1297 struct tizen_surface_shm_flusher *tss_flusher)
1299 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1300 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1302 TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1303 wl_egl_surface, wl_egl_surface->tbm_queue);
1305 _print_buffer_lists(wl_egl_surface);
1307 tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
1308 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1309 TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
1314 static const struct tizen_surface_shm_flusher_listener
1315 tss_flusher_listener = {
1316 __cb_tss_flusher_flush_callback,
1317 __cb_tss_flusher_free_flush_callback
1319 /* -- END -- tizen_surface_shm_flusher_listener */
/* -- BEGIN -- tbm_surface_queue callback functions */
/*
 * __cb_tbm_queue_reset_callback - invoked when the tbm_surface_queue is
 * reset (resize or compositor activation change). Sets
 * wl_egl_surface->reset so the next frame is dequeued with the new
 * size/activation state, then invokes the user's reset callback.
 * NOTE(review): this chunk is truncated by extraction (interior lines,
 * e.g. braces and the signature's data parameter, are missing); code is
 * kept verbatim.
 */
1324 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1327 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1328 tpl_wl_egl_display_t *wl_egl_display = NULL;
1329 tpl_surface_t *surface = NULL;
1330 tpl_bool_t is_activated = TPL_FALSE;
1333 wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1334 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1336 wl_egl_display = wl_egl_surface->wl_egl_display;
1337 TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
1339 surface = wl_egl_surface->tpl_surface;
1340 TPL_CHECK_ON_NULL_RETURN(surface);
1342 /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1343 * the changed window size at the next frame. */
1344 width = tbm_surface_queue_get_width(tbm_queue);
1345 height = tbm_surface_queue_get_height(tbm_queue);
1346 if (surface->width != width || surface->height != height) {
1347 TPL_INFO("[QUEUE_RESIZE]",
1348 "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1349 wl_egl_surface, tbm_queue,
1350 surface->width, surface->height, width, height);
1353 /* When queue_reset_callback is called, if is_activated is different from
1354 * its previous state change the reset flag to TPL_TRUE to get a new buffer
1355 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1356 is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client,
1357 wl_egl_surface->tbm_queue);
1358 if (wl_egl_surface->is_activated != is_activated) {
/* Activated: the compositor scans out our buffers directly. */
1360 TPL_INFO("[ACTIVATED]",
1361 "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1362 wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
1364 TPL_LOG_T("[DEACTIVATED]",
1365 " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1366 wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
/* Mark surface dirty; __tpl_wl_egl_surface_validate reports this. */
1370 wl_egl_surface->reset = TPL_TRUE;
1372 if (surface->reset_cb)
1373 surface->reset_cb(surface->reset_data);
1377 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1380 TPL_IGNORE(tbm_queue);
1382 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1383 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1385 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1387 tpl_gsource_send_message(wl_egl_surface->surf_source, 2);
1389 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1391 /* -- END -- tbm_surface_queue callback funstions */
/*
 * _thread_wl_egl_surface_fini - per-surface teardown, runs on the wl-egl
 * thread (called from __thread_func_surf_finalize). Under surf_mutex:
 * drains pending presentation feedbacks (waking waiters through their
 * eventfds), closes the presentation sync fd, frees the vblank-waiting
 * list, then destroys the per-surface protocol/vblank/queue objects.
 * NOTE(review): chunk is truncated by extraction (interior lines such as
 * closing braces are missing); code is kept verbatim.
 */
1394 _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
1396 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1398 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1400 TPL_INFO("[SURFACE_FINI]",
1401 "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
1402 wl_egl_surface, wl_egl_surface->wl_egl_window,
1403 wl_egl_surface->wl_surface);
/* Drop in-flight presentation feedbacks: signal waiters via eventfd,
 * close the fd, destroy the wp_presentation_feedback proxy. */
1405 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
1407 if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) {
1408 while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) {
1409 struct pst_feedback *pst_feedback =
1410 (struct pst_feedback *)__tpl_list_pop_front(
1411 wl_egl_surface->presentation_feedbacks, NULL);
1413 _write_to_eventfd(pst_feedback->pst_sync_fd);
1414 close(pst_feedback->pst_sync_fd);
1415 pst_feedback->pst_sync_fd = -1;
1417 wp_presentation_feedback_destroy(pst_feedback->presentation_feedback);
1418 pst_feedback->presentation_feedback = NULL;
1424 __tpl_list_free(wl_egl_surface->presentation_feedbacks, NULL);
1425 wl_egl_surface->presentation_feedbacks = NULL;
/* Wake any consumer blocked on the surface-level presentation sync fd. */
1428 if (wl_egl_surface->presentation_sync.fd != -1) {
1429 _write_to_eventfd(wl_egl_surface->presentation_sync.fd);
1430 close(wl_egl_surface->presentation_sync.fd);
1431 wl_egl_surface->presentation_sync.fd = -1;
1434 if (wl_egl_surface->vblank_waiting_buffers) {
1435 __tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL);
1436 wl_egl_surface->vblank_waiting_buffers = NULL;
1439 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* Destroy per-surface objects; each is NULL'd so fini is idempotent. */
1441 if (wl_egl_surface->surface_sync) {
1442 TPL_INFO("[SURFACE_SYNC_DESTROY]",
1443 "wl_egl_surface(%p) surface_sync(%p)",
1444 wl_egl_surface, wl_egl_surface->surface_sync);
1445 zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync);
1446 wl_egl_surface->surface_sync = NULL;
1449 if (wl_egl_surface->tss_flusher) {
1450 TPL_INFO("[FLUSHER_DESTROY]",
1451 "wl_egl_surface(%p) tss_flusher(%p)",
1452 wl_egl_surface, wl_egl_surface->tss_flusher);
1453 tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher);
1454 wl_egl_surface->tss_flusher = NULL;
1457 if (wl_egl_surface->vblank) {
1458 TPL_INFO("[VBLANK_DESTROY]",
1459 "wl_egl_surface(%p) vblank(%p)",
1460 wl_egl_surface, wl_egl_surface->vblank);
1461 tdm_client_vblank_destroy(wl_egl_surface->vblank);
1462 wl_egl_surface->vblank = NULL;
1465 if (wl_egl_surface->tbm_queue) {
1466 TPL_INFO("[TBM_QUEUE_DESTROY]",
1467 "wl_egl_surface(%p) tbm_queue(%p)",
1468 wl_egl_surface, wl_egl_surface->tbm_queue);
1469 tbm_surface_queue_destroy(wl_egl_surface->tbm_queue);
1470 wl_egl_surface->tbm_queue = NULL;
1473 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1477 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1479 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1481 wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
1483 /* Initialize surface */
1485 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1486 TPL_DEBUG("wl_egl_surface(%p) initialize message received!",
1488 _thread_wl_egl_surface_init(wl_egl_surface);
1489 tpl_gcond_signal(&wl_egl_surface->surf_cond);
1490 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1491 } else if (message == 2) {
1492 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1493 TPL_DEBUG("wl_egl_surface(%p) acquirable message received!",
1495 _thread_surface_queue_acquire(wl_egl_surface);
1496 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1503 __thread_func_surf_finalize(tpl_gsource *gsource)
1505 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1507 wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
1508 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1510 _thread_wl_egl_surface_fini(wl_egl_surface);
1512 TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)",
1513 wl_egl_surface, gsource);
1516 static tpl_gsource_functions surf_funcs = {
1519 .dispatch = __thread_func_surf_dispatch,
1520 .finalize = __thread_func_surf_finalize,
/*
 * __tpl_wl_egl_surface_init - backend surface-init entry point. Allocates
 * the tpl_wl_egl_surface_t, creates its gsource on the display's wl-egl
 * thread, wires the wl_egl_window tizen-private callbacks, initializes the
 * per-surface mutexes/cond, then sends message 1 to the thread and blocks
 * on surf_cond until the thread-side init (tbm_queue creation etc.) is
 * done.
 * Returns TPL_ERROR_NONE, TPL_ERROR_INVALID_PARAMETER,
 * TPL_ERROR_OUT_OF_MEMORY, or TPL_ERROR_INVALID_OPERATION.
 * NOTE(review): chunk is truncated by extraction (assignment to
 * wl_egl_display, several braces, loop declarations missing); code is
 * kept verbatim.
 */
1524 __tpl_wl_egl_surface_init(tpl_surface_t *surface)
1526 tpl_wl_egl_display_t *wl_egl_display = NULL;
1527 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1528 tpl_gsource *surf_source = NULL;
1530 struct wl_egl_window *wl_egl_window =
1531 (struct wl_egl_window *)surface->native_handle;
1533 TPL_ASSERT(surface);
1534 TPL_ASSERT(surface->display);
1535 TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1536 TPL_ASSERT(surface->native_handle);
1539 (tpl_wl_egl_display_t *)surface->display->backend.data;
1540 if (!wl_egl_display) {
1541 TPL_ERR("Invalid parameter. wl_egl_display(%p)",
1543 return TPL_ERROR_INVALID_PARAMETER;
1546 wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
1547 sizeof(tpl_wl_egl_surface_t));
1548 if (!wl_egl_surface) {
1549 TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
1550 return TPL_ERROR_OUT_OF_MEMORY;
/* Event source bound to the shared wl-egl thread; -1 means no fd. */
1553 surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface,
1554 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
1556 TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)",
1558 goto surf_source_create_fail;
/* Publish backend data and mirror the native window geometry. */
1561 surface->backend.data = (void *)wl_egl_surface;
1562 surface->width = wl_egl_window->width;
1563 surface->height = wl_egl_window->height;
1564 surface->rotation = 0;
1566 wl_egl_surface->tpl_surface = surface;
1567 wl_egl_surface->width = wl_egl_window->width;
1568 wl_egl_surface->height = wl_egl_window->height;
1569 wl_egl_surface->format = surface->format;
1571 wl_egl_surface->surf_source = surf_source;
1572 wl_egl_surface->wl_egl_window = wl_egl_window;
1573 wl_egl_surface->wl_surface = wl_egl_window->surface;
1575 wl_egl_surface->wl_egl_display = wl_egl_display;
1577 wl_egl_surface->reset = TPL_FALSE;
1578 wl_egl_surface->is_activated = TPL_FALSE;
1579 wl_egl_surface->need_to_enqueue = TPL_TRUE;
1580 wl_egl_surface->prerotation_capability = TPL_FALSE;
1581 wl_egl_surface->vblank_done = TPL_TRUE;
1582 wl_egl_surface->use_render_done_fence = TPL_FALSE;
1583 wl_egl_surface->set_serial_is_used = TPL_FALSE;
1585 wl_egl_surface->latest_transform = 0;
1586 wl_egl_surface->render_done_cnt = 0;
1587 wl_egl_surface->serial = 0;
1589 wl_egl_surface->vblank = NULL;
1590 wl_egl_surface->tss_flusher = NULL;
1591 wl_egl_surface->surface_sync = NULL;
1593 wl_egl_surface->post_interval = surface->post_interval;
/* -1 marks the lazily-created eventfds as "not created yet". */
1595 wl_egl_surface->commit_sync.fd = -1;
1596 wl_egl_surface->presentation_sync.fd = -1;
1600 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1601 wl_egl_surface->buffers[i] = NULL;
1602 wl_egl_surface->buffer_cnt = 0;
/* Hook the tizen-private callbacks so wayland-egl-tizen entry points
 * (rotation, serial, sync-fd creation) reach this surface. */
1606 struct tizen_private *tizen_private = NULL;
1608 if (wl_egl_window->driver_private)
1609 tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
1611 tizen_private = tizen_private_create();
1612 wl_egl_window->driver_private = (void *)tizen_private;
1615 if (tizen_private) {
1616 tizen_private->data = (void *)wl_egl_surface;
1617 tizen_private->rotate_callback = (void *)__cb_rotate_callback;
1618 tizen_private->get_rotation_capability = (void *)
1619 __cb_get_rotation_capability;
1620 tizen_private->set_window_serial_callback = (void *)
1621 __cb_set_window_serial_callback;
1622 tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
1623 tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
1625 wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
1626 wl_egl_window->resize_callback = (void *)__cb_resize_callback;
1630 tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
1631 tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
1633 tpl_gmutex_init(&wl_egl_surface->buffers_mutex);
1635 tpl_gmutex_init(&wl_egl_surface->surf_mutex);
1636 tpl_gcond_init(&wl_egl_surface->surf_cond);
1638 /* Initialize in thread */
1639 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1640 tpl_gsource_send_message(wl_egl_surface->surf_source, 1);
1641 tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
1642 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1644 TPL_ASSERT(wl_egl_surface->tbm_queue);
1646 TPL_INFO("[SURFACE_INIT]",
1647 "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)",
1648 surface, wl_egl_surface, wl_egl_surface->surf_source);
1650 return TPL_ERROR_NONE;
1652 surf_source_create_fail:
1653 free(wl_egl_surface);
1654 surface->backend.data = NULL;
1655 return TPL_ERROR_INVALID_OPERATION;
/*
 * _thread_create_tbm_queue - creates the client-side tbm_surface_queue for
 * this surface via wayland-tbm (tiled variant when the buffer manager
 * advertises tiled-memory capability), sets GUARANTEE_CYCLE mode and
 * registers the reset/acquirable callbacks.
 * Returns the queue, or NULL on any failure (queue destroyed on partial
 * failure).
 * NOTE(review): chunk is truncated by extraction (the argument lists of
 * the two wayland_tbm_client_create_surface_queue* calls and several
 * braces/returns are missing); code is kept verbatim.
 */
1658 static tbm_surface_queue_h
1659 _thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface,
1660 struct wayland_tbm_client *wl_tbm_client,
1663 tbm_surface_queue_h tbm_queue = NULL;
1664 tbm_bufmgr bufmgr = NULL;
1665 unsigned int capability;
1667 struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
1668 int width = wl_egl_surface->width;
1669 int height = wl_egl_surface->height;
1670 int format = wl_egl_surface->format;
1672 if (!wl_tbm_client || !wl_surface) {
1673 TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)",
1674 wl_tbm_client, wl_surface);
/* Probe bufmgr capability only; the temporary bufmgr is released. */
1678 bufmgr = tbm_bufmgr_init(-1);
1679 capability = tbm_bufmgr_get_capability(bufmgr);
1680 tbm_bufmgr_deinit(bufmgr);
1682 if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1683 tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1691 tbm_queue = wayland_tbm_client_create_surface_queue(
1701 TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)",
/* GUARANTEE_CYCLE: buffers are handed out in strict FIFO rotation. */
1706 if (tbm_surface_queue_set_modes(
1707 tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1708 TBM_SURFACE_QUEUE_ERROR_NONE) {
1709 TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1711 tbm_surface_queue_destroy(tbm_queue);
1715 if (tbm_surface_queue_add_reset_cb(
1717 __cb_tbm_queue_reset_callback,
1718 (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1719 TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1721 tbm_surface_queue_destroy(tbm_queue);
1725 if (tbm_surface_queue_add_acquirable_cb(
1727 __cb_tbm_queue_acquirable_callback,
1728 (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1729 TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1731 tbm_surface_queue_destroy(tbm_queue);
1738 static tdm_client_vblank*
1739 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1741 tdm_client_vblank *vblank = NULL;
1742 tdm_client_output *tdm_output = NULL;
1743 tdm_error tdm_err = TDM_ERROR_NONE;
1746 TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1750 tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1751 if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1752 TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1756 vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1757 if (!vblank || tdm_err != TDM_ERROR_NONE) {
1758 TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
1762 tdm_client_vblank_set_enable_fake(vblank, 1);
1763 tdm_client_vblank_set_sync(vblank, 0);
/*
 * _thread_wl_egl_surface_init - thread-side half of surface init (runs on
 * the wl-egl thread via message 1). Creates the tbm_queue, the per-surface
 * tdm vblank, the tizen_surface_shm flusher (if the display exposes tss),
 * the explicit-sync surface object (if enabled), and the two helper lists.
 * NOTE(review): chunk is truncated by extraction (some call-argument
 * lines, braces and else branches are missing); code is kept verbatim.
 */
1769 _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
1771 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1773 wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
1775 wl_egl_display->wl_tbm_client,
1777 if (!wl_egl_surface->tbm_queue) {
1778 TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
1779 wl_egl_surface, wl_egl_display->wl_tbm_client);
1783 TPL_INFO("[QUEUE_CREATION]",
1784 "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)",
1785 wl_egl_surface, wl_egl_surface->wl_surface,
1786 wl_egl_display->wl_tbm_client);
1787 TPL_INFO("[QUEUE_CREATION]",
1788 "tbm_queue(%p) size(%d x %d) X %d format(%d)",
1789 wl_egl_surface->tbm_queue,
1790 wl_egl_surface->width,
1791 wl_egl_surface->height,
1793 wl_egl_surface->format);
/* vblank may legitimately fail (no tdm); surface still works without. */
1795 wl_egl_surface->vblank = _thread_create_tdm_client_vblank(
1796 wl_egl_display->tdm_client);
1797 if (wl_egl_surface->vblank) {
1798 TPL_INFO("[VBLANK_INIT]",
1799 "wl_egl_surface(%p) tdm_client(%p) vblank(%p)",
1800 wl_egl_surface, wl_egl_display->tdm_client,
1801 wl_egl_surface->vblank);
/* Optional compositor-driven buffer flush support. */
1804 if (wl_egl_display->tss) {
1805 wl_egl_surface->tss_flusher =
1806 tizen_surface_shm_get_flusher(wl_egl_display->tss,
1807 wl_egl_surface->wl_surface);
1810 if (wl_egl_surface->tss_flusher) {
1811 tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
1812 &tss_flusher_listener,
1814 TPL_INFO("[FLUSHER_INIT]",
1815 "wl_egl_surface(%p) tss_flusher(%p)",
1816 wl_egl_surface, wl_egl_surface->tss_flusher);
/* Explicit fence sync: failure downgrades the whole display to the
 * non-explicit path (use_explicit_sync cleared). */
1819 if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) {
1820 wl_egl_surface->surface_sync =
1821 zwp_linux_explicit_synchronization_v1_get_synchronization(
1822 wl_egl_display->explicit_sync, wl_egl_surface->wl_surface);
1823 if (wl_egl_surface->surface_sync) {
1824 TPL_INFO("[EXPLICIT_SYNC_INIT]",
1825 "wl_egl_surface(%p) surface_sync(%p)",
1826 wl_egl_surface, wl_egl_surface->surface_sync);
1828 TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)",
1830 wl_egl_display->use_explicit_sync = TPL_FALSE;
1834 wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
1835 wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
/*
 * _tpl_wl_egl_surface_buffer_clear - releases every tracked buffer during
 * surface teardown. For each slot in wl_egl_surface->buffers: detach the
 * buffer (under buffers_mutex + wl_event_mutex), then, holding the
 * buffer's own mutex, wait for in-flight buffers to reach a safe state,
 * release acquired buffers back to the tbm_queue and cancel-dequeue
 * dequeued ones, finally unref the tbm_surface.
 * Lock order here: wl_event_mutex -> buffers_mutex -> buffer->mutex; the
 * wl_event_mutex is dropped around the timed wait so the wl-egl thread
 * can make progress.
 * NOTE(review): chunk is truncated by extraction (idx handling, some
 * conditions and braces are missing); code is kept verbatim.
 */
1839 _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
1841 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1842 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1843 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
1844 tpl_bool_t need_to_release = TPL_FALSE;
1845 tpl_bool_t need_to_cancel = TPL_FALSE;
1846 buffer_status_t status = RELEASED;
1849 while (wl_egl_surface->buffer_cnt) {
1850 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
1851 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
1852 wl_egl_buffer = wl_egl_surface->buffers[idx];
1854 if (wl_egl_buffer) {
1855 wl_egl_surface->buffers[idx] = NULL;
1856 wl_egl_surface->buffer_cnt--;
/* Empty slot: drop both locks and move on. */
1858 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
1859 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
1864 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
1866 tpl_gmutex_lock(&wl_egl_buffer->mutex);
1868 status = wl_egl_buffer->status;
1870 TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
1872 wl_egl_buffer->tbm_surface,
1873 status_to_string[status]);
/* Enqueued-but-not-yet-settled buffers: wait (with timeout) until the
 * wl-egl thread advances them; which states require waiting depends on
 * whether explicit sync is in use. */
1875 if (status >= ENQUEUED) {
1876 tpl_bool_t need_to_wait = TPL_FALSE;
1877 tpl_result_t wait_result = TPL_ERROR_NONE;
1879 if (!wl_egl_display->use_explicit_sync &&
1880 status < WAITING_VBLANK)
1881 need_to_wait = TPL_TRUE;
1883 if (wl_egl_display->use_explicit_sync &&
1885 need_to_wait = TPL_TRUE;
/* Drop wl_event_mutex while waiting so the thread can progress. */
1888 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
1889 wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
1890 &wl_egl_buffer->mutex,
1892 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
1894 status = wl_egl_buffer->status;
1896 if (wait_result == TPL_ERROR_TIME_OUT)
1897 TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
1902 /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
1903 /* It has been acquired but has not yet been released, so this
1904 * buffer must be released. */
1905 need_to_release = (status >= ACQUIRED && status <= COMMITTED);
1907 /* After dequeue, it has not been enqueued yet
1908 * so cancel_dequeue must be performed. */
1909 need_to_cancel = (status == DEQUEUED);
1911 if (need_to_release) {
1912 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
1913 wl_egl_buffer->tbm_surface);
1914 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1915 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
1916 wl_egl_buffer->tbm_surface, tsq_err);
1919 if (need_to_cancel) {
1920 tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
1921 wl_egl_buffer->tbm_surface);
1922 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1923 TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
1924 wl_egl_buffer->tbm_surface, tsq_err);
1927 wl_egl_buffer->status = RELEASED;
1929 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
1931 if (need_to_release || need_to_cancel)
1932 tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
1934 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/*
 * __tpl_wl_egl_surface_fini - backend surface-fini entry point (caller
 * thread). Clears all tracked buffers, destroys the surface gsource
 * (which runs _thread_wl_egl_surface_fini on the wl-egl thread and
 * blocks — TPL_TRUE), unhooks the wl_egl_window tizen-private callbacks
 * and frees tizen_private, then tears down the surface-local mutexes/cond
 * and frees the backend struct.
 * The lock/unlock pairs before each tpl_gmutex_clear drain any
 * last-moment holder of that mutex before destroying it.
 * NOTE(review): chunk is truncated by extraction (some braces and else
 * branches are missing); code is kept verbatim.
 */
1941 __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
1943 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1944 tpl_wl_egl_display_t *wl_egl_display = NULL;
1946 TPL_ASSERT(surface);
1947 TPL_ASSERT(surface->display);
1949 TPL_CHECK_ON_FALSE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW);
1951 wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data;
1952 TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1954 wl_egl_display = wl_egl_surface->wl_egl_display;
1955 TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
1957 TPL_INFO("[SURFACE_FINI][BEGIN]",
1958 "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1960 wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
1962 _tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
/* Destroying the gsource runs the thread-side fini synchronously. */
1964 if (wl_egl_surface->surf_source)
1965 tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
1966 wl_egl_surface->surf_source = NULL;
1968 _print_buffer_lists(wl_egl_surface);
1970 if (wl_egl_surface->wl_egl_window) {
1971 struct tizen_private *tizen_private = NULL;
1972 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
1973 TPL_INFO("[WL_EGL_WINDOW_FINI]",
1974 "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
1975 wl_egl_surface, wl_egl_window,
1976 wl_egl_surface->wl_surface);
1977 tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
1978 if (tizen_private) {
/* Null out every callback first so late invocations are harmless. */
1979 tizen_private->set_window_serial_callback = NULL;
1980 tizen_private->rotate_callback = NULL;
1981 tizen_private->get_rotation_capability = NULL;
1982 tizen_private->create_presentation_sync_fd = NULL;
1983 tizen_private->create_commit_sync_fd = NULL;
1984 tizen_private->set_frontbuffer_callback = NULL;
1985 tizen_private->merge_sync_fds = NULL;
1986 tizen_private->data = NULL;
1987 free(tizen_private);
1989 wl_egl_window->driver_private = NULL;
1992 wl_egl_window->destroy_window_callback = NULL;
1993 wl_egl_window->resize_callback = NULL;
1995 wl_egl_surface->wl_egl_window = NULL;
1998 wl_egl_surface->wl_surface = NULL;
1999 wl_egl_surface->wl_egl_display = NULL;
2000 wl_egl_surface->tpl_surface = NULL;
2002 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
2003 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
2004 tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
2006 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2007 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
2008 tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex);
2010 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2011 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2012 tpl_gmutex_clear(&wl_egl_surface->surf_mutex);
2013 tpl_gcond_clear(&wl_egl_surface->surf_cond);
2015 TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface);
2017 free(wl_egl_surface);
2018 surface->backend.data = NULL;
2022 __tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
2025 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
2027 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2029 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
2031 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2033 TPL_INFO("[SET_PREROTATION_CAPABILITY]",
2034 "wl_egl_surface(%p) prerotation capability set to [%s]",
2035 wl_egl_surface, (set ? "TRUE" : "FALSE"));
2037 wl_egl_surface->prerotation_capability = set;
2038 return TPL_ERROR_NONE;
2042 __tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
2045 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
2047 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2049 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
2051 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2053 TPL_INFO("[SET_POST_INTERVAL]",
2054 "wl_egl_surface(%p) post_interval(%d -> %d)",
2055 wl_egl_surface, wl_egl_surface->post_interval, post_interval);
2057 wl_egl_surface->post_interval = post_interval;
2059 return TPL_ERROR_NONE;
2063 __tpl_wl_egl_surface_validate(tpl_surface_t *surface)
2065 tpl_bool_t retval = TPL_TRUE;
2067 TPL_ASSERT(surface);
2068 TPL_ASSERT(surface->backend.data);
2070 tpl_wl_egl_surface_t *wl_egl_surface =
2071 (tpl_wl_egl_surface_t *)surface->backend.data;
2073 retval = !(wl_egl_surface->reset);
2079 __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
2081 tpl_wl_egl_surface_t *wl_egl_surface =
2082 (tpl_wl_egl_surface_t *)surface->backend.data;
2085 *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2087 *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
2090 #define CAN_DEQUEUE_TIMEOUT_MS 10000
2093 _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
2095 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2097 _print_buffer_lists(wl_egl_surface);
2099 if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
2100 != TBM_SURFACE_QUEUE_ERROR_NONE) {
2101 TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
2102 wl_egl_surface->tbm_queue, tsq_err);
2103 return TPL_ERROR_INVALID_OPERATION;
2108 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2109 for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
2110 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
2111 wl_egl_buffer = wl_egl_surface->buffers[i];
2112 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
2113 if (wl_egl_buffer && wl_egl_buffer->status == COMMITTED) {
2114 wl_egl_buffer->status = RELEASED;
2115 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2116 wl_egl_buffer->tbm_surface);
2117 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2118 TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
2119 wl_egl_buffer->tbm_surface, tsq_err);
2120 tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
2125 TPL_INFO("[FORCE_FLUSH]",
2126 "wl_egl_surface(%p) tbm_queue(%p)",
2127 wl_egl_surface, wl_egl_surface->tbm_queue);
2129 return TPL_ERROR_NONE;
2133 _wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer,
2134 tpl_wl_egl_surface_t *wl_egl_surface)
2136 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2137 struct tizen_private *tizen_private =
2138 (struct tizen_private *)wl_egl_window->driver_private;
2140 TPL_ASSERT(tizen_private);
2142 wl_egl_buffer->draw_done = TPL_FALSE;
2143 wl_egl_buffer->need_to_commit = TPL_TRUE;
2144 wl_egl_buffer->buffer_release = NULL;
2145 wl_egl_buffer->transform = tizen_private->transform;
2147 if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
2148 wl_egl_buffer->w_transform = tizen_private->window_transform;
2149 wl_egl_buffer->w_rotated = TPL_TRUE;
2152 if (wl_egl_surface->set_serial_is_used) {
2153 wl_egl_buffer->serial = wl_egl_surface->serial;
2155 wl_egl_buffer->serial = ++tizen_private->serial;
2158 if (wl_egl_buffer->rects) {
2159 free(wl_egl_buffer->rects);
2160 wl_egl_buffer->rects = NULL;
2161 wl_egl_buffer->num_rects = 0;
2165 static tpl_wl_egl_buffer_t *
2166 _get_wl_egl_buffer(tbm_surface_h tbm_surface)
2168 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2169 tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2170 (void **)&wl_egl_buffer);
2171 return wl_egl_buffer;
/*
 * _wl_egl_buffer_create - returns the tpl_wl_egl_buffer_t bound to
 * tbm_surface, creating and registering one (as tbm user data under
 * KEY_WL_EGL_BUFFER) on first sight. New buffers are inserted into the
 * surface's fixed-size tracking array; when the array is full the
 * frontmost slot is evicted (flagged upstream as a potential leak
 * symptom). Always finishes by re-initializing per-frame state via
 * _wl_egl_buffer_init.
 * Returns NULL only on allocation failure.
 * NOTE(review): chunk is truncated by extraction (user-data set value
 * argument, some braces and declarations are missing); code is kept
 * verbatim.
 */
2174 static tpl_wl_egl_buffer_t *
2175 _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
2176 tbm_surface_h tbm_surface)
2178 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2179 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2181 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2183 if (!wl_egl_buffer) {
2184 wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t));
2185 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL);
/* Attach to the tbm_surface; __cb_wl_egl_buffer_free runs when the
 * tbm_surface's user data is destroyed. */
2187 tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2188 (tbm_data_free)__cb_wl_egl_buffer_free);
2189 tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2192 wl_egl_buffer->wl_buffer = NULL;
2193 wl_egl_buffer->tbm_surface = tbm_surface;
2194 wl_egl_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
2195 wl_egl_buffer->wl_egl_surface = wl_egl_surface;
2197 wl_egl_buffer->status = RELEASED;
/* -1 marks all per-buffer sync fds as "not created". */
2199 wl_egl_buffer->acquire_fence_fd = -1;
2200 wl_egl_buffer->commit_sync_fd = -1;
2201 wl_egl_buffer->presentation_sync_fd = -1;
2202 wl_egl_buffer->release_fence_fd = -1;
2204 wl_egl_buffer->dx = wl_egl_window->dx;
2205 wl_egl_buffer->dy = wl_egl_window->dy;
2206 wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
2207 wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
2209 tpl_gmutex_init(&wl_egl_buffer->mutex);
2210 tpl_gcond_init(&wl_egl_buffer->cond);
2212 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
/* Find the first free tracking slot. */
2215 for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
2216 if (wl_egl_surface->buffers[i] == NULL) break;
2218 /* If this exception is reached,
2219 * it may be a critical memory leak problem. */
2220 if (i == BUFFER_ARRAY_SIZE) {
2221 tpl_wl_egl_buffer_t *evicted_buffer = NULL;
2222 int evicted_idx = 0; /* evict the frontmost buffer */
2224 evicted_buffer = wl_egl_surface->buffers[evicted_idx];
2226 TPL_WARN("wl_egl_surface(%p) buffers array is full. evict one.",
2228 TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
2229 evicted_buffer, evicted_buffer->tbm_surface,
2230 status_to_string[evicted_buffer->status]);
2232 /* [TODO] need to think about whether there will be
2233 * better modifications */
2234 wl_egl_surface->buffer_cnt--;
2235 wl_egl_surface->buffers[evicted_idx] = NULL;
2240 wl_egl_surface->buffer_cnt++;
2241 wl_egl_surface->buffers[i] = wl_egl_buffer;
2242 wl_egl_buffer->idx = i;
2244 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
2246 TPL_INFO("[WL_EGL_BUFFER_CREATE]",
2247 "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2248 wl_egl_surface, wl_egl_buffer, tbm_surface,
2249 wl_egl_buffer->bo_name);
/* Per-frame reset happens for both new and recycled buffers. */
2252 _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface);
2254 return wl_egl_buffer;
/* Backend entry point: dequeue a renderable tbm_surface for the client.
 *
 * Visible flow:
 *  1) Block (outside the TPL object lock) until the tbm_queue can dequeue,
 *     force-flushing the queue on CAN_DEQUEUE_TIMEOUT.
 *  2) Take wl_event_mutex so the wayland-egl thread cannot process events
 *     during the dequeue.
 *  3) Query ACTIVATED/DEACTIVATED from wayland-tbm BEFORE the dequeue, and
 *     refresh surface/queue dimensions.
 *  4) In frontbuffer mode, return the cached frontbuffer if it is still
 *     usable; otherwise fall through to a normal dequeue.
 *  5) Dequeue, ref the tbm_surface, and create/attach its wl_egl_buffer.
 *  6) Hand the explicit-sync release fence fd to the caller through
 *     release_fence (or -1 when the buffer is immediately usable).
 *
 * NOTE(review): the timeout_ns parameter is not referenced in the visible
 * code; CAN_DEQUEUE_TIMEOUT_MS is used instead — confirm intent. */
2257 static tbm_surface_h
2258 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
2259 int32_t *release_fence)
2261 TPL_ASSERT(surface);
2262 TPL_ASSERT(surface->backend.data);
2263 TPL_ASSERT(surface->display);
2264 TPL_ASSERT(surface->display->backend.data);
2265 TPL_OBJECT_CHECK_RETURN(surface, NULL);
2267 tpl_wl_egl_surface_t *wl_egl_surface =
2268 (tpl_wl_egl_surface_t *)surface->backend.data;
2269 tpl_wl_egl_display_t *wl_egl_display =
2270 (tpl_wl_egl_display_t *)surface->display->backend.data;
2271 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2273 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2274 tpl_bool_t is_activated = 0;
2276 tbm_surface_h tbm_surface = NULL;
/* Drop the TPL object lock while waiting so other TPL calls can proceed. */
2278 TPL_OBJECT_UNLOCK(surface);
2279 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
2280 wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
2281 TPL_OBJECT_LOCK(surface);
2283 /* After the can dequeue state, lock the wl_event_mutex to prevent other
2284 * events from being processed in wayland_egl_thread
2285 * during below dequeue procedure. */
2286 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
/* Timeout: something is stuck (e.g. compositor not releasing). Reset the
 * queue rather than failing the dequeue outright. */
2288 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
2289 TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
2290 wl_egl_surface->tbm_queue, surface);
2291 if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
2292 TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
2293 wl_egl_surface->tbm_queue, surface);
2294 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2297 tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2301 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2302 TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
2303 wl_egl_surface->tbm_queue, surface);
2304 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2308 /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
2309 * below function [wayland_tbm_client_queue_check_activate()].
2310 * This function has to be called before tbm_surface_queue_dequeue()
2311 * in order to know what state the buffer will be dequeued next.
2313 * ACTIVATED state means non-composite mode. Client can get buffers which
2314 can be displayed directly(without compositing).
2315 * DEACTIVATED state means composite mode. Client's buffer will be displayed
2316 by compositor(E20) with compositing.
2318 is_activated = wayland_tbm_client_queue_check_activate(
2319 wl_egl_display->wl_tbm_client,
2320 wl_egl_surface->tbm_queue);
2322 wl_egl_surface->is_activated = is_activated;
/* The queue may have been resized; mirror its current size into both the
 * tpl_surface and the backend surface. */
2324 surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2325 surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
2326 wl_egl_surface->width = surface->width;
2327 wl_egl_surface->height = surface->height;
2329 if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
2330 /* If surface->frontbuffer is already set in frontbuffer mode,
2331 * it will return that frontbuffer if it is still activated,
2332 * otherwise dequeue the new buffer after initializing
2333 * surface->frontbuffer to NULL. */
2334 if (is_activated && !wl_egl_surface->reset) {
2335 bo_name = _get_tbm_surface_bo_name(surface->frontbuffer);
2338 "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
2339 surface->frontbuffer, bo_name);
2340 TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
2341 "[DEQ]~[ENQ] BO_NAME:%d",
2343 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2344 return surface->frontbuffer;
2346 surface->frontbuffer = NULL;
2347 wl_egl_surface->need_to_enqueue = TPL_TRUE;
2350 surface->frontbuffer = NULL;
2353 tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
2356 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d",
2357 wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err);
2358 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Hold a reference for the duration of the dequeue→enqueue cycle;
 * released again on enqueue/cancel. */
2362 tbm_surface_internal_ref(tbm_surface);
2364 wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
2365 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
2367 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2368 wl_egl_buffer->status = DEQUEUED;
2370 /* If wl_egl_buffer->release_fence_fd is -1,
2371 * the tbm_surface can be used immediately.
2372 * If not, user(EGL) have to wait until signaled. */
2373 if (release_fence) {
2374 if (wl_egl_surface->surface_sync) {
2375 *release_fence = wl_egl_buffer->release_fence_fd;
2376 TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
2377 wl_egl_surface, wl_egl_buffer, *release_fence);
/* Ownership of the fence fd moves to the caller. */
2379 wl_egl_buffer->release_fence_fd = -1;
2381 *release_fence = -1;
2385 if (surface->is_frontbuffer_mode && is_activated)
2386 surface->frontbuffer = tbm_surface;
2388 wl_egl_surface->reset = TPL_FALSE;
2390 TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name);
2391 TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d",
2392 wl_egl_buffer->bo_name);
2393 TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2394 wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name,
2395 release_fence ? *release_fence : -1);
2397 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2398 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Backend entry point: return a previously dequeued (but unused) buffer to
 * the queue. Marks the wl_egl_buffer RELEASED, drops the extra ref taken at
 * dequeue time, and calls tbm_surface_queue_cancel_dequeue().
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_PARAMETER for an
 * invalid tbm_surface, TPL_ERROR_INVALID_OPERATION when the queue refuses
 * the cancel. */
2404 __tpl_wl_egl_surface_cancel_buffer(tpl_surface_t *surface,
2405 tbm_surface_h tbm_surface)
2407 TPL_ASSERT(surface);
2408 TPL_ASSERT(surface->backend.data);
2410 tpl_wl_egl_surface_t *wl_egl_surface =
2411 (tpl_wl_egl_surface_t *)surface->backend.data;
2412 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2413 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2415 if (!tbm_surface_internal_is_valid(tbm_surface)) {
2416 TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
2417 return TPL_ERROR_INVALID_PARAMETER;
2420 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2421 if (wl_egl_buffer) {
2422 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2423 wl_egl_buffer->status = RELEASED;
2424 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* Drop the reference taken in dequeue_buffer. */
2427 tbm_surface_internal_unref(tbm_surface);
2429 tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
2431 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2432 TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
2433 tbm_surface, surface);
2434 return TPL_ERROR_INVALID_OPERATION;
2437 TPL_INFO("[CANCEL_BUFFER]", "wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
2438 wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
2440 return TPL_ERROR_NONE;
/* Backend entry point: enqueue a rendered buffer for presentation.
 *
 * Visible flow:
 *  - store the damage rects (num_rects x 4 ints) on the wl_egl_buffer;
 *  - skip the enqueue entirely in frontbuffer mode when the same
 *    frontbuffer is re-submitted (closing acquire_fence since it will
 *    not be consumed);
 *  - adopt acquire_fence, and move any pending presentation/commit sync
 *    eventfds from the surface onto this buffer;
 *  - mark the buffer ENQUEUED, drop the dequeue-time ref, and push it
 *    into the tbm_queue (the acquire/commit happens on the worker thread).
 * Returns TPL_ERROR_NONE, TPL_ERROR_INVALID_PARAMETER,
 * TPL_ERROR_OUT_OF_MEMORY, or TPL_ERROR_INVALID_OPERATION. */
2444 __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
2445 tbm_surface_h tbm_surface,
2446 int num_rects, const int *rects, int32_t acquire_fence)
2448 TPL_ASSERT(surface);
2449 TPL_ASSERT(surface->display);
2450 TPL_ASSERT(surface->backend.data);
2451 TPL_ASSERT(tbm_surface);
2452 TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2454 tpl_wl_egl_surface_t *wl_egl_surface =
2455 (tpl_wl_egl_surface_t *) surface->backend.data;
2456 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2457 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2460 if (!tbm_surface_internal_is_valid(tbm_surface)) {
2461 TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
2463 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2464 return TPL_ERROR_INVALID_PARAMETER;
2467 bo_name = _get_tbm_surface_bo_name(tbm_surface);
2469 TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
2471 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2473 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2475 /* If there are received region information, save it to wl_egl_buffer */
2476 if (num_rects && rects) {
/* Replace any stale rects from a previous enqueue of this buffer. */
2477 if (wl_egl_buffer->rects != NULL) {
2478 free(wl_egl_buffer->rects);
2479 wl_egl_buffer->rects = NULL;
2480 wl_egl_buffer->num_rects = 0;
2483 wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2484 wl_egl_buffer->num_rects = num_rects;
2486 if (!wl_egl_buffer->rects) {
2487 TPL_ERR("Failed to allocate memory fo damage rects info.");
2488 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2489 return TPL_ERROR_OUT_OF_MEMORY;
2492 memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects);
2495 if (!wl_egl_surface->need_to_enqueue ||
2496 !wl_egl_buffer->need_to_commit) {
2497 TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
2498 ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
2499 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2500 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2501 return TPL_ERROR_NONE;
2504 /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
2505 * commit if surface->frontbuffer that is already set and the tbm_surface
2506 * client want to enqueue are the same.
2508 if (surface->is_frontbuffer_mode) {
2509 /* The first buffer to be activated in frontbuffer mode must be
2510 * committed. Subsequence frames do not need to be committed because
2511 * the buffer is already displayed.
2513 if (surface->frontbuffer == tbm_surface)
2514 wl_egl_surface->need_to_enqueue = TPL_FALSE;
/* The fence will never be consumed on this path — close it here to
 * avoid leaking the fd. */
2516 if (acquire_fence != -1) {
2517 close(acquire_fence);
/* Adopt the new acquire fence, closing any fence left from a previous
 * cycle of this buffer. */
2522 if (wl_egl_buffer->acquire_fence_fd != -1)
2523 close(wl_egl_buffer->acquire_fence_fd);
2525 wl_egl_buffer->acquire_fence_fd = acquire_fence;
/* Move the surface's pending presentation-sync eventfd onto this buffer;
 * it will be signaled from the presentation feedback callbacks. */
2527 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2528 if (wl_egl_surface->presentation_sync.fd != -1) {
2529 wl_egl_buffer->presentation_sync_fd = wl_egl_surface->presentation_sync.fd;
2530 wl_egl_surface->presentation_sync.fd = -1;
2532 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* Same hand-off for the commit-sync eventfd; signaled after wl commit. */
2534 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
2535 if (wl_egl_surface->commit_sync.fd != -1) {
2536 wl_egl_buffer->commit_sync_fd = wl_egl_surface->commit_sync.fd;
2537 wl_egl_surface->commit_sync.fd = -1;
2538 TRACE_ASYNC_BEGIN(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
2539 _get_tbm_surface_bo_name(tbm_surface));
2541 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
2543 wl_egl_buffer->status = ENQUEUED;
2545 "[ENQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2546 wl_egl_buffer, tbm_surface, bo_name, acquire_fence);
2548 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2550 tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
2552 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2553 tbm_surface_internal_unref(tbm_surface);
2554 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
2555 tbm_surface, wl_egl_surface, tsq_err);
2556 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2557 return TPL_ERROR_INVALID_OPERATION;
/* Drop the reference taken in dequeue_buffer. */
2560 tbm_surface_internal_unref(tbm_surface);
2562 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2564 return TPL_ERROR_NONE;
/* gsource dispatch callback: the buffer's acquire fence fd became readable,
 * i.e. rendering finished. Marks the buffer WAITING_VBLANK, wakes any waiter
 * on its condvar, then either commits immediately or queues the buffer until
 * the next vblank (when vblank pacing is enabled and not yet done). Runs on
 * the wayland-egl worker thread. */
2568 __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message)
2570 tpl_wl_egl_buffer_t *wl_egl_buffer =
2571 (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
2572 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2573 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2574 tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
2576 wl_egl_surface->render_done_cnt++;
2578 TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2579 wl_egl_buffer->acquire_fence_fd);
2581 TPL_DEBUG("[RENDER DONE] wl_egl_buffer(%p) tbm_surface(%p)",
2582 wl_egl_buffer, tbm_surface);
2584 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2585 wl_egl_buffer->status = WAITING_VBLANK;
2586 tpl_gcond_signal(&wl_egl_buffer->cond);
2587 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2589 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
/* Commit now if we are not pacing on vblank, or the vblank already fired;
 * otherwise park the buffer on the vblank waiting list. */
2591 if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done)
2592 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2594 __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers,
2597 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* gsource finalize callback for the fence-wait source: closes the acquire
 * fence fd it was polling and clears the buffer's back-pointer to the
 * (now destroyed) source. */
2603 __thread_func_waiting_source_finalize(tpl_gsource *gsource)
2605 tpl_wl_egl_buffer_t *wl_egl_buffer =
2606 (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
2608 TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
2609 wl_egl_buffer, wl_egl_buffer->waiting_source,
2610 wl_egl_buffer->acquire_fence_fd);
2612 close(wl_egl_buffer->acquire_fence_fd);
2613 wl_egl_buffer->acquire_fence_fd = -1;
2614 wl_egl_buffer->waiting_source = NULL;
/* gsource vtable for per-buffer acquire-fence wait sources
 * (SOURCE_TYPE_DISPOSABLE, created in _thread_surface_queue_acquire). */
2617 static tpl_gsource_functions buffer_funcs = {
2620 .dispatch = __thread_func_waiting_source_dispatch,
2621 .finalize = __thread_func_waiting_source_finalize,
/* Worker-thread side of enqueue: drain every acquirable buffer from the
 * tbm_queue and commit each one to the compositor.
 *
 * Per buffer:
 *  - acquire + ref the tbm_surface, mark ACQUIRED;
 *  - lazily create its wl_buffer via wayland-tbm;
 *  - if an acquire fence is pending and there is no explicit-sync support
 *    on the surface, attach a disposable gsource that waits for the fence
 *    before committing (status WAITING_SIGNALED);
 *  - otherwise commit now, or defer to the next vblank when vblank pacing
 *    is active (status WAITING_VBLANK).
 * Returns TPL_ERROR_NONE or TPL_ERROR_INVALID_OPERATION on acquire failure. */
2625 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
2627 tbm_surface_h tbm_surface = NULL;
2628 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2629 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2630 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2631 tpl_bool_t ready_to_commit = TPL_FALSE;
2633 while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) {
2634 tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue,
2636 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2637 TPL_ERR("Failed to acquire from tbm_queue(%p)",
2638 wl_egl_surface->tbm_queue);
2639 return TPL_ERROR_INVALID_OPERATION;
/* Hold a ref until the compositor releases the buffer. */
2642 tbm_surface_internal_ref(tbm_surface);
2644 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2645 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
2646 "wl_egl_buffer sould be not NULL");
2648 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2650 wl_egl_buffer->status = ACQUIRED;
2652 TPL_LOG_T("WL_EGL", "[ACQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2653 wl_egl_buffer, tbm_surface,
2654 _get_tbm_surface_bo_name(tbm_surface));
/* Lazily create the protocol-side wl_buffer on first acquire. */
2656 if (wl_egl_buffer->wl_buffer == NULL) {
2657 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2658 wl_egl_buffer->wl_buffer =
2659 (struct wl_proxy *)wayland_tbm_client_create_buffer(
2660 wl_egl_display->wl_tbm_client, tbm_surface);
2662 if (!wl_egl_buffer->wl_buffer) {
2663 TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
2664 wl_egl_display->wl_tbm_client, tbm_surface);
2667 "[WL_BUFFER_CREATE] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
2668 wl_egl_buffer, wl_egl_buffer->wl_buffer, tbm_surface);
2672 if (wl_egl_buffer->acquire_fence_fd != -1) {
/* With explicit sync, the fence is forwarded to the compositor at
 * commit time; no need to wait for it here. */
2673 if (wl_egl_surface->surface_sync)
2674 ready_to_commit = TPL_TRUE;
/* Replace any stale wait-source before installing a new one. */
2676 if (wl_egl_buffer->waiting_source) {
2677 tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
2678 wl_egl_buffer->waiting_source = NULL;
2681 wl_egl_buffer->waiting_source =
2682 tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
2683 wl_egl_buffer->acquire_fence_fd, &buffer_funcs,
2684 SOURCE_TYPE_DISPOSABLE);
2685 wl_egl_buffer->status = WAITING_SIGNALED;
2687 TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2688 wl_egl_buffer->acquire_fence_fd);
2690 ready_to_commit = TPL_FALSE;
2693 ready_to_commit = TPL_TRUE;
2696 if (ready_to_commit) {
/* Even when the buffer is ready, commit may be deferred to the next
 * vblank to pace presentation. */
2697 if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done)
2698 ready_to_commit = TPL_TRUE;
2700 wl_egl_buffer->status = WAITING_VBLANK;
2701 __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, wl_egl_buffer);
2702 ready_to_commit = TPL_FALSE;
2706 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2708 if (ready_to_commit)
2709 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2712 return TPL_ERROR_NONE;
2715 /* -- BEGIN -- tdm_client vblank callback function */
/* tdm_client vblank callback: a vblank tick arrived (or timed out — the
 * timeout is tolerated and treated like a tick). Marks vblank_done and,
 * under surf_mutex, pops the front buffer from the vblank waiting list and
 * commits it. Runs on the worker thread's tdm event dispatch. */
2717 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2718 unsigned int sequence, unsigned int tv_sec,
2719 unsigned int tv_usec, void *user_data)
2721 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data;
2722 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2724 TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK");
2725 TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface);
2727 if (error == TDM_ERROR_TIMEOUT)
2728 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)",
2731 wl_egl_surface->vblank_done = TPL_TRUE;
2733 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
2734 wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(
2735 wl_egl_surface->vblank_waiting_buffers,
2738 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2739 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2741 /* -- END -- tdm_client vblank callback function */
/* zwp_linux_buffer_release_v1 "fenced_release" listener: the compositor is
 * done with the buffer and handed back a release fence fd. Only acts when
 * the buffer is still COMMITTED: stores the fence for the next dequeue,
 * marks the buffer RELEASED, returns it to the tbm_queue, and drops the
 * ref taken at acquire time. Ownership of `fence` moves to wl_egl_buffer. */
2744 __cb_buffer_fenced_release(void *data,
2745 struct zwp_linux_buffer_release_v1 *release, int32_t fence)
2747 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
2748 tbm_surface_h tbm_surface = NULL;
2750 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
2752 tbm_surface = wl_egl_buffer->tbm_surface;
2754 if (tbm_surface_internal_is_valid(tbm_surface)) {
2756 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2757 if (wl_egl_buffer->status == COMMITTED) {
2758 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2759 tbm_surface_queue_error_e tsq_err;
/* The release object is one-shot; destroy it now that it fired. */
2761 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
2762 wl_egl_buffer->buffer_release = NULL;
2764 wl_egl_buffer->release_fence_fd = fence;
2765 wl_egl_buffer->status = RELEASED;
2767 TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2768 _get_tbm_surface_bo_name(tbm_surface),
2770 TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
2771 _get_tbm_surface_bo_name(tbm_surface));
2774 "[FENCED_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2775 wl_egl_buffer, tbm_surface,
2776 _get_tbm_surface_bo_name(tbm_surface),
2779 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2781 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2782 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
/* Drop the reference taken in _thread_surface_queue_acquire. */
2784 tbm_surface_internal_unref(tbm_surface);
2787 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2790 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* zwp_linux_buffer_release_v1 "immediate_release" listener: same as the
 * fenced variant but with no fence — the buffer is reusable right away
 * (release_fence_fd set to -1). Only acts when the buffer is COMMITTED:
 * marks it RELEASED, returns it to the tbm_queue, drops the acquire ref. */
2795 __cb_buffer_immediate_release(void *data,
2796 struct zwp_linux_buffer_release_v1 *release)
2798 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
2799 tbm_surface_h tbm_surface = NULL;
2801 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
2803 tbm_surface = wl_egl_buffer->tbm_surface;
2805 if (tbm_surface_internal_is_valid(tbm_surface)) {
2807 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2808 if (wl_egl_buffer->status == COMMITTED) {
2809 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2810 tbm_surface_queue_error_e tsq_err;
/* The release object is one-shot; destroy it now that it fired. */
2812 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
2813 wl_egl_buffer->buffer_release = NULL;
2815 wl_egl_buffer->release_fence_fd = -1;
2816 wl_egl_buffer->status = RELEASED;
2818 TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
2819 _get_tbm_surface_bo_name(tbm_surface));
2820 TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
2821 _get_tbm_surface_bo_name(tbm_surface));
2824 "[IMMEDIATE_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2825 wl_egl_buffer, tbm_surface,
2826 _get_tbm_surface_bo_name(tbm_surface));
2828 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2830 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2831 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
/* Drop the reference taken in _thread_surface_queue_acquire. */
2833 tbm_surface_internal_unref(tbm_surface);
2836 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2839 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* Listener for per-commit explicit-sync buffer releases.
 * (Identifier spelling "listner" kept — renaming would touch other
 * references outside this view.) */
2843 static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
2844 __cb_buffer_fenced_release,
2845 __cb_buffer_immediate_release,
/* Plain wl_buffer.release listener (used when explicit sync is not in
 * play): if the buffer is still COMMITTED, return it to the tbm_queue and
 * mark it RELEASED; the acquire-time ref is dropped only when the queue
 * release succeeded. */
2849 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
2851 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
2852 tbm_surface_h tbm_surface = NULL;
2854 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer)
2856 tbm_surface = wl_egl_buffer->tbm_surface;
2858 if (tbm_surface_internal_is_valid(tbm_surface)) {
2859 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
/* Initialized to an error value so the unref below only happens when
 * the queue release actually ran and succeeded. */
2860 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
2862 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2864 if (wl_egl_buffer->status == COMMITTED) {
2866 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2868 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2869 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2871 wl_egl_buffer->status = RELEASED;
2873 TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
2874 TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
2875 _get_tbm_surface_bo_name(tbm_surface));
2877 TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2878 wl_egl_buffer->wl_buffer, tbm_surface,
2879 _get_tbm_surface_bo_name(tbm_surface));
2882 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2884 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
2885 tbm_surface_internal_unref(tbm_surface);
2887 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* wl_buffer listener; cast needed because the callback takes a generic
 * wl_proxy* rather than wl_buffer*. */
2891 static const struct wl_buffer_listener wl_buffer_release_listener = {
2892 (void *)__cb_wl_buffer_release,
/* wp_presentation_feedback.sync_output: intentionally ignored — this
 * backend only cares about presented/discarded. */
2896 __cb_presentation_feedback_sync_output(void *data,
2897 struct wp_presentation_feedback *presentation_feedback,
2898 struct wl_output *output)
2901 TPL_IGNORE(presentation_feedback);
/* wp_presentation_feedback.presented: the frame reached the screen.
 * Timing arguments are ignored; the callback only signals the buffer's
 * presentation-sync eventfd (then closes it), destroys the feedback
 * object, and removes the pst_feedback node from the surface list.
 * All of it under presentation_sync.mutex. */
2907 __cb_presentation_feedback_presented(void *data,
2908 struct wp_presentation_feedback *presentation_feedback,
2912 uint32_t refresh_nsec,
2917 TPL_IGNORE(tv_sec_hi);
2918 TPL_IGNORE(tv_sec_lo);
2919 TPL_IGNORE(tv_nsec);
2920 TPL_IGNORE(refresh_nsec);
2925 struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
2926 tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
2928 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2930 TPL_DEBUG("[FEEDBACK][PRESENTED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
2931 pst_feedback, presentation_feedback, pst_feedback->bo_name);
2933 if (pst_feedback->pst_sync_fd != -1) {
/* Wake anyone waiting on the presentation-sync eventfd. */
2934 int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
2936 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
2937 pst_feedback->pst_sync_fd);
2940 TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
2941 "[PRESENTATION_SYNC] bo(%d)",
2942 pst_feedback->bo_name);
2944 close(pst_feedback->pst_sync_fd);
2945 pst_feedback->pst_sync_fd = -1;
2948 wp_presentation_feedback_destroy(presentation_feedback);
2950 pst_feedback->presentation_feedback = NULL;
2951 pst_feedback->wl_egl_surface = NULL;
2952 pst_feedback->bo_name = 0;
2954 __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
2959 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* wp_presentation_feedback.discarded: the frame was never shown. Handled
 * identically to "presented" — waiters on the presentation-sync eventfd
 * must still be unblocked — then the feedback object and list node are
 * cleaned up under presentation_sync.mutex. */
2963 __cb_presentation_feedback_discarded(void *data,
2964 struct wp_presentation_feedback *presentation_feedback)
2966 struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
2967 tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
2969 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2971 TPL_DEBUG("[FEEDBACK][DISCARDED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
2972 pst_feedback, presentation_feedback, pst_feedback->bo_name);
2974 if (pst_feedback->pst_sync_fd != -1) {
/* Signal even on discard so waiters do not hang forever. */
2975 int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
2977 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
2978 pst_feedback->pst_sync_fd);
2981 TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
2982 "[PRESENTATION_SYNC] bo(%d)",
2983 pst_feedback->bo_name);
2985 close(pst_feedback->pst_sync_fd);
2986 pst_feedback->pst_sync_fd = -1;
2989 wp_presentation_feedback_destroy(presentation_feedback);
2991 pst_feedback->presentation_feedback = NULL;
2992 pst_feedback->wl_egl_surface = NULL;
2993 pst_feedback->bo_name = 0;
2995 __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
3000 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* wp_presentation_feedback listener; entry order must match the protocol:
 * sync_output, presented, discarded. */
3003 static const struct wp_presentation_feedback_listener feedback_listener = {
3004 __cb_presentation_feedback_sync_output, /* sync_output feedback -*/
3005 __cb_presentation_feedback_presented,
3006 __cb_presentation_feedback_discarded
/* Arm a one-shot tdm vblank wait for this surface (lazily creating the
 * tdm_client_vblank object on first use). On success clears vblank_done
 * so commits queue up until __cb_tdm_client_vblank fires.
 * Returns TPL_ERROR_NONE, TPL_ERROR_OUT_OF_MEMORY (vblank object creation
 * failed), or TPL_ERROR_INVALID_OPERATION (tdm_client_vblank_wait failed). */
3010 _thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
3012 tdm_error tdm_err = TDM_ERROR_NONE;
3013 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3015 if (wl_egl_surface->vblank == NULL) {
3016 wl_egl_surface->vblank =
3017 _thread_create_tdm_client_vblank(wl_egl_display->tdm_client);
3018 if (!wl_egl_surface->vblank) {
3019 TPL_WARN("Failed to create vblank. wl_egl_surface(%p)",
3021 return TPL_ERROR_OUT_OF_MEMORY;
/* post_interval controls how many vblanks to skip between frames. */
3025 tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank,
3026 wl_egl_surface->post_interval,
3027 __cb_tdm_client_vblank,
3028 (void *)wl_egl_surface);
3030 if (tdm_err == TDM_ERROR_NONE) {
3031 wl_egl_surface->vblank_done = TPL_FALSE;
3032 TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK");
3034 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
3035 return TPL_ERROR_INVALID_OPERATION;
3038 return TPL_ERROR_NONE;
/* Commit one buffer to the compositor (worker thread only).
 *
 * Visible steps, in order:
 *  1) ensure the wl_buffer exists (fatal if it cannot be created);
 *  2) if presentation feedback is requested, create a wp_presentation
 *     feedback object, move the buffer's presentation_sync_fd into a
 *     pst_feedback node and register the listener (on failure, signal and
 *     close the fd so waiters don't hang);
 *  3) apply buffer transform / attach / damage (wl_surface_damage vs
 *     wl_surface_damage_buffer chosen by protocol version, with a Y-flip
 *     for the legacy surface-coordinate damage path);
 *  4) with explicit sync: pass the acquire fence to the compositor, close
 *     our copy, and set up a zwp_linux_buffer_release_v1 listener;
 *     otherwise fall back to the wl_buffer.release listener;
 *  5) wl_surface_commit + flush, mark the buffer COMMITTED and signal its
 *     condvar;
 *  6) arm the next vblank wait if pacing is enabled;
 *  7) signal and close the buffer's commit_sync eventfd. */
3042 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
3043 tpl_wl_egl_buffer_t *wl_egl_buffer)
3045 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3046 struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
3047 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
3050 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
3051 "wl_egl_buffer sould be not NULL");
3053 if (wl_egl_buffer->wl_buffer == NULL) {
3054 wl_egl_buffer->wl_buffer =
3055 (struct wl_proxy *)wayland_tbm_client_create_buffer(
3056 wl_egl_display->wl_tbm_client,
3057 wl_egl_buffer->tbm_surface);
3059 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
3060 "[FATAL] Failed to create wl_buffer");
/* Protocol version decides surface- vs buffer-coordinate damage below. */
3062 version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
3064 /* create presentation feedback and add listener */
3065 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
3066 if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
3068 struct pst_feedback *pst_feedback = NULL;
3069 pst_feedback = (struct pst_feedback *) calloc(1, sizeof(struct pst_feedback));
3071 pst_feedback->presentation_feedback =
3072 wp_presentation_feedback(wl_egl_display->presentation,
3075 pst_feedback->wl_egl_surface = wl_egl_surface;
3076 pst_feedback->bo_name = wl_egl_buffer->bo_name;
/* The sync fd now belongs to the feedback node, not the buffer. */
3078 pst_feedback->pst_sync_fd = wl_egl_buffer->presentation_sync_fd;
3079 wl_egl_buffer->presentation_sync_fd = -1;
3081 wp_presentation_feedback_add_listener(pst_feedback->presentation_feedback,
3082 &feedback_listener, pst_feedback);
3083 __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, pst_feedback);
3084 TRACE_ASYNC_BEGIN(pst_feedback->pst_sync_fd,
3085 "[PRESENTATION_SYNC] bo(%d)",
3086 pst_feedback->bo_name);
3088 TPL_ERR("Failed to create presentation feedback. wl_egl_buffer(%p)",
/* No feedback will ever arrive — signal and close the fd here so
 * waiters are not blocked forever. */
3090 _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
3091 close(wl_egl_buffer->presentation_sync_fd);
3092 wl_egl_buffer->presentation_sync_fd = -1;
3095 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
3097 if (wl_egl_buffer->w_rotated == TPL_TRUE) {
3098 wayland_tbm_client_set_buffer_transform(
3099 wl_egl_display->wl_tbm_client,
3100 (void *)wl_egl_buffer->wl_buffer,
3101 wl_egl_buffer->w_transform);
3102 wl_egl_buffer->w_rotated = TPL_FALSE;
/* Only send set_buffer_transform when the transform actually changed. */
3105 if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
3106 wl_egl_surface->latest_transform = wl_egl_buffer->transform;
3107 wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
3110 if (wl_egl_window) {
3111 wl_egl_window->attached_width = wl_egl_buffer->width;
3112 wl_egl_window->attached_height = wl_egl_buffer->height;
3115 wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer,
3116 wl_egl_buffer->dx, wl_egl_buffer->dy);
/* No damage rects provided: damage the whole buffer. */
3118 if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) {
3120 wl_surface_damage(wl_surface,
3121 wl_egl_buffer->dx, wl_egl_buffer->dy,
3122 wl_egl_buffer->width, wl_egl_buffer->height);
3124 wl_surface_damage_buffer(wl_surface,
3126 wl_egl_buffer->width, wl_egl_buffer->height);
/* Rects are {x, y, w, h} quadruples; the legacy damage path flips the
 * y origin from GL (bottom-left) to surface (top-left) coordinates. */
3130 for (i = 0; i < wl_egl_buffer->num_rects; i++) {
3132 wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] +
3133 wl_egl_buffer->rects[i * 4 + 3]);
3135 wl_surface_damage(wl_surface,
3136 wl_egl_buffer->rects[i * 4 + 0],
3138 wl_egl_buffer->rects[i * 4 + 2],
3139 wl_egl_buffer->rects[i * 4 + 3]);
3141 wl_surface_damage_buffer(wl_surface,
3142 wl_egl_buffer->rects[i * 4 + 0],
3144 wl_egl_buffer->rects[i * 4 + 2],
3145 wl_egl_buffer->rects[i * 4 + 3]);
3150 wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
3151 (void *)wl_egl_buffer->wl_buffer,
3152 wl_egl_buffer->serial);
3154 if (wl_egl_display->use_explicit_sync &&
3155 wl_egl_surface->surface_sync) {
/* The compositor duplicates the fence; our fd can be closed after
 * set_acquire_fence. */
3157 zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
3158 wl_egl_buffer->acquire_fence_fd);
3159 TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)",
3160 wl_egl_surface, wl_egl_buffer->tbm_surface, wl_egl_buffer->acquire_fence_fd);
3161 close(wl_egl_buffer->acquire_fence_fd);
3162 wl_egl_buffer->acquire_fence_fd = -1;
3164 wl_egl_buffer->buffer_release =
3165 zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync);
3166 if (!wl_egl_buffer->buffer_release) {
3167 TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface);
3169 zwp_linux_buffer_release_v1_add_listener(
3170 wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer);
3171 TPL_DEBUG("add explicit_sync_release_listener.");
/* Fallback release path when explicit sync is unavailable. */
3174 wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer,
3175 &wl_buffer_release_listener, wl_egl_buffer);
3178 wl_surface_commit(wl_surface);
3180 wl_display_flush(wl_egl_display->wl_display);
3182 TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3183 wl_egl_buffer->bo_name);
3185 tpl_gmutex_lock(&wl_egl_buffer->mutex);
3187 wl_egl_buffer->need_to_commit = TPL_FALSE;
3188 wl_egl_buffer->status = COMMITTED;
/* Wake any thread blocked waiting for this buffer to be committed. */
3190 tpl_gcond_signal(&wl_egl_buffer->cond);
3192 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
3195 "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
3196 wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,
3197 wl_egl_buffer->bo_name);
3199 if (wl_egl_display->use_wait_vblank &&
3200 _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
3201 TPL_ERR("Failed to set wait vblank.");
3203 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
/* Signal and retire the per-commit sync eventfd. */
3205 if (wl_egl_buffer->commit_sync_fd != -1) {
3206 int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
3208 TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd);
3211 TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
3212 wl_egl_buffer->bo_name);
3213 TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)",
3214 wl_egl_surface, wl_egl_buffer->commit_sync_fd);
3216 close(wl_egl_buffer->commit_sync_fd);
3217 wl_egl_buffer->commit_sync_fd = -1;
3220 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
/* Signal an eventfd by writing a 64-bit value to it (eventfd(2) requires
 * exactly sizeof(uint64_t) bytes). Rejects fd == -1 and logs write errors. */
3224 _write_to_eventfd(int eventfd)
3229 if (eventfd == -1) {
3230 TPL_ERR("Invalid fd(-1)");
3234 ret = write(eventfd, &value, sizeof(uint64_t));
3236 TPL_ERR("failed to write to fd(%d)", eventfd);
/* Wire the wl-egl-thread display backend vtable into a tpl_display_backend_t.
 * Called by the TPL core when a display with this backend type is created. */
3244 __tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend)
3246 TPL_ASSERT(backend);
3248 backend->type = TPL_BACKEND_WAYLAND_THREAD;
3249 backend->data = NULL;
3251 backend->init = __tpl_wl_egl_display_init;
3252 backend->fini = __tpl_wl_egl_display_fini;
3253 backend->query_config = __tpl_wl_egl_display_query_config;
3254 backend->filter_config = __tpl_wl_egl_display_filter_config;
3255 backend->get_window_info = __tpl_wl_egl_display_get_window_info;
3256 backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
3257 backend->get_buffer_from_native_pixmap =
3258 __tpl_wl_egl_display_get_buffer_from_native_pixmap;
3262 __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
3264 TPL_ASSERT(backend);
3266 backend->type = TPL_BACKEND_WAYLAND_THREAD;
3267 backend->data = NULL;
3269 backend->init = __tpl_wl_egl_surface_init;
3270 backend->fini = __tpl_wl_egl_surface_fini;
3271 backend->validate = __tpl_wl_egl_surface_validate;
3272 backend->cancel_dequeued_buffer =
3273 __tpl_wl_egl_surface_cancel_buffer;
3274 backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
3275 backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
3276 backend->set_rotation_capability =
3277 __tpl_wl_egl_surface_set_rotation_capability;
3278 backend->set_post_interval =
3279 __tpl_wl_egl_surface_set_post_interval;
3281 __tpl_wl_egl_surface_get_size;
3285 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
3287 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3288 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3290 TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
3291 wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
3293 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3294 if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) {
3295 wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL;
3296 wl_egl_surface->buffer_cnt--;
3298 wl_egl_buffer->idx = -1;
3300 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
3302 wl_display_flush(wl_egl_display->wl_display);
3304 if (wl_egl_buffer->wl_buffer) {
3305 wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
3306 (void *)wl_egl_buffer->wl_buffer);
3307 wl_egl_buffer->wl_buffer = NULL;
3310 if (wl_egl_buffer->buffer_release) {
3311 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
3312 wl_egl_buffer->buffer_release = NULL;
3315 if (wl_egl_buffer->release_fence_fd != -1) {
3316 close(wl_egl_buffer->release_fence_fd);
3317 wl_egl_buffer->release_fence_fd = -1;
3320 if (wl_egl_buffer->waiting_source) {
3321 tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
3322 wl_egl_buffer->waiting_source = NULL;
3325 if (wl_egl_buffer->commit_sync_fd != -1) {
3326 int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
3328 TPL_ERR("Failed to send commit_sync signal to fd(%d)",
3329 wl_egl_buffer->commit_sync_fd);
3330 close(wl_egl_buffer->commit_sync_fd);
3331 wl_egl_buffer->commit_sync_fd = -1;
3334 if (wl_egl_buffer->presentation_sync_fd != -1) {
3335 int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
3337 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
3338 wl_egl_buffer->presentation_sync_fd);
3339 close(wl_egl_buffer->presentation_sync_fd);
3340 wl_egl_buffer->presentation_sync_fd = -1;
3343 if (wl_egl_buffer->rects) {
3344 free(wl_egl_buffer->rects);
3345 wl_egl_buffer->rects = NULL;
3346 wl_egl_buffer->num_rects = 0;
3349 wl_egl_buffer->tbm_surface = NULL;
3350 wl_egl_buffer->bo_name = -1;
3352 free(wl_egl_buffer);
3356 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
3358 return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
3362 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
3366 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3367 TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)",
3368 wl_egl_surface, wl_egl_surface->buffer_cnt);
3369 for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
3370 tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
3371 if (wl_egl_buffer) {
3373 "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
3374 idx, wl_egl_buffer, wl_egl_buffer->tbm_surface,
3375 wl_egl_buffer->bo_name,
3376 status_to_string[wl_egl_buffer->status]);
3379 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);