2 #include "tpl_internal.h"
7 #include <sys/eventfd.h>
9 #include <tbm_bufmgr.h>
10 #include <tbm_surface.h>
11 #include <tbm_surface_internal.h>
12 #include <tbm_surface_queue.h>
14 #include <wayland-client.h>
15 #include <wayland-tbm-server.h>
16 #include <wayland-tbm-client.h>
17 #include <wayland-egl-backend.h>
19 #include <tdm_client.h>
21 #include "wayland-egl-tizen/wayland-egl-tizen.h"
22 #include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
24 #include <tizen-surface-client-protocol.h>
25 #include <presentation-time-client-protocol.h>
26 #include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
28 #include "tpl_utils_gthread.h"
/* The address of this static is used as a unique per-process key for
 * attaching a tpl_wl_egl_buffer_t to a tbm_surface as user data. */
30 static int wl_egl_buffer_key;
31 #define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key)
33 /* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
34 #define CLIENT_QUEUE_SIZE 3
/* Tracking array is twice the queue depth — presumably to cover buffers
 * still held by the compositor in addition to client-owned ones. TODO confirm. */
35 #define BUFFER_ARRAY_SIZE (CLIENT_QUEUE_SIZE * 2)
/* Forward typedefs so the three structs below can reference each other. */
37 typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
38 typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
39 typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t;
/* Per-display backend state. One instance is allocated in
 * __tpl_wl_egl_display_init() and owned by tpl_display_t::backend.data.
 * Wayland event handling runs on a dedicated gthread ("wl_egl_thread"). */
41 struct _tpl_wl_egl_display {
42 tpl_gsource *disp_source;
/* Serializes access to the wayland event queue between caller threads
 * and the wl_egl_thread dispatcher. */
44 tpl_gmutex wl_event_mutex;
46 struct wl_display *wl_display;
47 struct wl_event_queue *ev_queue;
48 struct wayland_tbm_client *wl_tbm_client;
49 int last_error; /* errno of the last wl_display error*/
51 tpl_bool_t wl_initialized;
52 tpl_bool_t tdm_initialized;
/* tdm is used only for vblank waiting; it may be absent (see _thread_init,
 * which degrades gracefully when _thread_tdm_init fails). */
54 tdm_client *tdm_client;
55 tpl_gsource *tdm_source;
58 tpl_bool_t use_wait_vblank;
59 tpl_bool_t use_explicit_sync;
62 struct tizen_surface_shm *tss; /* used for surface buffer_flush */
63 struct wp_presentation *presentation; /* for presentation feedback */
64 struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
/* Per-window backend state bound to a wl_egl_window / tpl_surface_t pair.
 * Buffers produced for this surface are tracked in the buffers[] array. */
67 struct _tpl_wl_egl_surface {
68 tpl_gsource *surf_source;
70 tbm_surface_queue_h tbm_queue;
72 struct wl_egl_window *wl_egl_window;
73 struct wl_surface *wl_surface;
74 struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
75 struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */
77 tdm_client_vblank *vblank;
79 /* surface information */
90 tpl_wl_egl_display_t *wl_egl_display;
91 tpl_surface_t *tpl_surface;
93 /* wl_egl_buffer array for buffer tracing */
94 tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE];
95 int buffer_cnt; /* the number of using wl_egl_buffers */
/* Guards buffers[] and buffer_cnt, which are touched from both the
 * render thread and the wl_egl_thread. */
96 tpl_gmutex buffers_mutex;
98 tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
99 tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
111 tpl_gmutex surf_mutex;
114 /* for waiting draw done */
115 tpl_bool_t use_render_done_fence;
116 tpl_bool_t is_activated;
117 tpl_bool_t reset; /* TRUE if queue reseted by external */
118 tpl_bool_t need_to_enqueue;
119 tpl_bool_t prerotation_capability;
120 tpl_bool_t vblank_done;
121 tpl_bool_t set_serial_is_used;
/* Lifecycle states of a wl_egl_buffer; the numeric comments keep the enum
 * values aligned with the status_to_string[] table below. */
124 typedef enum buffer_status {
129 WAITING_SIGNALED, // 4
/* Human-readable names indexed by buffer_status for logging/tracing.
 * Must stay in the same order as the enum above. */
134 static const char *status_to_string[7] = {
139 "WAITING_SIGNALED", // 4
140 "WAITING_VBLANK", // 5
/* Per-buffer backend state attached to a tbm_surface via KEY_WL_EGL_BUFFER
 * (see _get_wl_egl_buffer). Tracks attach geometry, lifecycle status and
 * the various sync fds owned by this buffer. */
144 struct _tpl_wl_egl_buffer {
145 tbm_surface_h tbm_surface;
148 struct wl_proxy *wl_buffer;
149 int dx, dy; /* position to attach to wl_surface */
150 int width, height; /* size to attach to wl_surface */
152 buffer_status_t status; /* for tracing buffer status */
153 int idx; /* position index in buffers array of wl_egl_surface */
155 /* for damage region */
159 /* for wayland_tbm_client_set_buffer_transform */
161 tpl_bool_t w_rotated;
163 /* for wl_surface_set_buffer_transform */
166 /* for wayland_tbm_client_set_buffer_serial */
169 /* for checking need_to_commit (frontbuffer mode) */
170 tpl_bool_t need_to_commit;
172 /* for checking draw done */
173 tpl_bool_t draw_done;
176 /* to get release event via zwp_linux_buffer_release_v1 */
177 struct zwp_linux_buffer_release_v1 *buffer_release;
179 /* each buffers own its release_fence_fd, until it passes ownership
181 int32_t release_fence_fd;
183 /* each buffers own its acquire_fence_fd.
184 * If it use zwp_linux_buffer_release_v1 the ownership of this fd
185 * will be passed to display server
186 * Otherwise it will be used as a fence waiting for render done
188 int32_t acquire_fence_fd;
190 /* Fd to send a signal when wl_surface_commit with this buffer */
191 int32_t commit_sync_fd;
193 /* Fd to send a signal when receive the
194 * presentation feedback from display server */
195 int32_t presentation_sync_fd;
197 tpl_gsource *waiting_source;
/* Back-pointer to the owning surface. */
202 tpl_wl_egl_surface_t *wl_egl_surface;
/* Tracks one outstanding wp_presentation_feedback request; entries are
 * kept in wl_egl_surface->presentation_feedbacks. */
205 struct pst_feedback {
206 /* to get presentation feedback from display server */
207 struct wp_presentation_feedback *presentation_feedback;
/* Back-pointer to the surface this feedback belongs to. */
212 tpl_wl_egl_surface_t *wl_egl_surface;
/* Forward declarations of internal helpers defined later in this file. */
217 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
219 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
221 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
/* Looks up the tpl_wl_egl_buffer_t attached to a tbm_surface
 * (presumably via KEY_WL_EGL_BUFFER user data — TODO confirm). */
222 static tpl_wl_egl_buffer_t *
223 _get_wl_egl_buffer(tbm_surface_h tbm_surface);
225 _write_to_eventfd(int eventfd);
227 _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface);
229 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
231 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
232 tpl_wl_egl_buffer_t *wl_egl_buffer);
/* Returns whether the opaque native display handle is a wl_display.
 * The check matches __tpl_display_choose_backend_wl_egl_thread2 below:
 * either the first pointer-sized value is &wl_display_interface itself,
 * or it points to an interface whose name matches "wl_display". */
235 _check_native_handle_is_wl_display(tpl_handle_t display)
237 struct wl_interface *wl_egl_native_dpy = *(void **) display;
239 if (!wl_egl_native_dpy) {
240 TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy);
244 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
245 is a memory address pointing the structure of wl_display_interface. */
246 if (wl_egl_native_dpy == &wl_display_interface)
/* Fallback: compare interface names (handles the case where the client
 * links a different copy of libwayland than this library). */
249 if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
250 strlen(wl_display_interface.name)) == 0) {
/* gsource dispatch callback for the tdm client fd: pumps pending tdm
 * events on the wl_egl_thread. On unrecoverable errors the source is
 * destroyed and detached from the display. */
258 __thread_func_tdm_dispatch(tpl_gsource *gsource, uint64_t message)
260 tpl_wl_egl_display_t *wl_egl_display = NULL;
261 tdm_error tdm_err = TDM_ERROR_NONE;
265 wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
266 if (!wl_egl_display) {
267 TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource);
268 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
272 tdm_err = tdm_client_handle_events(wl_egl_display->tdm_client);
274 /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
275 * When tdm_source is no longer available due to an unexpected situation,
276 * wl_egl_thread must remove it from the thread and destroy it.
277 * In that case, tdm_vblank can no longer be used for surfaces and displays
278 * that used this tdm_source. */
279 if (tdm_err != TDM_ERROR_NONE) {
280 TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
282 TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
/* TPL_FALSE: do not wait for the finalize to complete here — we are
 * already running on the wl_egl_thread. */
284 tpl_gsource_destroy(gsource, TPL_FALSE);
286 wl_egl_display->tdm_source = NULL;
/* gsource finalize callback for tdm_source: destroys the tdm client and
 * marks tdm as uninitialized on the display. */
295 __thread_func_tdm_finalize(tpl_gsource *gsource)
297 tpl_wl_egl_display_t *wl_egl_display = NULL;
299 wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
301 TPL_LOG_T("WL_EGL", "tdm_destroy| wl_egl_display(%p) tdm_client(%p)",
302 wl_egl_display, wl_egl_display->tdm_client);
304 if (wl_egl_display->tdm_client) {
305 tdm_client_destroy(wl_egl_display->tdm_client);
306 wl_egl_display->tdm_client = NULL;
307 wl_egl_display->tdm_display_fd = -1;
310 wl_egl_display->tdm_initialized = TPL_FALSE;
/* gsource vtable for the tdm client fd (no prepare/check: plain fd poll). */
313 static tpl_gsource_functions tdm_funcs = {
316 .dispatch = __thread_func_tdm_dispatch,
317 .finalize = __thread_func_tdm_finalize,
/* Creates the tdm client used for vblank waiting and stores its fd on the
 * display. Called from _thread_init on the wl_egl_thread; the gsource for
 * the fd is created later by __tpl_wl_egl_display_init.
 * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_OPERATION otherwise. */
321 _thread_tdm_init(tpl_wl_egl_display_t *wl_egl_display)
323 tdm_client *tdm_client = NULL;
324 int tdm_display_fd = -1;
325 tdm_error tdm_err = TDM_ERROR_NONE;
327 tdm_client = tdm_client_create(&tdm_err);
328 if (!tdm_client || tdm_err != TDM_ERROR_NONE) {
329 TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
330 return TPL_ERROR_INVALID_OPERATION;
333 tdm_err = tdm_client_get_fd(tdm_client, &tdm_display_fd);
334 if (tdm_display_fd < 0 || tdm_err != TDM_ERROR_NONE) {
335 TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
/* Clean up the client created above before failing. */
336 tdm_client_destroy(tdm_client);
337 return TPL_ERROR_INVALID_OPERATION;
340 wl_egl_display->tdm_display_fd = tdm_display_fd;
341 wl_egl_display->tdm_client = tdm_client;
342 wl_egl_display->tdm_source = NULL;
343 wl_egl_display->tdm_initialized = TPL_TRUE;
345 TPL_INFO("[TDM_CLIENT_INIT]",
346 "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
347 wl_egl_display, tdm_client, tdm_display_fd);
349 return TPL_ERROR_NONE;
/* Highest tizen_surface_shm protocol version this implementation supports. */
352 #define IMPL_TIZEN_SURFACE_SHM_VERSION 2
/* wl_registry global handler: binds the optional protocols this backend
 * uses (tizen_surface_shm, wp_presentation, explicit sync).
 * NOTE(review): "resistry" is a typo for "registry"; the name is static and
 * used consistently, so renaming would touch all references. */
355 __cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
356 uint32_t name, const char *interface,
359 tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
361 if (!strcmp(interface, "tizen_surface_shm")) {
362 wl_egl_display->tss =
363 wl_registry_bind(wl_registry,
365 &tizen_surface_shm_interface,
/* Bind at the lower of the advertised and supported versions. */
366 ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
367 version : IMPL_TIZEN_SURFACE_SHM_VERSION));
368 } else if (!strcmp(interface, wp_presentation_interface.name)) {
369 wl_egl_display->presentation =
370 wl_registry_bind(wl_registry,
371 name, &wp_presentation_interface, 1);
372 TPL_DEBUG("bind wp_presentation_interface");
373 } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
/* Explicit fence sync is opt-in via the TPL_EFS environment variable. */
374 char *env = tpl_getenv("TPL_EFS");
375 if (env && atoi(env)) {
376 wl_egl_display->explicit_sync =
377 wl_registry_bind(wl_registry, name,
378 &zwp_linux_explicit_synchronization_v1_interface, 1);
379 wl_egl_display->use_explicit_sync = TPL_TRUE;
380 TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
382 wl_egl_display->use_explicit_sync = TPL_FALSE;
/* wl_registry global_remove handler — no per-global teardown is needed. */
388 __cb_wl_resistry_global_remove_callback(void *data,
389 struct wl_registry *wl_registry,
/* Listener passed to wl_registry_add_listener in _thread_wl_display_init. */
394 static const struct wl_registry_listener registry_listener = {
395 __cb_wl_resistry_global_callback,
396 __cb_wl_resistry_global_remove_callback
/* Logs the errno of a failed wayland call named by func_name, plus protocol
 * error details when wl_display_get_error reports EPROTO. Records errno in
 * last_error so repeated identical failures are reported only once. */
400 _wl_display_print_err(tpl_wl_egl_display_t *wl_egl_display,
401 const char *func_name)
405 strerror_r(errno, buf, sizeof(buf));
/* Suppress duplicate reports of the same errno. */
407 if (wl_egl_display->last_error == errno)
410 TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
412 dpy_err = wl_display_get_error(wl_egl_display->wl_display);
413 if (dpy_err == EPROTO) {
414 const struct wl_interface *err_interface;
415 uint32_t err_proxy_id, err_code;
416 err_code = wl_display_get_protocol_error(wl_egl_display->wl_display,
419 TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
420 err_interface->name, err_code, err_proxy_id);
423 wl_egl_display->last_error = errno;
/* Runs on the wl_egl_thread: creates the private event queue, binds the
 * globals this backend needs (via a temporary registry on a temporary
 * queue), initializes wayland-tbm, and reroutes all bound proxies onto
 * ev_queue so their events are dispatched by this thread.
 * Returns TPL_ERROR_NONE on success; on failure, the goto-cleanup tail
 * releases whatever was created. */
427 _thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display)
429 struct wl_registry *registry = NULL;
430 struct wl_event_queue *queue = NULL;
431 struct wl_display *display_wrapper = NULL;
432 struct wl_proxy *wl_tbm = NULL;
433 struct wayland_tbm_client *wl_tbm_client = NULL;
435 tpl_result_t result = TPL_ERROR_NONE;
/* 'queue' is used only for the registry roundtrip below and is destroyed
 * at the end; 'ev_queue' is the long-lived queue for this thread. */
437 queue = wl_display_create_queue(wl_egl_display->wl_display);
439 TPL_ERR("Failed to create wl_queue wl_display(%p)",
440 wl_egl_display->wl_display);
441 result = TPL_ERROR_INVALID_OPERATION;
445 wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display);
446 if (!wl_egl_display->ev_queue) {
447 TPL_ERR("Failed to create wl_queue wl_display(%p)",
448 wl_egl_display->wl_display);
449 result = TPL_ERROR_INVALID_OPERATION;
/* Wrap the display so the registry can be created on the temporary queue
 * without disturbing the application's own queue assignment. */
453 display_wrapper = wl_proxy_create_wrapper(wl_egl_display->wl_display);
454 if (!display_wrapper) {
455 TPL_ERR("Failed to create a proxy wrapper of wl_display(%p)",
456 wl_egl_display->wl_display);
457 result = TPL_ERROR_INVALID_OPERATION;
461 wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
463 registry = wl_display_get_registry(display_wrapper);
465 TPL_ERR("Failed to create wl_registry");
466 result = TPL_ERROR_INVALID_OPERATION;
/* The wrapper is only needed to create the registry. */
470 wl_proxy_wrapper_destroy(display_wrapper);
471 display_wrapper = NULL;
473 wl_tbm_client = wayland_tbm_client_init(wl_egl_display->wl_display);
474 if (!wl_tbm_client) {
475 TPL_ERR("Failed to initialize wl_tbm_client.");
476 result = TPL_ERROR_INVALID_CONNECTION;
480 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
482 TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
483 result = TPL_ERROR_INVALID_CONNECTION;
/* Route wl_tbm events to this thread's private queue. */
487 wl_proxy_set_queue(wl_tbm, wl_egl_display->ev_queue);
488 wl_egl_display->wl_tbm_client = wl_tbm_client;
490 if (wl_registry_add_listener(registry, &registry_listener,
492 TPL_ERR("Failed to wl_registry_add_listener");
493 result = TPL_ERROR_INVALID_OPERATION;
/* Blocks until the globals advertised at bind time have been delivered,
 * so tss/presentation/explicit_sync are populated below. */
497 ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue);
499 _wl_display_print_err(wl_egl_display, "roundtrip_queue");
500 result = TPL_ERROR_INVALID_OPERATION;
504 /* set tizen_surface_shm's queue as client's private queue */
505 if (wl_egl_display->tss) {
506 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss,
507 wl_egl_display->ev_queue);
508 TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", wl_egl_display->tss);
511 if (wl_egl_display->presentation) {
512 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->presentation,
513 wl_egl_display->ev_queue);
514 TPL_LOG_T("WL_EGL", "wp_presentation(%p) init.",
515 wl_egl_display->presentation);
518 if (wl_egl_display->explicit_sync) {
519 wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->explicit_sync,
520 wl_egl_display->ev_queue);
521 TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.",
522 wl_egl_display->explicit_sync);
525 wl_egl_display->wl_initialized = TPL_TRUE;
527 TPL_INFO("[WAYLAND_INIT]",
528 "wl_egl_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
529 wl_egl_display, wl_egl_display->wl_display,
530 wl_egl_display->wl_tbm_client, wl_egl_display->ev_queue);
531 TPL_INFO("[WAYLAND_INIT]",
532 "tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)",
533 wl_egl_display->tss, wl_egl_display->presentation,
534 wl_egl_display->explicit_sync);
/* Cleanup tail (reached on both success and failure paths). */
538 wl_proxy_wrapper_destroy(display_wrapper);
540 wl_registry_destroy(registry);
542 wl_event_queue_destroy(queue);
/* Runs on the wl_egl_thread: tears down everything created by
 * _thread_wl_display_init — pending reads, bound protocol objects,
 * the wayland-tbm client and the private event queue. */
548 _thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display)
550 /* If wl_egl_display is in prepared state, cancel it */
551 if (wl_egl_display->prepared) {
552 wl_display_cancel_read(wl_egl_display->wl_display);
553 wl_egl_display->prepared = TPL_FALSE;
/* Drain any events still queued on ev_queue before destroying proxies. */
556 if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
557 wl_egl_display->ev_queue) == -1) {
558 _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
561 if (wl_egl_display->tss) {
562 TPL_INFO("[TIZEN_SURFACE_SHM_DESTROY]",
563 "wl_egl_display(%p) tizen_surface_shm(%p) fini.",
564 wl_egl_display, wl_egl_display->tss);
565 tizen_surface_shm_destroy(wl_egl_display->tss);
566 wl_egl_display->tss = NULL;
569 if (wl_egl_display->presentation) {
570 TPL_INFO("[WP_PRESENTATION_DESTROY]",
571 "wl_egl_display(%p) wp_presentation(%p) fini.",
572 wl_egl_display, wl_egl_display->presentation);
573 wp_presentation_destroy(wl_egl_display->presentation);
574 wl_egl_display->presentation = NULL;
577 if (wl_egl_display->explicit_sync) {
578 TPL_INFO("[EXPLICIT_SYNC_DESTROY]",
579 "wl_egl_display(%p) zwp_linux_explicit_synchronization_v1(%p) fini.",
580 wl_egl_display, wl_egl_display->explicit_sync);
581 zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync);
582 wl_egl_display->explicit_sync = NULL;
585 if (wl_egl_display->wl_tbm_client) {
586 struct wl_proxy *wl_tbm = NULL;
588 wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(
589 wl_egl_display->wl_tbm_client);
/* Detach wl_tbm from ev_queue before the queue is destroyed below. */
591 wl_proxy_set_queue(wl_tbm, NULL);
594 TPL_INFO("[WL_TBM_DEINIT]",
595 "wl_egl_display(%p) wl_tbm_client(%p)",
596 wl_egl_display, wl_egl_display->wl_tbm_client);
597 wayland_tbm_client_deinit(wl_egl_display->wl_tbm_client);
598 wl_egl_display->wl_tbm_client = NULL;
601 wl_event_queue_destroy(wl_egl_display->ev_queue);
603 wl_egl_display->wl_initialized = TPL_FALSE;
605 TPL_INFO("[DISPLAY_FINI]", "wl_egl_display(%p) wl_display(%p)",
606 wl_egl_display, wl_egl_display->wl_display);
/* Thread-entry init hook passed to tpl_gthread_create: performs wayland
 * and tdm initialization on the new wl_egl_thread. A tdm failure is only
 * a warning — vblank waiting is then disabled, the thread still runs. */
610 _thread_init(void *data)
612 tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
614 if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) {
615 TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)",
616 wl_egl_display, wl_egl_display->wl_display);
619 if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
620 TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
623 return wl_egl_display;
/* gsource prepare callback for the wl_display fd: enters the libwayland
 * prepare-read protocol on ev_queue (dispatching any already-queued
 * events first) and flushes outgoing requests before the poll. */
627 __thread_func_disp_prepare(tpl_gsource *gsource)
629 tpl_wl_egl_display_t *wl_egl_display =
630 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
632 /* If this wl_egl_display is already prepared,
633 * do nothing in this function. */
634 if (wl_egl_display->prepared)
637 /* If there is a last_error, there is no need to poll,
638 * so skip directly to dispatch.
639 * prepare -> dispatch */
640 if (wl_egl_display->last_error)
/* Standard wl_display_prepare_read_queue loop: retry until the queue is
 * empty, dispatching pending events in between. */
643 while (wl_display_prepare_read_queue(wl_egl_display->wl_display,
644 wl_egl_display->ev_queue) != 0) {
645 if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
646 wl_egl_display->ev_queue) == -1) {
647 _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
651 wl_egl_display->prepared = TPL_TRUE;
653 wl_display_flush(wl_egl_display->wl_display);
/* gsource check callback: completes the prepare-read protocol started in
 * __thread_func_disp_prepare by either reading events (fd readable) or
 * cancelling the read. Always clears the prepared flag before returning. */
659 __thread_func_disp_check(tpl_gsource *gsource)
661 tpl_wl_egl_display_t *wl_egl_display =
662 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
663 tpl_bool_t ret = TPL_FALSE;
/* Nothing to do if prepare did not enter the read state. */
665 if (!wl_egl_display->prepared)
668 /* If prepared, but last_error is set,
669 * cancel_read is executed and FALSE is returned.
670 * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
671 * and skipping disp_check from prepare to disp_dispatch.
672 * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
673 if (wl_egl_display->prepared && wl_egl_display->last_error) {
674 wl_display_cancel_read(wl_egl_display->wl_display);
678 if (tpl_gsource_check_io_condition(gsource)) {
679 if (wl_display_read_events(wl_egl_display->wl_display) == -1)
680 _wl_display_print_err(wl_egl_display, "read_event");
/* fd not readable: a prepared read must be explicitly cancelled. */
683 wl_display_cancel_read(wl_egl_display->wl_display);
687 wl_egl_display->prepared = TPL_FALSE;
/* gsource dispatch callback: dispatches events read in the check phase,
 * holding wl_event_mutex so other threads see a consistent queue state.
 * A set last_error removes the source from the loop entirely. */
693 __thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
695 tpl_wl_egl_display_t *wl_egl_display =
696 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
700 /* If there is last_error, SOURCE_REMOVE should be returned
701 * to remove the gsource from the main loop.
702 * This is because wl_egl_display is not valid since last_error was set.*/
703 if (wl_egl_display->last_error) {
707 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
708 if (tpl_gsource_check_io_condition(gsource)) {
709 if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
710 wl_egl_display->ev_queue) == -1) {
711 _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
/* Flush any requests issued by the handlers just dispatched. */
715 wl_display_flush(wl_egl_display->wl_display);
716 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* gsource finalize callback: tears down the wayland side (if it was
 * initialized) when disp_source is destroyed. */
722 __thread_func_disp_finalize(tpl_gsource *gsource)
724 tpl_wl_egl_display_t *wl_egl_display =
725 (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
727 if (wl_egl_display->wl_initialized)
728 _thread_wl_display_fini(wl_egl_display);
730 TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)",
731 wl_egl_display, gsource);
/* Full prepare/check/dispatch vtable — required because libwayland's
 * read protocol needs the two-phase prepare/check handshake. */
737 static tpl_gsource_functions disp_funcs = {
738 .prepare = __thread_func_disp_prepare,
739 .check = __thread_func_disp_check,
740 .dispatch = __thread_func_disp_dispatch,
741 .finalize = __thread_func_disp_finalize,
/* Backend entry point: validates the native handle, allocates the
 * wl_egl_display, spawns the wl_egl_thread (which runs _thread_init),
 * and attaches the display fd and tdm fd gsources to it.
 * Returns TPL_ERROR_NONE on success; on failure everything created so
 * far is unwound by the error tail at the bottom. */
745 __tpl_wl_egl_display_init(tpl_display_t *display)
747 tpl_wl_egl_display_t *wl_egl_display = NULL;
751 /* Do not allow default display in wayland. */
752 if (!display->native_handle) {
753 TPL_ERR("Invalid native handle for display.");
754 return TPL_ERROR_INVALID_PARAMETER;
757 if (!_check_native_handle_is_wl_display(display->native_handle)) {
758 TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
759 return TPL_ERROR_INVALID_PARAMETER;
762 wl_egl_display = (tpl_wl_egl_display_t *) calloc(1,
763 sizeof(tpl_wl_egl_display_t));
764 if (!wl_egl_display) {
765 TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t.");
766 return TPL_ERROR_OUT_OF_MEMORY;
769 display->backend.data = wl_egl_display;
770 display->bufmgr_fd = -1;
/* Explicit defaults (calloc already zeroed the struct; these document
 * the intended initial state). */
772 wl_egl_display->tdm_initialized = TPL_FALSE;
773 wl_egl_display->wl_initialized = TPL_FALSE;
775 wl_egl_display->ev_queue = NULL;
776 wl_egl_display->wl_display = (struct wl_display *)display->native_handle;
777 wl_egl_display->last_error = 0;
778 wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
779 wl_egl_display->prepared = TPL_FALSE;
781 /* Wayland Interfaces */
782 wl_egl_display->tss = NULL;
783 wl_egl_display->presentation = NULL;
784 wl_egl_display->explicit_sync = NULL;
785 wl_egl_display->wl_tbm_client = NULL;
787 wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled
/* TPL_WAIT_VBLANK=0 disables vblank waiting. */
789 char *env = tpl_getenv("TPL_WAIT_VBLANK");
790 if (env && !atoi(env)) {
791 wl_egl_display->use_wait_vblank = TPL_FALSE;
795 tpl_gmutex_init(&wl_egl_display->wl_event_mutex);
/* _thread_init runs on the new thread and performs wayland/tdm setup
 * before tpl_gthread_create returns. */
798 wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
799 (tpl_gthread_func)_thread_init,
800 (void *)wl_egl_display);
801 if (!wl_egl_display->thread) {
802 TPL_ERR("Failed to create wl_egl_thread");
806 wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread,
807 (void *)wl_egl_display,
808 wl_display_get_fd(wl_egl_display->wl_display),
809 &disp_funcs, SOURCE_TYPE_NORMAL);
810 if (!wl_egl_display->disp_source) {
811 TPL_ERR("Failed to add native_display(%p) to thread(%p)",
812 display->native_handle,
813 wl_egl_display->thread);
/* tdm_display_fd was filled in by _thread_tdm_init on the thread. */
817 wl_egl_display->tdm_source = tpl_gsource_create(wl_egl_display->thread,
818 (void *)wl_egl_display,
819 wl_egl_display->tdm_display_fd,
820 &tdm_funcs, SOURCE_TYPE_NORMAL);
821 if (!wl_egl_display->tdm_source) {
822 TPL_ERR("Failed to create tdm_gsource\n");
826 TPL_INFO("[DISPLAY_INIT]",
827 "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
829 wl_egl_display->thread,
830 wl_egl_display->wl_display);
832 TPL_INFO("[DISPLAY_INIT]",
833 "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)",
834 wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
835 wl_egl_display->tss ? "TRUE" : "FALSE",
836 wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");
838 return TPL_ERROR_NONE;
/* Error tail: destroy gsources (TPL_TRUE waits for the thread-side
 * finalize), stop the thread, then free the display. */
841 if (wl_egl_display->thread) {
842 if (wl_egl_display->tdm_source)
843 tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
844 if (wl_egl_display->disp_source)
845 tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
847 tpl_gthread_destroy(wl_egl_display->thread);
850 wl_egl_display->thread = NULL;
851 free(wl_egl_display);
853 display->backend.data = NULL;
854 return TPL_ERROR_INVALID_OPERATION;
/* Backend display teardown: destroys tdm and display gsources (waiting
 * for their thread-side finalizers), stops the wl_egl_thread, and frees
 * the wl_egl_display. Safe to call when backend.data is already NULL. */
858 __tpl_wl_egl_display_fini(tpl_display_t *display)
860 tpl_wl_egl_display_t *wl_egl_display;
864 wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data;
865 if (wl_egl_display) {
866 TPL_INFO("[DISPLAY_FINI]",
867 "wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
869 wl_egl_display->thread,
870 wl_egl_display->wl_display);
/* Destroy tdm_source only if tdm actually initialized; TPL_TRUE blocks
 * until __thread_func_tdm_finalize has run on the thread. */
872 if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) {
873 tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
874 wl_egl_display->tdm_source = NULL;
877 if (wl_egl_display->disp_source) {
878 tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
879 wl_egl_display->disp_source = NULL;
882 if (wl_egl_display->thread) {
883 tpl_gthread_destroy(wl_egl_display->thread);
884 wl_egl_display->thread = NULL;
887 tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);
889 free(wl_egl_display);
892 display->backend.data = NULL;
/* Maps an EGL-style config request onto a tbm format. Only 8:8:8 window
 * configs at 24/32 bpp are supported: alpha 8 -> ARGB8888, alpha 0 ->
 * XRGB8888. Everything else is rejected with INVALID_PARAMETER. */
896 __tpl_wl_egl_display_query_config(tpl_display_t *display,
897 tpl_surface_type_t surface_type,
898 int red_size, int green_size,
899 int blue_size, int alpha_size,
900 int color_depth, int *native_visual_id,
905 if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
906 green_size == 8 && blue_size == 8 &&
907 (color_depth == 32 || color_depth == 24)) {
909 if (alpha_size == 8) {
910 if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
911 if (is_slow) *is_slow = TPL_FALSE;
912 return TPL_ERROR_NONE;
914 if (alpha_size == 0) {
915 if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
916 if (is_slow) *is_slow = TPL_FALSE;
917 return TPL_ERROR_NONE;
921 return TPL_ERROR_INVALID_PARAMETER;
/* No config filtering is needed for this backend — accept everything. */
925 __tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
929 TPL_IGNORE(visual_id);
930 TPL_IGNORE(alpha_size);
931 return TPL_ERROR_NONE;
/* Reports size and format of a wl_egl_window. If a wl_egl_surface is
 * already attached via tizen_private->data, its format wins; otherwise
 * the format is derived from the requested alpha size. */
935 __tpl_wl_egl_display_get_window_info(tpl_display_t *display,
936 tpl_handle_t window, int *width,
937 int *height, tbm_format *format,
938 int depth, int a_size)
940 tpl_result_t ret = TPL_ERROR_NONE;
941 struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window;
946 if (!wl_egl_window) {
947 TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window);
948 return TPL_ERROR_INVALID_PARAMETER;
951 if (width) *width = wl_egl_window->width;
952 if (height) *height = wl_egl_window->height;
954 struct tizen_private *tizen_private =
955 (struct tizen_private *)wl_egl_window->driver_private;
956 if (tizen_private && tizen_private->data) {
957 tpl_wl_egl_surface_t *wl_egl_surface =
958 (tpl_wl_egl_surface_t *)tizen_private->data;
959 *format = wl_egl_surface->format;
/* No surface attached yet: choose by alpha size (a_size 8 -> ARGB). */
962 *format = TBM_FORMAT_ARGB8888;
964 *format = TBM_FORMAT_XRGB8888;
/* Reports size and format of a native pixmap by resolving it to a
 * tbm_surface through wayland-tbm. All out-parameters are optional. */
972 __tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
973 tpl_handle_t pixmap, int *width,
974 int *height, tbm_format *format)
976 tbm_surface_h tbm_surface = NULL;
979 TPL_ERR("Invalid parameter. tpl_handle_t(%p)", pixmap);
980 return TPL_ERROR_INVALID_PARAMETER;
983 tbm_surface = wayland_tbm_server_get_surface(NULL,
984 (struct wl_resource *)pixmap);
986 TPL_ERR("Failed to get tbm_surface from wayland_tbm.");
987 return TPL_ERROR_INVALID_PARAMETER;
990 if (width) *width = tbm_surface_get_width(tbm_surface);
991 if (height) *height = tbm_surface_get_height(tbm_surface);
992 if (format) *format = tbm_surface_get_format(tbm_surface);
994 return TPL_ERROR_NONE;
/* Resolves a native pixmap handle to its backing tbm_surface via
 * wayland-tbm (NULL on failure). */
998 __tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
1000 tbm_surface_h tbm_surface = NULL;
1004 tbm_surface = wayland_tbm_server_get_surface(NULL,
1005 (struct wl_resource *)pixmap);
1007 TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
/* Backend-selection probe: TRUE when native_dpy is a wl_display.
 * Same magic check as _check_native_handle_is_wl_display above. */
1015 __tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy)
1017 struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
1019 TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
1021 /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
1022 is a memory address pointing the structure of wl_display_interface. */
1023 if (wl_egl_native_dpy == &wl_display_interface)
/* Fallback name comparison for clients linking a separate libwayland. */
1026 if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
1027 strlen(wl_display_interface.name)) == 0) {
1034 /* -- BEGIN -- wl_egl_window callback functions */
/* Invoked when the application destroys the wl_egl_window. Destroying the
 * native window before eglDestroySurface is abnormal; in that case all
 * window/tizen_private links are severed so later backend code cannot
 * touch the freed window. */
1036 __cb_destroy_callback(void *private)
1038 struct tizen_private *tizen_private = (struct tizen_private *)private;
1039 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1041 if (!tizen_private) {
1042 TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface");
1046 wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1047 if (wl_egl_surface) {
1048 TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.",
1049 wl_egl_surface->wl_egl_window);
1050 TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface.");
/* Clear every cross-reference under surf_mutex so the render thread
 * cannot race against this teardown. */
1052 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1053 wl_egl_surface->wl_egl_window->destroy_window_callback = NULL;
1054 wl_egl_surface->wl_egl_window->resize_callback = NULL;
1055 wl_egl_surface->wl_egl_window->driver_private = NULL;
1056 wl_egl_surface->wl_egl_window = NULL;
1057 wl_egl_surface->wl_surface = NULL;
1059 tizen_private->set_window_serial_callback = NULL;
1060 tizen_private->rotate_callback = NULL;
1061 tizen_private->get_rotation_capability = NULL;
1062 tizen_private->set_frontbuffer_callback = NULL;
1063 tizen_private->create_commit_sync_fd = NULL;
1064 tizen_private->create_presentation_sync_fd = NULL;
1065 tizen_private->data = NULL;
1067 free(tizen_private);
1068 tizen_private = NULL;
1069 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* wl_egl_window resize callback: resets the tbm_surface_queue to the
 * window's newly requested size, keeping the current format. */
1074 __cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
1076 TPL_ASSERT(private);
1077 TPL_ASSERT(wl_egl_window);
1079 struct tizen_private *tizen_private = (struct tizen_private *)private;
1080 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1081 int cur_w, cur_h, req_w, req_h, format;
1083 if (!wl_egl_surface) {
1084 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
/* Current size from the surface, requested size from the window. */
1089 format = wl_egl_surface->format;
1090 cur_w = wl_egl_surface->width;
1091 cur_h = wl_egl_surface->height;
1092 req_w = wl_egl_window->width;
1093 req_h = wl_egl_window->height;
1095 TPL_INFO("[WINDOW_RESIZE]",
1096 "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
1097 wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
1099 if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
1100 != TBM_SURFACE_QUEUE_ERROR_NONE) {
1101 TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
1105 /* -- END -- wl_egl_window callback functions */
1107 /* -- BEGIN -- wl_egl_window tizen private callback functions */
1109 /* There is no usecase for using prerotation callback below */
/* tizen_private rotate callback: records the window's new rotation on
 * the wl_egl_surface. */
1111 __cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
1113 TPL_ASSERT(private);
1114 TPL_ASSERT(wl_egl_window);
1116 struct tizen_private *tizen_private = (struct tizen_private *)private;
1117 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1118 int rotation = tizen_private->rotation;
1120 if (!wl_egl_surface) {
1121 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1126 TPL_INFO("[WINDOW_ROTATE]",
1127 "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)",
1128 wl_egl_surface, wl_egl_window,
1129 wl_egl_surface->rotation, rotation);
1131 wl_egl_surface->rotation = rotation;
1134 /* There is no usecase for using prerotation callback below */
/* tizen_private capability query: reports whether this surface supports
 * pre-rotation, based on the flag set elsewhere on the wl_egl_surface. */
1136 __cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
1139 TPL_ASSERT(private);
1140 TPL_ASSERT(wl_egl_window);
1142 int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
1143 struct tizen_private *tizen_private = (struct tizen_private *)private;
1144 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1146 if (!wl_egl_surface) {
1147 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1149 return rotation_capability;
1152 if (wl_egl_surface->prerotation_capability == TPL_TRUE)
1153 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
1155 rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
1158 return rotation_capability;
/* tizen_private callback: stores an application-provided buffer serial on
 * the surface and marks that explicit serials are now in use. */
1162 __cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
1163 void *private, unsigned int serial)
1165 TPL_ASSERT(private);
1166 TPL_ASSERT(wl_egl_window);
1168 struct tizen_private *tizen_private = (struct tizen_private *)private;
1169 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1171 if (!wl_egl_surface) {
1172 TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
1177 wl_egl_surface->set_serial_is_used = TPL_TRUE;
1178 wl_egl_surface->serial = serial;
/* tizen_private callback: returns a dup of the surface's commit-sync
 * eventfd, creating the eventfd on first use. The fd is signalled when
 * wl_surface_commit happens for a buffer of this surface (see the
 * commit_sync_fd field on _tpl_wl_egl_buffer). The caller owns the
 * returned (dup'ed) fd; -1 on failure. */
1182 __cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1184 TPL_ASSERT(private);
1185 TPL_ASSERT(wl_egl_window);
1187 int commit_sync_fd = -1;
1189 struct tizen_private *tizen_private = (struct tizen_private *)private;
1190 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1192 if (!wl_egl_surface) {
1193 TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface);
1197 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
/* Already created: hand out another dup of the same eventfd. */
1199 if (wl_egl_surface->commit_sync.fd != -1) {
1200 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1201 TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
1202 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1203 TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)",
1204 wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd);
1205 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1206 return commit_sync_fd;
/* First call: lazily create the eventfd owned by the surface. */
1209 wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
1210 if (wl_egl_surface->commit_sync.fd == -1) {
1211 TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)",
1213 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1217 commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
1219 TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
1220 wl_egl_surface->commit_sync.fd, commit_sync_fd);
1221 TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)",
1222 wl_egl_surface, commit_sync_fd);
1224 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1226 return commit_sync_fd;
/* wl_egl_window tizen-private callback: presentation-feedback analogue of
 * __cb_create_commit_sync_fd(). Returns a dup()'d fd of the surface's
 * presentation-sync eventfd, creating it (EFD_CLOEXEC) on first use.
 * Caller owns the returned fd; -1 on failure. Guarded by
 * presentation_sync.mutex. */
1230 __cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
1232 	TPL_ASSERT(private);
1233 	TPL_ASSERT(wl_egl_window);
1235 	int presentation_sync_fd = -1;
1237 	struct tizen_private *tizen_private = (struct tizen_private *)private;
1238 	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
1240 	if (!wl_egl_surface) {
1241 		TPL_ERR("Invalid parameter. wl_egl_surface is NULL");
1245 	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
/* Fast path: eventfd already exists — return another dup of it. */
1246 	if (wl_egl_surface->presentation_sync.fd != -1) {
1247 		presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1248 		TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
1249 				   wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1250 		TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1251 				  wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1252 		tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1253 		return presentation_sync_fd;
/* First call: lazily create the eventfd signalled on presentation feedback. */
1256 	wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
1257 	if (wl_egl_surface->presentation_sync.fd == -1) {
1258 		TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)",
1260 		tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1264 	presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
1265 	TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
1266 			   wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1267 	TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
1268 			  wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
1270 	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1272 	return presentation_sync_fd;
1274 /* -- END -- wl_egl_window tizen private callback functions */
1276 /* -- BEGIN -- tizen_surface_shm_flusher_listener */
/* tizen_surface_shm_flusher listener: compositor asked the client to flush
 * its buffers — flush the whole tbm_surface_queue. */
1277 static void __cb_tss_flusher_flush_callback(void *data,
1278 										   struct tizen_surface_shm_flusher *tss_flusher)
1280 	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1281 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1283 	TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1284 			 wl_egl_surface, wl_egl_surface->tbm_queue);
1286 	tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
1287 	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1288 		TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
/* tizen_surface_shm_flusher listener: like the flush callback above, but only
 * frees the queue's unused (free) buffers via tbm_surface_queue_free_flush(). */
1293 static void __cb_tss_flusher_free_flush_callback(void *data,
1294 										   struct tizen_surface_shm_flusher *tss_flusher)
1296 	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1297 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1299 	TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
1300 			 wl_egl_surface, wl_egl_surface->tbm_queue);
1302 	tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
1303 	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
1304 		TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
/* Listener vtable registered in _thread_wl_egl_surface_init(); entry order
 * must match the tizen_surface_shm_flusher protocol (flush, free_flush). */
1309 static const struct tizen_surface_shm_flusher_listener
1310 tss_flusher_listener = {
1311 	__cb_tss_flusher_flush_callback,
1312 	__cb_tss_flusher_free_flush_callback
1314 /* -- END -- tizen_surface_shm_flusher_listener */
1317 /* -- BEGIN -- tbm_surface_queue callback funstions */
/* tbm_surface_queue reset callback: runs when the queue is reset (resize or
 * compositor-driven activate/deactivate). Sets wl_egl_surface->reset so the
 * next frame picks up the new size/state, then forwards to the tpl_surface's
 * reset_cb if one was registered.
 * NOTE(review): line-number gaps show some lines (braces/else) were dropped
 * from this dump; comments describe only the visible code. */
1319 __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
1322 	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1323 	tpl_wl_egl_display_t *wl_egl_display = NULL;
1324 	tpl_surface_t *surface = NULL;
1325 	tpl_bool_t is_activated = TPL_FALSE;
1328 	wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1329 	TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1331 	wl_egl_display = wl_egl_surface->wl_egl_display;
1332 	TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
1334 	surface = wl_egl_surface->tpl_surface;
1335 	TPL_CHECK_ON_NULL_RETURN(surface);
1337 	/* When the queue is resized, change the reset flag to TPL_TRUE to reflect
1338 	 * the changed window size at the next frame. */
1339 	width = tbm_surface_queue_get_width(tbm_queue);
1340 	height = tbm_surface_queue_get_height(tbm_queue);
1341 	if (surface->width != width || surface->height != height) {
1342 		TPL_INFO("[QUEUE_RESIZE]",
1343 				 "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
1344 				 wl_egl_surface, tbm_queue,
1345 				 surface->width, surface->height, width, height);
1348 	/* When queue_reset_callback is called, if is_activated is different from
1349 	 * its previous state change the reset flag to TPL_TRUE to get a new buffer
1350 	 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
1351 	is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client,
1352 														   wl_egl_surface->tbm_queue);
1353 	if (wl_egl_surface->is_activated != is_activated) {
1355 			TPL_INFO("[ACTIVATED]",
1356 					  "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1357 					  wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
1359 			TPL_LOG_T("[DEACTIVATED]",
1360 					  " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1361 					  wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
/* Consumed (and cleared) by __tpl_wl_egl_surface_validate(). */
1365 	wl_egl_surface->reset = TPL_TRUE;
1367 	if (surface->reset_cb)
1368 		surface->reset_cb(surface->reset_data);
/* tbm_surface_queue acquirable callback: a buffer became acquirable — wake the
 * surface's gsource thread with message 2 so __thread_func_surf_dispatch()
 * runs _thread_surface_queue_acquire() on the wayland-egl thread. */
1372 __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
1375 	TPL_IGNORE(tbm_queue);
1377 	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
1378 	TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1380 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1382 	tpl_gsource_send_message(wl_egl_surface->surf_source, 2);
1384 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1386 /* -- END -- tbm_surface_queue callback funstions */
/* Runs on the wayland-egl thread (from __thread_func_surf_finalize) to tear
 * down per-surface thread resources: pending presentation feedbacks and their
 * eventfds, the vblank-waiting list, the explicit-sync object, the shm
 * flusher, the tdm vblank object and the tbm queue. Holds surf_mutex for the
 * whole teardown; presentation_sync.mutex additionally guards the feedback/fd
 * section. */
1389 _thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
1391 	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1393 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1395 	TPL_INFO("[SURFACE_FINI]",
1396 			  "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
1397 			  wl_egl_surface, wl_egl_surface->wl_egl_window,
1398 			  wl_egl_surface->wl_surface);
1400 	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
/* Drain outstanding presentation feedbacks: signal each waiter's eventfd
 * before closing it so nothing blocks forever on a dead surface. */
1402 	if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) {
1403 		while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) {
1404 			struct pst_feedback *pst_feedback =
1405 				(struct pst_feedback *)__tpl_list_pop_front(
1406 						wl_egl_surface->presentation_feedbacks, NULL);
1408 				_write_to_eventfd(pst_feedback->pst_sync_fd);
1409 				close(pst_feedback->pst_sync_fd);
1410 				pst_feedback->pst_sync_fd = -1;
1412 			wp_presentation_feedback_destroy(pst_feedback->presentation_feedback);
1413 			pst_feedback->presentation_feedback = NULL;
1419 		__tpl_list_free(wl_egl_surface->presentation_feedbacks, NULL);
1420 		wl_egl_surface->presentation_feedbacks = NULL;
/* Signal-then-close the surface-level presentation sync fd as well. */
1423 	if (wl_egl_surface->presentation_sync.fd != -1) {
1424 		_write_to_eventfd(wl_egl_surface->presentation_sync.fd);
1425 		close(wl_egl_surface->presentation_sync.fd);
1426 		wl_egl_surface->presentation_sync.fd = -1;
1429 	if (wl_egl_surface->vblank_waiting_buffers) {
1430 		__tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL);
1431 		wl_egl_surface->vblank_waiting_buffers = NULL;
1434 	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1436 	if (wl_egl_surface->surface_sync) {
1437 		TPL_INFO("[SURFACE_SYNC_DESTROY]",
1438 				 "wl_egl_surface(%p) surface_sync(%p)",
1439 				  wl_egl_surface, wl_egl_surface->surface_sync);
1440 		zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync);
1441 		wl_egl_surface->surface_sync = NULL;
1444 	if (wl_egl_surface->tss_flusher) {
1445 		TPL_INFO("[FLUSHER_DESTROY]",
1446 				 "wl_egl_surface(%p) tss_flusher(%p)",
1447 				  wl_egl_surface, wl_egl_surface->tss_flusher);
1448 		tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher);
1449 		wl_egl_surface->tss_flusher = NULL;
1452 	if (wl_egl_surface->vblank) {
1453 		TPL_INFO("[VBLANK_DESTROY]",
1454 				 "wl_egl_surface(%p) vblank(%p)",
1455 				  wl_egl_surface, wl_egl_surface->vblank);
1456 		tdm_client_vblank_destroy(wl_egl_surface->vblank);
1457 		wl_egl_surface->vblank = NULL;
1460 	if (wl_egl_surface->tbm_queue) {
1461 		TPL_INFO("[TBM_QUEUE_DESTROY]",
1462 				 "wl_egl_surface(%p) tbm_queue(%p)",
1463 				  wl_egl_surface, wl_egl_surface->tbm_queue);
1464 		tbm_surface_queue_destroy(wl_egl_surface->tbm_queue);
1465 		wl_egl_surface->tbm_queue = NULL;
1468 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* gsource dispatch for the per-surface source, running on the wayland-egl
 * thread. Message 1 = initialize (signals surf_cond so the caller blocked in
 * __tpl_wl_egl_surface_init can continue); message 2 = acquirable (from
 * __cb_tbm_queue_acquirable_callback). */
1472 __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
1474 	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1476 	wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
1478 	/* Initialize surface */
1480 		tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1481 		TPL_DEBUG("wl_egl_surface(%p) initialize message received!",
1483 		_thread_wl_egl_surface_init(wl_egl_surface);
1484 		tpl_gcond_signal(&wl_egl_surface->surf_cond);
1485 		tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1486 	} else if (message == 2) {
1487 		tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1488 		TPL_DEBUG("wl_egl_surface(%p) acquirable message received!",
1490 		_thread_surface_queue_acquire(wl_egl_surface);
1491 		tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
/* gsource finalize hook: invoked when the surface's gsource is destroyed
 * (tpl_gsource_destroy in __tpl_wl_egl_surface_fini); performs the on-thread
 * teardown via _thread_wl_egl_surface_fini(). */
1498 __thread_func_surf_finalize(tpl_gsource *gsource)
1500 	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1502 	wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
1503 	TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1505 	_thread_wl_egl_surface_fini(wl_egl_surface);
1507 	TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%p)",
1508 			  gsource, wl_egl_surface);
/* gsource vtable for the per-surface source (see tpl_gsource_create call in
 * __tpl_wl_egl_surface_init). */
1511 static tpl_gsource_functions surf_funcs = {
1514 	.dispatch = __thread_func_surf_dispatch,
1515 	.finalize = __thread_func_surf_finalize,
/* Backend surface-init entry point. Allocates the tpl_wl_egl_surface_t,
 * creates its gsource on the display's wayland-egl thread, wires the
 * wl_egl_window tizen-private callbacks, initializes the mutexes/cond, then
 * sends message 1 and blocks on surf_cond until the thread-side init
 * (_thread_wl_egl_surface_init) has created the tbm_queue.
 * Returns TPL_ERROR_NONE, TPL_ERROR_INVALID_PARAMETER, TPL_ERROR_OUT_OF_MEMORY
 * or TPL_ERROR_INVALID_OPERATION (gsource creation failure).
 * NOTE(review): line-number gaps show dropped lines in this dump. */
1519 __tpl_wl_egl_surface_init(tpl_surface_t *surface)
1521 	tpl_wl_egl_display_t *wl_egl_display = NULL;
1522 	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1523 	tpl_gsource *surf_source = NULL;
1525 	struct wl_egl_window *wl_egl_window =
1526 		(struct wl_egl_window *)surface->native_handle;
1528 	TPL_ASSERT(surface);
1529 	TPL_ASSERT(surface->display);
1530 	TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
1531 	TPL_ASSERT(surface->native_handle);
1534 		(tpl_wl_egl_display_t *)surface->display->backend.data;
1535 	if (!wl_egl_display) {
1536 		TPL_ERR("Invalid parameter. wl_egl_display(%p)",
1538 		return TPL_ERROR_INVALID_PARAMETER;
1541 	wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
1542 						 sizeof(tpl_wl_egl_surface_t));
1543 	if (!wl_egl_surface) {
1544 		TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
1545 		return TPL_ERROR_OUT_OF_MEMORY;
1548 	surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface,
1549 									 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
1551 		TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)",
1553 		goto surf_source_create_fail;
1556 	surface->backend.data = (void *)wl_egl_surface;
1557 	surface->width = wl_egl_window->width;
1558 	surface->height = wl_egl_window->height;
1559 	surface->rotation = 0;
1561 	wl_egl_surface->tpl_surface = surface;
1562 	wl_egl_surface->width = wl_egl_window->width;
1563 	wl_egl_surface->height = wl_egl_window->height;
1564 	wl_egl_surface->format = surface->format;
1566 	wl_egl_surface->surf_source = surf_source;
1567 	wl_egl_surface->wl_egl_window = wl_egl_window;
1568 	wl_egl_surface->wl_surface = wl_egl_window->surface;
1570 	wl_egl_surface->wl_egl_display = wl_egl_display;
/* Default state flags; see the corresponding callbacks for how they change. */
1572 	wl_egl_surface->reset = TPL_FALSE;
1573 	wl_egl_surface->is_activated = TPL_FALSE;
1574 	wl_egl_surface->need_to_enqueue = TPL_TRUE;
1575 	wl_egl_surface->prerotation_capability = TPL_FALSE;
1576 	wl_egl_surface->vblank_done = TPL_TRUE;
1577 	wl_egl_surface->use_render_done_fence = TPL_FALSE;
1578 	wl_egl_surface->set_serial_is_used = TPL_FALSE;
1580 	wl_egl_surface->latest_transform = 0;
1581 	wl_egl_surface->render_done_cnt = 0;
1582 	wl_egl_surface->serial = 0;
1584 	wl_egl_surface->vblank = NULL;
1585 	wl_egl_surface->tss_flusher = NULL;
1586 	wl_egl_surface->surface_sync = NULL;
1588 	wl_egl_surface->post_interval = surface->post_interval;
/* -1 = eventfds not yet created; see __cb_create_*_sync_fd(). */
1590 	wl_egl_surface->commit_sync.fd = -1;
1591 	wl_egl_surface->presentation_sync.fd = -1;
1595 	for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
1596 		wl_egl_surface->buffers[i] = NULL;
1597 	wl_egl_surface->buffer_cnt = 0;
/* Attach (or create) the tizen_private block on the wl_egl_window and hook
 * the rotation / serial / sync-fd callbacks back to this surface. */
1601 		struct tizen_private *tizen_private = NULL;
1603 		if (wl_egl_window->driver_private)
1604 			tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
1606 			tizen_private = tizen_private_create();
1607 			wl_egl_window->driver_private = (void *)tizen_private;
1610 		if (tizen_private) {
1611 			tizen_private->data = (void *)wl_egl_surface;
1612 			tizen_private->rotate_callback = (void *)__cb_rotate_callback;
1613 			tizen_private->get_rotation_capability = (void *)
1614 				__cb_get_rotation_capability;
1615 			tizen_private->set_window_serial_callback = (void *)
1616 				__cb_set_window_serial_callback;
1617 			tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
1618 			tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
1620 			wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
1621 			wl_egl_window->resize_callback = (void *)__cb_resize_callback;
1625 	tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
1626 	tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
1628 	tpl_gmutex_init(&wl_egl_surface->buffers_mutex);
1630 	tpl_gmutex_init(&wl_egl_surface->surf_mutex);
1631 	tpl_gcond_init(&wl_egl_surface->surf_cond);
1633 	/* Initialize in thread */
1634 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1635 	tpl_gsource_send_message(wl_egl_surface->surf_source, 1);
/* Blocks until __thread_func_surf_dispatch signals surf_cond. */
1636 	tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
1637 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1639 	TPL_ASSERT(wl_egl_surface->tbm_queue);
1641 	TPL_INFO("[SURFACE_INIT]",
1642 			  "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)",
1643 			  surface, wl_egl_surface, wl_egl_surface->surf_source);
1645 	return TPL_ERROR_NONE;
1647 surf_source_create_fail:
1648 	free(wl_egl_surface);
1649 	surface->backend.data = NULL;
1650 	return TPL_ERROR_INVALID_OPERATION;
/* Creates the surface's tbm_surface_queue via wayland-tbm (tiled variant when
 * the buffer manager reports TBM_BUFMGR_CAPABILITY_TILED_MEMORY), enables
 * GUARANTEE_CYCLE mode and registers the reset/acquirable callbacks.
 * Returns NULL on any failure, destroying a partially set-up queue.
 * NOTE(review): the elided dump drops the queue-size/format arguments and the
 * final return; only the visible code is described. */
1653 static tbm_surface_queue_h
1654 _thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface,
1655 						 struct wayland_tbm_client *wl_tbm_client,
1658 	tbm_surface_queue_h tbm_queue = NULL;
1659 	tbm_bufmgr bufmgr = NULL;
1660 	unsigned int capability;
1662 	struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
1663 	int width = wl_egl_surface->width;
1664 	int height = wl_egl_surface->height;
1665 	int format = wl_egl_surface->format;
1667 	if (!wl_tbm_client || !wl_surface) {
1668 		TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)",
1669 				wl_tbm_client, wl_surface);
/* Probe the buffer manager only to read its capability bits. */
1673 	bufmgr = tbm_bufmgr_init(-1);
1674 	capability = tbm_bufmgr_get_capability(bufmgr);
1675 	tbm_bufmgr_deinit(bufmgr);
1677 	if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
1678 		tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
1686 		tbm_queue = wayland_tbm_client_create_surface_queue(
1696 		TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)",
/* GUARANTEE_CYCLE keeps dequeue order cycling through all buffers. */
1701 	if (tbm_surface_queue_set_modes(
1702 			tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
1703 				TBM_SURFACE_QUEUE_ERROR_NONE) {
1704 		TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
1706 		tbm_surface_queue_destroy(tbm_queue);
1710 	if (tbm_surface_queue_add_reset_cb(
1712 			__cb_tbm_queue_reset_callback,
1713 			(void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1714 		TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
1716 		tbm_surface_queue_destroy(tbm_queue);
1720 	if (tbm_surface_queue_add_acquirable_cb(
1722 			__cb_tbm_queue_acquirable_callback,
1723 			(void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
1724 		TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
1726 		tbm_surface_queue_destroy(tbm_queue);
/* Creates a tdm_client_vblank object on the "primary" output, with fake
 * vblank enabled and async (sync=0) delivery. Returns NULL on failure.
 * NOTE(review): the final `return vblank;` line was elided from this dump. */
1733 static tdm_client_vblank*
1734 _thread_create_tdm_client_vblank(tdm_client *tdm_client)
1736 	tdm_client_vblank *vblank = NULL;
1737 	tdm_client_output *tdm_output = NULL;
1738 	tdm_error tdm_err = TDM_ERROR_NONE;
1741 		TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
1745 	tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
1746 	if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
1747 		TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
1751 	vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
1752 	if (!vblank || tdm_err != TDM_ERROR_NONE) {
1753 		TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
/* Fake vblank keeps frame pacing alive when the output is off. */
1757 	tdm_client_vblank_set_enable_fake(vblank, 1);
1758 	tdm_client_vblank_set_sync(vblank, 0);
/* Thread-side surface init (dispatched via message 1): creates the tbm_queue,
 * the per-surface tdm vblank, the shm flusher (if the display advertises
 * tizen_surface_shm), the explicit-sync surface object (if enabled), and the
 * vblank-waiting / presentation-feedback lists. Runs on the wayland-egl
 * thread while the caller blocks in __tpl_wl_egl_surface_init(). */
1764 _thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
1766 	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1768 	wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
1770 								wl_egl_display->wl_tbm_client,
1772 	if (!wl_egl_surface->tbm_queue) {
1773 		TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
1774 				wl_egl_surface, wl_egl_display->wl_tbm_client);
1778 	TPL_INFO("[QUEUE_CREATION]",
1779 			 "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)",
1780 			 wl_egl_surface, wl_egl_surface->wl_surface,
1781 			 wl_egl_display->wl_tbm_client);
1782 	TPL_INFO("[QUEUE_CREATION]",
1783 			 "tbm_queue(%p) size(%d x %d) X %d format(%d)",
1784 			 wl_egl_surface->tbm_queue,
1785 			 wl_egl_surface->width,
1786 			 wl_egl_surface->height,
1788 			 wl_egl_surface->format);
/* vblank is optional: failure just means no vblank-paced commits. */
1790 	wl_egl_surface->vblank = _thread_create_tdm_client_vblank(
1791 								wl_egl_display->tdm_client);
1792 	if (wl_egl_surface->vblank) {
1793 		TPL_INFO("[VBLANK_INIT]",
1794 				 "wl_egl_surface(%p) tdm_client(%p) vblank(%p)",
1795 				 wl_egl_surface, wl_egl_display->tdm_client,
1796 				 wl_egl_surface->vblank);
1799 	if (wl_egl_display->tss) {
1800 		wl_egl_surface->tss_flusher =
1801 			tizen_surface_shm_get_flusher(wl_egl_display->tss,
1802 										  wl_egl_surface->wl_surface);
1805 	if (wl_egl_surface->tss_flusher) {
1806 		tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
1807 											   &tss_flusher_listener,
1809 		TPL_INFO("[FLUSHER_INIT]",
1810 				 "wl_egl_surface(%p) tss_flusher(%p)",
1811 				 wl_egl_surface, wl_egl_surface->tss_flusher);
1814 	if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) {
1815 		wl_egl_surface->surface_sync =
1816 			zwp_linux_explicit_synchronization_v1_get_synchronization(
1817 					wl_egl_display->explicit_sync, wl_egl_surface->wl_surface);
1818 		if (wl_egl_surface->surface_sync) {
1819 			TPL_INFO("[EXPLICIT_SYNC_INIT]",
1820 					 "wl_egl_surface(%p) surface_sync(%p)",
1821 					 wl_egl_surface, wl_egl_surface->surface_sync);
/* Failure downgrades the whole display to implicit sync. */
1823 			TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)",
1825 			wl_egl_display->use_explicit_sync = TPL_FALSE;
1829 	wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
1830 	wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
/* Drains every tracked wl_egl_buffer from wl_egl_surface->buffers[]:
 * buffers past acquire (ACQUIRED/WAITING_SIGNALED/WAITING_VBLANK/COMMITTED)
 * are released back to the tbm_queue, DEQUEUED ones get cancel_dequeue, and
 * buffers between ENQUEUED and WAITING_VBLANK are first waited on (with a
 * timeout) so their in-flight transition finishes. Locking order per
 * iteration: wl_event_mutex -> buffers_mutex -> per-buffer mutex.
 * NOTE(review): loop-index (`idx`) management lines were elided from this
 * dump; comments describe only the visible code. */
1834 _tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
1836 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
1837 	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
1838 	tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
1839 	tpl_bool_t need_to_release = TPL_FALSE;
1840 	tpl_bool_t need_to_cancel = TPL_FALSE;
1843 	while (wl_egl_surface->buffer_cnt) {
1844 		tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
1845 		tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
1846 		wl_egl_buffer = wl_egl_surface->buffers[idx];
1848 		if (wl_egl_buffer) {
/* Claim the slot before dropping the locks. */
1849 			wl_egl_surface->buffers[idx] = NULL;
1850 			wl_egl_surface->buffer_cnt--;
1852 			tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
1853 			tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
1858 		tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
1860 		tpl_gmutex_lock(&wl_egl_buffer->mutex);
1862 		TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
1864 				  wl_egl_buffer->tbm_surface,
1865 				  status_to_string[wl_egl_buffer->status]);
1867 		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
1868 		/* It has been acquired but has not yet been released, so this
1869 		 * buffer must be released. */
1870 		need_to_release = (wl_egl_buffer->status == ACQUIRED ||
1871 						   wl_egl_buffer->status == WAITING_SIGNALED ||
1872 						   wl_egl_buffer->status == WAITING_VBLANK ||
1873 						   wl_egl_buffer->status == COMMITTED);
1874 		/* After dequeue, it has not been enqueued yet
1875 		 * so cancel_dequeue must be performed. */
1876 		need_to_cancel = wl_egl_buffer->status == DEQUEUED;
1878 		if (wl_egl_buffer->status >= ENQUEUED &&
1879 			wl_egl_buffer->status < WAITING_VBLANK) {
1880 			tpl_result_t wait_result = TPL_ERROR_NONE;
/* Drop wl_event_mutex while waiting so the thread can make progress. */
1881 			tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
1882 			wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
1883 											  &wl_egl_buffer->mutex,
1885 			tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
1886 			if (wait_result == TPL_ERROR_TIME_OUT)
1887 				TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
1891 		if (need_to_release) {
1892 			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
1893 												wl_egl_buffer->tbm_surface);
1894 			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1895 				TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
1896 						wl_egl_buffer->tbm_surface, tsq_err);
1899 		if (need_to_cancel) {
1900 			tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
1901 													   wl_egl_buffer->tbm_surface);
1902 			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
1903 				TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
1904 						wl_egl_buffer->tbm_surface, tsq_err);
1907 		tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* Drop the ref taken when the buffer entered the surface's tracking array. */
1909 		if (need_to_release || need_to_cancel)
1910 			tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
1912 		tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Backend surface-fini entry point (caller thread). Clears all tracked
 * buffers, destroys the surface gsource (which runs
 * _thread_wl_egl_surface_fini on the wayland-egl thread and waits — the
 * TPL_TRUE argument), unhooks every wl_egl_window callback, frees the
 * tizen_private block, then tears down the surface's mutexes/cond and frees
 * the backend data. The lock/unlock-before-clear pairs ensure no other
 * thread still holds the mutexes when they are cleared. */
1919 __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
1921 	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
1922 	tpl_wl_egl_display_t *wl_egl_display = NULL;
1924 	TPL_ASSERT(surface);
1925 	TPL_ASSERT(surface->display);
1927 	TPL_CHECK_ON_FALSE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW);
1929 	wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data;
1930 	TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
1932 	wl_egl_display = wl_egl_surface->wl_egl_display;
1933 	TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
1935 	TPL_INFO("[SURFACE_FINI][BEGIN]",
1936 			 "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
1938 			 wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
1940 	_tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
/* Destroys the gsource; finalize runs _thread_wl_egl_surface_fini(). */
1942 	if (wl_egl_surface->surf_source)
1943 		tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
1944 	wl_egl_surface->surf_source = NULL;
1946 	_print_buffer_lists(wl_egl_surface);
1948 	if (wl_egl_surface->wl_egl_window) {
1949 		struct tizen_private *tizen_private = NULL;
1950 		struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
1951 		TPL_INFO("[WL_EGL_WINDOW_FINI]",
1952 				 "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
1953 				 wl_egl_surface, wl_egl_window,
1954 				 wl_egl_surface->wl_surface);
1955 		tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
1956 		if (tizen_private) {
/* Unhook everything installed by __tpl_wl_egl_surface_init(). */
1957 			tizen_private->set_window_serial_callback = NULL;
1958 			tizen_private->rotate_callback = NULL;
1959 			tizen_private->get_rotation_capability = NULL;
1960 			tizen_private->create_presentation_sync_fd = NULL;
1961 			tizen_private->create_commit_sync_fd = NULL;
1962 			tizen_private->set_frontbuffer_callback = NULL;
1963 			tizen_private->merge_sync_fds = NULL;
1964 			tizen_private->data = NULL;
1965 			free(tizen_private);
1967 			wl_egl_window->driver_private = NULL;
1970 		wl_egl_window->destroy_window_callback = NULL;
1971 		wl_egl_window->resize_callback = NULL;
1973 		wl_egl_surface->wl_egl_window = NULL;
1976 	wl_egl_surface->wl_surface = NULL;
1977 	wl_egl_surface->wl_egl_display = NULL;
1978 	wl_egl_surface->tpl_surface = NULL;
/* lock/unlock before clear: wait out any late holder of each mutex. */
1980 	tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
1981 	tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
1982 	tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
1984 	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
1985 	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
1986 	tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex);
1988 	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
1989 	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
1990 	tpl_gmutex_clear(&wl_egl_surface->surf_mutex);
1991 	tpl_gcond_clear(&wl_egl_surface->surf_cond);
1993 	TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface);
1995 	free(wl_egl_surface);
1996 	surface->backend.data = NULL;
2000 __tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
2003 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
2005 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2007 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
2009 TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2011 TPL_INFO("[SET_PREROTATION_CAPABILITY]",
2012 "wl_egl_surface(%p) prerotation capability set to [%s]",
2013 wl_egl_surface, (set ? "TRUE" : "FALSE"));
2015 wl_egl_surface->prerotation_capability = set;
2016 return TPL_ERROR_NONE;
2020 __tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
2023 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
2025 TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
2027 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
2029 TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
2031 TPL_INFO("[SET_POST_INTERVAL]",
2032 "wl_egl_surface(%p) post_interval(%d -> %d)",
2033 wl_egl_surface, wl_egl_surface->post_interval, post_interval);
2035 wl_egl_surface->post_interval = post_interval;
2037 return TPL_ERROR_NONE;
/* Returns TPL_TRUE while the surface is in a usable state; TPL_FALSE when a
 * queue reset (resize / activation change, see
 * __cb_tbm_queue_reset_callback) is pending and the caller should
 * re-acquire its buffers. */
2041 __tpl_wl_egl_surface_validate(tpl_surface_t *surface)
2043 	tpl_bool_t retval = TPL_TRUE;
2045 	TPL_ASSERT(surface);
2046 	TPL_ASSERT(surface->backend.data);
2048 	tpl_wl_egl_surface_t *wl_egl_surface =
2049 		(tpl_wl_egl_surface_t *)surface->backend.data;
2051 	retval = !(wl_egl_surface->reset);
/* Reports the surface's current size as held by its tbm_surface_queue.
 * NOTE(review): the dropped lines 2062/2064 presumably NULL-guard the width/
 * height out-parameters — confirm against the original file. */
2057 __tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
2059 	tpl_wl_egl_surface_t *wl_egl_surface =
2060 		(tpl_wl_egl_surface_t *)surface->backend.data;
2063 		*width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2065 		*height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
2068 #define CAN_DEQUEUE_TIMEOUT_MS 10000
/* Recovery path for a CAN_DEQUEUE timeout (see the dequeue entry point):
 * flushes the tbm_queue, then force-releases every tracked buffer stuck in
 * COMMITTED state so its queue slot becomes dequeuable again.
 * Returns TPL_ERROR_NONE or TPL_ERROR_INVALID_OPERATION if the flush fails. */
2071 _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
2073 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2075 	_print_buffer_lists(wl_egl_surface);
2077 	if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
2078 		!= TBM_SURFACE_QUEUE_ERROR_NONE) {
2079 		TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
2080 				wl_egl_surface->tbm_queue, tsq_err);
2081 		return TPL_ERROR_INVALID_OPERATION;
2086 		tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2087 		for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
2088 			tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
2089 			wl_egl_buffer = wl_egl_surface->buffers[i];
2090 			tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
/* Only COMMITTED buffers are reclaimed; mark RELEASED before the queue call. */
2091 			if (wl_egl_buffer && wl_egl_buffer->status == COMMITTED) {
2092 				wl_egl_buffer->status = RELEASED;
2093 				tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2094 													wl_egl_buffer->tbm_surface);
2095 				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2096 					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
2097 							wl_egl_buffer->tbm_surface, tsq_err);
2098 				tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
2103 	TPL_INFO("[FORCE_FLUSH]",
2104 			 "wl_egl_surface(%p) tbm_queue(%p)",
2105 			 wl_egl_surface, wl_egl_surface->tbm_queue);
2107 	return TPL_ERROR_NONE;
/* (Re)initializes per-frame state on a wl_egl_buffer each time it is handed
 * out by dequeue: clears fences/sync fds, captures the current transform from
 * tizen_private, and assigns the frame serial (app-provided when
 * set_serial_is_used, otherwise auto-incremented). Also drops any damage
 * rects left over from the previous frame. */
2111 _wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer,
2112 					tpl_wl_egl_surface_t *wl_egl_surface)
2114 	struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2115 	struct tizen_private *tizen_private =
2116 		(struct tizen_private *)wl_egl_window->driver_private;
2118 	TPL_ASSERT(tizen_private);
2120 	wl_egl_buffer->draw_done = TPL_FALSE;
2121 	wl_egl_buffer->need_to_commit = TPL_TRUE;
/* -1 = no fence / eventfd attached for this frame yet. */
2123 	wl_egl_buffer->acquire_fence_fd = -1;
2124 	wl_egl_buffer->release_fence_fd = -1;
2125 	wl_egl_buffer->commit_sync_fd = -1;
2126 	wl_egl_buffer->presentation_sync_fd = -1;
2128 	wl_egl_buffer->buffer_release = NULL;
2130 	wl_egl_buffer->transform = tizen_private->transform;
/* w_rotated flags a window-transform change so commit can report it. */
2132 	if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
2133 		wl_egl_buffer->w_transform = tizen_private->window_transform;
2134 		wl_egl_buffer->w_rotated = TPL_TRUE;
2137 	if (wl_egl_surface->set_serial_is_used) {
2138 		wl_egl_buffer->serial = wl_egl_surface->serial;
2140 		wl_egl_buffer->serial = ++tizen_private->serial;
2143 	if (wl_egl_buffer->rects) {
2144 		free(wl_egl_buffer->rects);
2145 		wl_egl_buffer->rects = NULL;
2146 		wl_egl_buffer->num_rects = 0;
2150 static tpl_wl_egl_buffer_t *
2151 _get_wl_egl_buffer(tbm_surface_h tbm_surface)
2153 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2154 tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2155 (void **)&wl_egl_buffer);
2156 return wl_egl_buffer;
/* Returns the wl_egl_buffer wrapper for a tbm_surface, creating it on first
 * sight: attaches it as tbm user data (freed via __cb_wl_egl_buffer_free),
 * registers it in the surface's buffers[] tracking array, then refreshes its
 * per-frame state via _wl_egl_buffer_init(). Returns NULL on allocation
 * failure.
 * NOTE(review): line-number gaps show some lines (braces, set_user_data
 * argument continuation) were elided from this dump. */
2159 static tpl_wl_egl_buffer_t *
2160 _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
2161 					  tbm_surface_h tbm_surface)
2163 	tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2164 	struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2166 	wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2168 	if (!wl_egl_buffer) {
2169 		wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t));
2170 		TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL);
2172 		tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2173 										   (tbm_data_free)__cb_wl_egl_buffer_free);
2174 		tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
2177 		wl_egl_buffer->wl_buffer = NULL;
2178 		wl_egl_buffer->tbm_surface = tbm_surface;
2179 		wl_egl_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
2180 		wl_egl_buffer->wl_egl_surface = wl_egl_surface;
2182 		wl_egl_buffer->status = RELEASED;
2184 		wl_egl_buffer->dx = wl_egl_window->dx;
2185 		wl_egl_buffer->dy = wl_egl_window->dy;
2186 		wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
2187 		wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
2189 		tpl_gmutex_init(&wl_egl_buffer->mutex);
2190 		tpl_gcond_init(&wl_egl_buffer->cond);
2192 		tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
/* First free slot in the fixed-size tracking array becomes this buffer's idx. */
2195 		for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
2196 			if (wl_egl_surface->buffers[i] == NULL) break;
2198 		wl_egl_surface->buffer_cnt++;
2199 		wl_egl_surface->buffers[i] = wl_egl_buffer;
2200 		wl_egl_buffer->idx = i;
2202 		tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
2204 		TPL_INFO("[WL_EGL_BUFFER_CREATE]",
2205 				 "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2206 				 wl_egl_surface, wl_egl_buffer, tbm_surface,
2207 				 wl_egl_buffer->bo_name);
/* Runs for both new and recycled buffers: resets per-frame state. */
2210 	_wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface);
2212 	return wl_egl_buffer;
/*
 * Dequeue a renderable tbm_surface from the surface's tbm_surface_queue
 * for the EGL client.
 *
 * Locking protocol: the TPL object lock is dropped around the blocking
 * can-dequeue wait, then wl_event_mutex is taken so the wayland event
 * thread cannot process buffer events while the dequeue proceeds.
 * On success the returned tbm_surface carries an extra internal ref
 * (dropped again at enqueue/cancel time), its wl_egl_buffer status is
 * DEQUEUED, and *release_fence (if requested) is either the buffer's
 * explicit-sync release fence or -1 (buffer usable immediately).
 * NOTE(review): the timeout_ns parameter appears unused in the visible
 * code — a fixed CAN_DEQUEUE_TIMEOUT_MS is applied instead; confirm.
 */
2215 static tbm_surface_h
2216 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
2217 int32_t *release_fence)
2219 TPL_ASSERT(surface);
2220 TPL_ASSERT(surface->backend.data);
2221 TPL_ASSERT(surface->display);
2222 TPL_ASSERT(surface->display->backend.data);
2223 TPL_OBJECT_CHECK_RETURN(surface, NULL);
2225 tpl_wl_egl_surface_t *wl_egl_surface =
2226 (tpl_wl_egl_surface_t *)surface->backend.data;
2227 tpl_wl_egl_display_t *wl_egl_display =
2228 (tpl_wl_egl_display_t *)surface->display->backend.data;
2229 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2231 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2232 tpl_bool_t is_activated = 0;
2234 tbm_surface_h tbm_surface = NULL;
/* Drop the TPL object lock while blocking so other TPL calls can run. */
2236 TPL_OBJECT_UNLOCK(surface);
2237 tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
2238 wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
2239 TPL_OBJECT_LOCK(surface);
2241 /* After the can dequeue state, lock the wl_event_mutex to prevent other
2242 * events from being processed in wayland_egl_thread
2243 * during below dequeue procedure. */
2244 tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
/* Timed out waiting for a dequeueable buffer: force-flush the queue to
 * recover, then continue as if the wait had succeeded. */
2246 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
2247 TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
2248 wl_egl_surface->tbm_queue, surface);
2249 if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
2250 TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
2251 wl_egl_surface->tbm_queue, surface);
2252 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2255 tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2259 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2260 TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
2261 wl_egl_surface->tbm_queue, surface);
2262 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2266 /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
2267 * below function [wayland_tbm_client_queue_check_activate()].
2268 * This function has to be called before tbm_surface_queue_dequeue()
2269 * in order to know what state the buffer will be dequeued next.
2271 * ACTIVATED state means non-composite mode. Client can get buffers which
2272 can be displayed directly(without compositing).
2273 * DEACTIVATED state means composite mode. Client's buffer will be displayed
2274 by compositor(E20) with compositing.
2276 is_activated = wayland_tbm_client_queue_check_activate(
2277 wl_egl_display->wl_tbm_client,
2278 wl_egl_surface->tbm_queue);
2280 wl_egl_surface->is_activated = is_activated;
/* Refresh the cached surface dimensions from the queue (it may have
 * been resized/reset since the last dequeue). */
2282 surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
2283 surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
2284 wl_egl_surface->width = surface->width;
2285 wl_egl_surface->height = surface->height;
2287 if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
2288 /* If surface->frontbuffer is already set in frontbuffer mode,
2289 * it will return that frontbuffer if it is still activated,
2290 * otherwise dequeue the new buffer after initializing
2291 * surface->frontbuffer to NULL. */
2292 if (is_activated && !wl_egl_surface->reset) {
2293 bo_name = _get_tbm_surface_bo_name(surface->frontbuffer);
2296 "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
2297 surface->frontbuffer, bo_name);
2298 TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
2299 "[DEQ]~[ENQ] BO_NAME:%d",
2301 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2302 return surface->frontbuffer;
2304 surface->frontbuffer = NULL;
2305 wl_egl_surface->need_to_enqueue = TPL_TRUE;
2308 surface->frontbuffer = NULL;
2311 tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
2314 TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d",
2315 wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err);
2316 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
/* Extra ref held while the client owns the buffer; dropped on
 * enqueue/cancel. */
2320 tbm_surface_internal_ref(tbm_surface);
2322 wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
2323 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
2325 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2326 wl_egl_buffer->status = DEQUEUED;
2328 /* If wl_egl_buffer->release_fence_fd is -1,
2329 * the tbm_surface can be used immediately.
2330 * If not, user(EGL) have to wait until signaled. */
2331 if (release_fence) {
2332 if (wl_egl_surface->surface_sync) {
2333 *release_fence = wl_egl_buffer->release_fence_fd;
2334 TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
2335 wl_egl_surface, wl_egl_buffer, *release_fence);
2337 *release_fence = -1;
/* In activated frontbuffer mode, remember this buffer so subsequent
 * dequeues can return it directly. */
2341 if (surface->is_frontbuffer_mode && is_activated)
2342 surface->frontbuffer = tbm_surface;
2344 wl_egl_surface->reset = TPL_FALSE;
2346 TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name);
2347 TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d",
2348 wl_egl_buffer->bo_name);
2349 TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2350 wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name,
2351 release_fence ? *release_fence : -1);
2353 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2354 tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
2360 __tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
2361 tbm_surface_h tbm_surface)
2363 TPL_ASSERT(surface);
2364 TPL_ASSERT(surface->backend.data);
2366 tpl_wl_egl_surface_t *wl_egl_surface =
2367 (tpl_wl_egl_surface_t *)surface->backend.data;
2368 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2369 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2371 if (!tbm_surface_internal_is_valid(tbm_surface)) {
2372 TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
2373 return TPL_ERROR_INVALID_PARAMETER;
2376 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2377 if (wl_egl_buffer) {
2378 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2379 wl_egl_buffer->status = RELEASED;
2380 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2383 tbm_surface_internal_unref(tbm_surface);
2385 tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
2387 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2388 TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
2389 tbm_surface, surface);
2390 return TPL_ERROR_INVALID_OPERATION;
2393 TPL_INFO("[CANCEL_BUFFER]", "wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
2394 wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
2396 return TPL_ERROR_NONE;
/*
 * Enqueue a finished frame into the tbm_surface_queue for presentation.
 *
 * Stores optional damage rects and the acquire fence on the buffer's
 * wl_egl_buffer (taking ownership of acquire_fence), transfers any
 * pending presentation/commit sync eventfds from the surface to the
 * buffer, marks the buffer ENQUEUED, and finally enqueues it — which
 * hands it to the acquire/commit path on the worker thread.
 * Frontbuffer mode may skip the enqueue entirely for an already
 * displayed buffer.
 */
2400 __tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
2401 tbm_surface_h tbm_surface,
2402 int num_rects, const int *rects, int32_t acquire_fence)
2404 TPL_ASSERT(surface);
2405 TPL_ASSERT(surface->display);
2406 TPL_ASSERT(surface->backend.data);
2407 TPL_ASSERT(tbm_surface);
2408 TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
2410 tpl_wl_egl_surface_t *wl_egl_surface =
2411 (tpl_wl_egl_surface_t *) surface->backend.data;
2412 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2413 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2416 if (!tbm_surface_internal_is_valid(tbm_surface)) {
2417 TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
2419 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2420 return TPL_ERROR_INVALID_PARAMETER;
2423 bo_name = _get_tbm_surface_bo_name(tbm_surface);
2425 TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
2427 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2429 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2431 /* If there are received region information, save it to wl_egl_buffer */
2432 if (num_rects && rects) {
/* Replace any damage rects left over from a previous enqueue. */
2433 if (wl_egl_buffer->rects != NULL) {
2434 free(wl_egl_buffer->rects);
2435 wl_egl_buffer->rects = NULL;
2436 wl_egl_buffer->num_rects = 0;
/* Each rect is 4 ints: x, y, width, height. */
2439 wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
2440 wl_egl_buffer->num_rects = num_rects;
2442 if (!wl_egl_buffer->rects) {
2443 TPL_ERR("Failed to allocate memory fo damage rects info.");
2444 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2445 return TPL_ERROR_OUT_OF_MEMORY;
2448 memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects);
/* Frontbuffer-mode fast path: nothing new to present. */
2451 if (!wl_egl_surface->need_to_enqueue ||
2452 !wl_egl_buffer->need_to_commit) {
2453 TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
2454 ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
2455 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2456 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2457 return TPL_ERROR_NONE;
2460 /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
2461 * commit if surface->frontbuffer that is already set and the tbm_surface
2462 * client want to enqueue are the same.
2464 if (surface->is_frontbuffer_mode) {
2465 /* The first buffer to be activated in frontbuffer mode must be
2466 * committed. Subsequence frames do not need to be committed because
2467 * the buffer is already displayed.
2469 if (surface->frontbuffer == tbm_surface)
2470 wl_egl_surface->need_to_enqueue = TPL_FALSE;
2472 if (acquire_fence != -1) {
2473 close(acquire_fence);
/* Take ownership of the new acquire fence, closing any stale one. */
2478 if (wl_egl_buffer->acquire_fence_fd != -1)
2479 close(wl_egl_buffer->acquire_fence_fd);
2481 wl_egl_buffer->acquire_fence_fd = acquire_fence;
/* Move the pending presentation-sync eventfd onto this buffer so the
 * feedback path can signal it after the frame is presented. */
2483 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2484 if (wl_egl_surface->presentation_sync.fd != -1) {
2485 wl_egl_buffer->presentation_sync_fd = wl_egl_surface->presentation_sync.fd;
2486 wl_egl_surface->presentation_sync.fd = -1;
2488 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* Same transfer for the commit-sync eventfd, signaled at wl commit. */
2490 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
2491 if (wl_egl_surface->commit_sync.fd != -1) {
2492 wl_egl_buffer->commit_sync_fd = wl_egl_surface->commit_sync.fd;
2493 wl_egl_surface->commit_sync.fd = -1;
2494 TRACE_ASYNC_BEGIN(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
2495 _get_tbm_surface_bo_name(tbm_surface));
2497 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
2499 wl_egl_buffer->status = ENQUEUED;
2501 "[ENQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2502 wl_egl_buffer, tbm_surface, bo_name, acquire_fence);
2504 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2506 tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
2508 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2509 tbm_surface_internal_unref(tbm_surface);
2510 TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
2511 tbm_surface, wl_egl_surface, tsq_err);
2512 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2513 return TPL_ERROR_INVALID_OPERATION;
/* Drop the extra reference taken at dequeue time. */
2516 tbm_surface_internal_unref(tbm_surface);
2518 TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
2520 return TPL_ERROR_NONE;
/*
 * Dispatch callback of the disposable fence-wait gsource: runs on the
 * worker thread when the buffer's acquire fence fd signals (GPU render
 * done). Wakes any waiter on the buffer's cond, then either commits the
 * buffer right away or parks it on the vblank waiting list when vblank
 * pacing is enabled and the previous vblank has not arrived yet.
 */
2524 __thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message)
2526 tpl_wl_egl_buffer_t *wl_egl_buffer =
2527 (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
2528 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2529 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2530 tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
2532 wl_egl_surface->render_done_cnt++;
2534 TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2535 wl_egl_buffer->acquire_fence_fd);
2537 TPL_DEBUG("[RENDER DONE] wl_egl_buffer(%p) tbm_surface(%p)",
2538 wl_egl_buffer, tbm_surface);
/* Signal anyone blocked on this buffer's cond and advance its state. */
2540 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2541 tpl_gcond_signal(&wl_egl_buffer->cond);
2542 wl_egl_buffer->status = WAITING_VBLANK;
2543 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2545 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
/* Commit now unless vblank pacing requires queuing this buffer. */
2547 if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done)
2548 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2550 __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers,
2553 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2559 __thread_func_waiting_source_finalize(tpl_gsource *gsource)
2561 tpl_wl_egl_buffer_t *wl_egl_buffer =
2562 (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
2564 TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
2565 wl_egl_buffer, wl_egl_buffer->waiting_source,
2566 wl_egl_buffer->acquire_fence_fd);
2568 close(wl_egl_buffer->acquire_fence_fd);
2569 wl_egl_buffer->acquire_fence_fd = -1;
2570 wl_egl_buffer->waiting_source = NULL;
/* gsource vtable for the per-buffer acquire-fence wait source
 * (created in _thread_surface_queue_acquire with SOURCE_TYPE_DISPOSABLE). */
2573 static tpl_gsource_functions buffer_funcs = {
2576 .dispatch = __thread_func_waiting_source_dispatch,
2577 .finalize = __thread_func_waiting_source_finalize,
/*
 * Worker-thread side of presentation: drain every acquirable buffer from
 * the tbm_surface_queue. For each acquired buffer, lazily create its
 * wl_buffer, then either commit it immediately, queue it for the next
 * vblank, or — when it still has an unsignaled acquire fence and no
 * explicit-sync support — attach a fence-wait gsource that commits later.
 * Returns TPL_ERROR_NONE, or TPL_ERROR_INVALID_OPERATION on acquire failure.
 */
2581 _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
2583 tbm_surface_h tbm_surface = NULL;
2584 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
2585 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2586 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2587 tpl_bool_t ready_to_commit = TPL_FALSE;
2589 while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) {
2590 tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue,
2592 if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
2593 TPL_ERR("Failed to acquire from tbm_queue(%p)",
2594 wl_egl_surface->tbm_queue);
2595 return TPL_ERROR_INVALID_OPERATION;
/* Hold a ref until the compositor releases the buffer. */
2598 tbm_surface_internal_ref(tbm_surface);
2600 wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
2601 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
2602 "wl_egl_buffer sould be not NULL");
2604 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2606 wl_egl_buffer->status = ACQUIRED;
2608 TPL_LOG_T("WL_EGL", "[ACQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
2609 wl_egl_buffer, tbm_surface,
2610 _get_tbm_surface_bo_name(tbm_surface));
/* Lazily create the wl_buffer proxy on first acquire of this buffer. */
2612 if (wl_egl_buffer->wl_buffer == NULL) {
2613 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2614 wl_egl_buffer->wl_buffer =
2615 (struct wl_proxy *)wayland_tbm_client_create_buffer(
2616 wl_egl_display->wl_tbm_client, tbm_surface);
2618 if (!wl_egl_buffer->wl_buffer) {
2619 TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
2620 wl_egl_display->wl_tbm_client, tbm_surface);
2624 if (wl_egl_buffer->acquire_fence_fd != -1) {
/* With explicit sync the fence is forwarded to the compositor,
 * so the commit does not need to wait for it here. */
2625 if (wl_egl_surface->surface_sync)
2626 ready_to_commit = TPL_TRUE;
/* Otherwise wait for the fence with a disposable gsource; the
 * commit happens in its dispatch callback. */
2628 if (wl_egl_buffer->waiting_source) {
2629 tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
2630 wl_egl_buffer->waiting_source = NULL;
2633 wl_egl_buffer->waiting_source =
2634 tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
2635 wl_egl_buffer->acquire_fence_fd, &buffer_funcs,
2636 SOURCE_TYPE_DISPOSABLE);
2637 wl_egl_buffer->status = WAITING_SIGNALED;
2639 TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
2640 wl_egl_buffer->acquire_fence_fd);
2642 ready_to_commit = TPL_FALSE;
/* Apply vblank pacing: commit only when the last vblank arrived. */
2646 if (ready_to_commit) {
2647 if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done)
2648 ready_to_commit = TPL_TRUE;
2650 wl_egl_buffer->status = WAITING_VBLANK;
2651 __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, wl_egl_buffer);
2652 ready_to_commit = TPL_FALSE;
2656 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2658 if (ready_to_commit)
2659 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2662 return TPL_ERROR_NONE;
2665 /* -- BEGIN -- tdm_client vblank callback function */
/*
 * Called by tdm_client when the requested vblank arrives (or times out).
 * Marks the surface's vblank as done and commits the oldest buffer that
 * was parked on the vblank waiting list, if any. A timeout is only
 * warned about — presentation continues regardless.
 */
2667 __cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
2668 unsigned int sequence, unsigned int tv_sec,
2669 unsigned int tv_usec, void *user_data)
2671 tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data;
2672 tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
2674 TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK");
2675 TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface);
2677 if (error == TDM_ERROR_TIMEOUT)
2678 TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)",
2681 wl_egl_surface->vblank_done = TPL_TRUE;
2683 tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
/* Pop the oldest buffer waiting for this vblank and commit it. */
2684 wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(
2685 wl_egl_surface->vblank_waiting_buffers,
2688 _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
2689 tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
2691 /* -- END -- tdm_client vblank callback function */
/*
 * zwp_linux_buffer_release_v1 "fenced_release" handler (explicit sync).
 * The compositor is done with the buffer but the client must wait on
 * `fence` before reusing it: store the fence as the buffer's release
 * fence, mark it RELEASED, return it to the tbm_surface_queue, and drop
 * the ref taken at acquire time. Only acts on COMMITTED buffers.
 */
2694 __cb_buffer_fenced_release(void *data,
2695 struct zwp_linux_buffer_release_v1 *release, int32_t fence)
2697 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
2698 tbm_surface_h tbm_surface = NULL;
2700 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
2702 tbm_surface = wl_egl_buffer->tbm_surface;
2704 if (tbm_surface_internal_is_valid(tbm_surface)) {
2705 if (wl_egl_buffer->status == COMMITTED) {
2706 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2707 tbm_surface_queue_error_e tsq_err;
2709 tpl_gmutex_lock(&wl_egl_buffer->mutex);
/* The release object is one-shot; destroy it now. */
2711 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
2712 wl_egl_buffer->buffer_release = NULL;
/* Reuse of this buffer must wait on the compositor's fence. */
2714 wl_egl_buffer->release_fence_fd = fence;
2715 wl_egl_buffer->status = RELEASED;
2717 TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
2718 _get_tbm_surface_bo_name(tbm_surface),
2720 TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
2721 _get_tbm_surface_bo_name(tbm_surface));
2724 "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
2725 wl_egl_buffer->wl_buffer, tbm_surface,
2726 _get_tbm_surface_bo_name(tbm_surface),
2729 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2731 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2733 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2734 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
/* Drop the reference taken when the buffer was acquired. */
2736 tbm_surface_internal_unref(tbm_surface);
2739 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/*
 * zwp_linux_buffer_release_v1 "immediate_release" handler (explicit sync).
 * Same as the fenced variant, but the compositor guarantees the buffer
 * is reusable right away, so the release fence is set to -1. Marks the
 * buffer RELEASED, returns it to the queue, and drops the acquire ref.
 */
2744 __cb_buffer_immediate_release(void *data,
2745 struct zwp_linux_buffer_release_v1 *release)
2747 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
2748 tbm_surface_h tbm_surface = NULL;
2750 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
2752 tbm_surface = wl_egl_buffer->tbm_surface;
2754 if (tbm_surface_internal_is_valid(tbm_surface)) {
2755 if (wl_egl_buffer->status == COMMITTED) {
2756 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
2757 tbm_surface_queue_error_e tsq_err;
2759 tpl_gmutex_lock(&wl_egl_buffer->mutex);
/* The release object is one-shot; destroy it now. */
2761 zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
2762 wl_egl_buffer->buffer_release = NULL;
/* -1 means the buffer may be reused without waiting on a fence. */
2764 wl_egl_buffer->release_fence_fd = -1;
2765 wl_egl_buffer->status = RELEASED;
2767 TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
2768 _get_tbm_surface_bo_name(tbm_surface));
2769 TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
2770 _get_tbm_surface_bo_name(tbm_surface));
2773 "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2774 wl_egl_buffer->wl_buffer, tbm_surface,
2775 _get_tbm_surface_bo_name(tbm_surface));
2777 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
2779 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2781 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2782 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
/* Drop the reference taken when the buffer was acquired. */
2784 tbm_surface_internal_unref(tbm_surface);
2787 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* Listener for per-commit zwp_linux_buffer_release_v1 objects; attached
 * in _thread_wl_surface_commit when explicit sync is in use.
 * NOTE(review): identifier keeps the historical "listner" spelling —
 * it is referenced elsewhere in this file. */
2791 static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
2792 __cb_buffer_fenced_release,
2793 __cb_buffer_immediate_release,
/*
 * wl_buffer "release" handler (legacy, non-explicit-sync path).
 * The compositor no longer scans out this buffer: return it to the
 * tbm_surface_queue (only when it was COMMITTED), mark it RELEASED,
 * and drop the reference taken at acquire time.
 */
2797 __cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
2799 tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
2800 tbm_surface_h tbm_surface = NULL;
2802 TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer)
2804 tbm_surface = wl_egl_buffer->tbm_surface;
2806 if (tbm_surface_internal_is_valid(tbm_surface)) {
2807 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
/* Starts as an error value; only a successful queue release below
 * allows the unref at the end. */
2808 tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
2810 tpl_gmutex_lock(&wl_egl_buffer->mutex);
2812 if (wl_egl_buffer->status == COMMITTED) {
2814 tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
2816 if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
2817 TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
2819 wl_egl_buffer->status = RELEASED;
2821 TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
2822 TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
2823 _get_tbm_surface_bo_name(tbm_surface));
2825 TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
2826 wl_egl_buffer->wl_buffer, tbm_surface,
2827 _get_tbm_surface_bo_name(tbm_surface));
2830 tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* Only drop the acquire-time ref if the queue release succeeded. */
2832 if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
2833 tbm_surface_internal_unref(tbm_surface);
2835 TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
/* wl_buffer listener; attached to each buffer's wl_buffer proxy in
 * _thread_wl_surface_commit. */
2839 static const struct wl_buffer_listener wl_buffer_release_listener = {
2840 (void *)__cb_wl_buffer_release,
/*
 * wp_presentation_feedback "sync_output" handler.
 * This backend does not track which wl_output the frame was presented
 * on, so every argument is deliberately ignored.
 */
static void
__cb_presentation_feedback_sync_output(void *data,
		struct wp_presentation_feedback *presentation_feedback,
		struct wl_output *output)
{
	TPL_IGNORE(data);
	TPL_IGNORE(presentation_feedback);
	TPL_IGNORE(output);
}
/*
 * wp_presentation_feedback "presented" handler: the frame reached the
 * screen. Signals and closes the per-frame presentation-sync eventfd,
 * destroys the feedback proxy, and removes the pst_feedback record from
 * the surface's pending list. The presentation timestamp arguments are
 * not used by this backend.
 */
2855 __cb_presentation_feedback_presented(void *data,
2856 struct wp_presentation_feedback *presentation_feedback,
2860 uint32_t refresh_nsec,
2865 TPL_IGNORE(tv_sec_hi);
2866 TPL_IGNORE(tv_sec_lo);
2867 TPL_IGNORE(tv_nsec);
2868 TPL_IGNORE(refresh_nsec);
2873 struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
2874 tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
2876 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2878 TPL_DEBUG("[FEEDBACK][PRESENTED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
2879 pst_feedback, presentation_feedback, pst_feedback->bo_name);
/* Wake the waiter on the presentation-sync eventfd, then close it. */
2881 if (pst_feedback->pst_sync_fd != -1) {
2882 int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
2884 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
2885 pst_feedback->pst_sync_fd);
2888 TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
2889 "[PRESENTATION_SYNC] bo(%d)",
2890 pst_feedback->bo_name);
2892 close(pst_feedback->pst_sync_fd);
2893 pst_feedback->pst_sync_fd = -1;
/* The feedback object is one-shot; destroy and clear the record. */
2896 wp_presentation_feedback_destroy(presentation_feedback);
2898 pst_feedback->presentation_feedback = NULL;
2899 pst_feedback->wl_egl_surface = NULL;
2900 pst_feedback->bo_name = 0;
2902 __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
2907 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/*
 * wp_presentation_feedback "discarded" handler: the frame was never
 * shown. Handled identically to "presented" — the sync eventfd is still
 * signaled so no client waiter is left blocked — then the feedback
 * proxy is destroyed and the record removed from the pending list.
 */
2911 __cb_presentation_feedback_discarded(void *data,
2912 struct wp_presentation_feedback *presentation_feedback)
2914 struct pst_feedback *pst_feedback = (struct pst_feedback *)data;
2915 tpl_wl_egl_surface_t *wl_egl_surface = pst_feedback->wl_egl_surface;
2917 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
2919 TPL_DEBUG("[FEEDBACK][DISCARDED] pst_feedback(%p) presentation_feedback(%p) bo(%d)",
2920 pst_feedback, presentation_feedback, pst_feedback->bo_name);
/* Signal the eventfd even though the frame was discarded, so waiters
 * do not block forever. */
2922 if (pst_feedback->pst_sync_fd != -1) {
2923 int ret = _write_to_eventfd(pst_feedback->pst_sync_fd);
2925 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
2926 pst_feedback->pst_sync_fd);
2929 TRACE_ASYNC_END(pst_feedback->pst_sync_fd,
2930 "[PRESENTATION_SYNC] bo(%d)",
2931 pst_feedback->bo_name);
2933 close(pst_feedback->pst_sync_fd);
2934 pst_feedback->pst_sync_fd = -1;
/* The feedback object is one-shot; destroy and clear the record. */
2937 wp_presentation_feedback_destroy(presentation_feedback);
2939 pst_feedback->presentation_feedback = NULL;
2940 pst_feedback->wl_egl_surface = NULL;
2941 pst_feedback->bo_name = 0;
2943 __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, pst_feedback,
2948 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* Listener for per-frame wp_presentation_feedback objects; attached in
 * _thread_wl_surface_commit when the presentation protocol is bound. */
2951 static const struct wp_presentation_feedback_listener feedback_listener = {
2952 __cb_presentation_feedback_sync_output, /* sync_output feedback -*/
2953 __cb_presentation_feedback_presented,
2954 __cb_presentation_feedback_discarded
2958 _thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
2960 tdm_error tdm_err = TDM_ERROR_NONE;
2961 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2963 if (wl_egl_surface->vblank == NULL) {
2964 wl_egl_surface->vblank =
2965 _thread_create_tdm_client_vblank(wl_egl_display->tdm_client);
2966 if (!wl_egl_surface->vblank) {
2967 TPL_WARN("Failed to create vblank. wl_egl_surface(%p)",
2969 return TPL_ERROR_OUT_OF_MEMORY;
2973 tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank,
2974 wl_egl_surface->post_interval,
2975 __cb_tdm_client_vblank,
2976 (void *)wl_egl_surface);
2978 if (tdm_err == TDM_ERROR_NONE) {
2979 wl_egl_surface->vblank_done = TPL_FALSE;
2980 TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK");
2982 TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
2983 return TPL_ERROR_INVALID_OPERATION;
2986 return TPL_ERROR_NONE;
/*
 * Worker-thread commit of one acquired buffer to the wl_surface:
 * lazily creates the wl_buffer, sets up presentation feedback, applies
 * transform/damage, forwards the acquire fence via explicit sync when
 * available, commits + flushes, arms the next vblank wait, and signals
 * the per-frame commit-sync eventfd.
 */
2990 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
2991 tpl_wl_egl_buffer_t *wl_egl_buffer)
2993 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
2994 struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
2995 struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
2998 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
2999 "wl_egl_buffer sould be not NULL");
/* Fallback creation of the wl_buffer proxy if acquire did not make it. */
3001 if (wl_egl_buffer->wl_buffer == NULL) {
3002 wl_egl_buffer->wl_buffer =
3003 (struct wl_proxy *)wayland_tbm_client_create_buffer(
3004 wl_egl_display->wl_tbm_client,
3005 wl_egl_buffer->tbm_surface);
3007 TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
3008 "[FATAL] Failed to create wl_buffer");
/* Listen for the compositor's release of this wl_buffer. */
3010 wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer,
3011 &wl_buffer_release_listener, wl_egl_buffer);
3013 version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
3015 /* create presentation feedback and add listener */
3016 tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
3017 if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
3019 struct pst_feedback *pst_feedback = NULL;
3020 pst_feedback = (struct pst_feedback *) calloc(1, sizeof(struct pst_feedback));
3022 pst_feedback->presentation_feedback =
3023 wp_presentation_feedback(wl_egl_display->presentation,
/* Ownership of the presentation-sync eventfd moves to pst_feedback;
 * it is signaled/closed in the presented/discarded callbacks. */
3026 pst_feedback->wl_egl_surface = wl_egl_surface;
3027 pst_feedback->bo_name = wl_egl_buffer->bo_name;
3029 pst_feedback->pst_sync_fd = wl_egl_buffer->presentation_sync_fd;
3030 wl_egl_buffer->presentation_sync_fd = -1;
3032 wp_presentation_feedback_add_listener(pst_feedback->presentation_feedback,
3033 &feedback_listener, pst_feedback);
3034 __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, pst_feedback);
3035 TRACE_ASYNC_BEGIN(pst_feedback->pst_sync_fd,
3036 "[PRESENTATION_SYNC] bo(%d)",
3037 pst_feedback->bo_name);
/* Feedback setup failed: signal and close the eventfd here so no
 * waiter blocks forever. */
3039 TPL_ERR("Failed to create presentation feedback. wl_egl_buffer(%p)",
3041 _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
3042 close(wl_egl_buffer->presentation_sync_fd);
3043 wl_egl_buffer->presentation_sync_fd = -1;
3046 tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
/* Apply a pending window-rotation transform via wayland-tbm. */
3048 if (wl_egl_buffer->w_rotated == TPL_TRUE) {
3049 wayland_tbm_client_set_buffer_transform(
3050 wl_egl_display->wl_tbm_client,
3051 (void *)wl_egl_buffer->wl_buffer,
3052 wl_egl_buffer->w_transform);
3053 wl_egl_buffer->w_rotated = TPL_FALSE;
/* Only re-send the surface transform when it actually changed. */
3056 if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
3057 wl_egl_surface->latest_transform = wl_egl_buffer->transform;
3058 wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
3061 if (wl_egl_window) {
3062 wl_egl_window->attached_width = wl_egl_buffer->width;
3063 wl_egl_window->attached_height = wl_egl_buffer->height;
3066 wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer,
3067 wl_egl_buffer->dx, wl_egl_buffer->dy);
/* No damage rects: damage the whole buffer (API variant chosen by
 * wl_surface version — damage vs damage_buffer). */
3069 if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) {
3071 wl_surface_damage(wl_surface,
3072 wl_egl_buffer->dx, wl_egl_buffer->dy,
3073 wl_egl_buffer->width, wl_egl_buffer->height);
3075 wl_surface_damage_buffer(wl_surface,
3077 wl_egl_buffer->width, wl_egl_buffer->height);
/* Per-rect damage; rects are stored as x, y, w, h quadruples with the
 * y-coordinate flipped from GL to wayland orientation. */
3081 for (i = 0; i < wl_egl_buffer->num_rects; i++) {
3083 wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] +
3084 wl_egl_buffer->rects[i * 4 + 3]);
3086 wl_surface_damage(wl_surface,
3087 wl_egl_buffer->rects[i * 4 + 0],
3089 wl_egl_buffer->rects[i * 4 + 2],
3090 wl_egl_buffer->rects[i * 4 + 3]);
3092 wl_surface_damage_buffer(wl_surface,
3093 wl_egl_buffer->rects[i * 4 + 0],
3095 wl_egl_buffer->rects[i * 4 + 2],
3096 wl_egl_buffer->rects[i * 4 + 3]);
3101 wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
3102 (void *)wl_egl_buffer->wl_buffer,
3103 wl_egl_buffer->serial);
/* Explicit sync: forward the acquire fence to the compositor and set
 * up a one-shot buffer_release object for the release fence. */
3105 if (wl_egl_display->use_explicit_sync &&
3106 wl_egl_surface->surface_sync) {
3108 zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
3109 wl_egl_buffer->acquire_fence_fd);
3110 TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)",
3111 wl_egl_surface, wl_egl_buffer->tbm_surface, wl_egl_buffer->acquire_fence_fd);
/* The compositor dup'ed the fence; our copy can be closed. */
3112 close(wl_egl_buffer->acquire_fence_fd);
3113 wl_egl_buffer->acquire_fence_fd = -1;
3115 wl_egl_buffer->buffer_release =
3116 zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync);
3117 if (!wl_egl_buffer->buffer_release) {
3118 TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface);
3120 zwp_linux_buffer_release_v1_add_listener(
3121 wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer);
3122 TPL_DEBUG("add explicit_sync_release_listener.");
3126 wl_surface_commit(wl_surface);
3128 wl_display_flush(wl_egl_display->wl_display);
3130 TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
3131 wl_egl_buffer->bo_name);
3133 wl_egl_buffer->need_to_commit = TPL_FALSE;
3134 wl_egl_buffer->status = COMMITTED;
3137 "[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
3138 wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,
3139 wl_egl_buffer->bo_name);
/* Pace the next commit against vblank when enabled. */
3141 if (wl_egl_display->use_wait_vblank &&
3142 _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
3143 TPL_ERR("Failed to set wait vblank.");
3145 tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
/* Signal and close this frame's commit-sync eventfd. */
3147 if (wl_egl_buffer->commit_sync_fd != -1) {
3148 int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
3150 TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd);
3153 TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
3154 wl_egl_buffer->bo_name);
3155 TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)",
3156 wl_egl_surface, wl_egl_buffer->commit_sync_fd);
3158 close(wl_egl_buffer->commit_sync_fd);
3159 wl_egl_buffer->commit_sync_fd = -1;
3162 tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
/*
 * Signal an eventfd by writing the 64-bit value 1 to it.
 *
 * Returns the result of write() (byte count on success), or -1 when the
 * fd is -1 or the write fails. Does not close the fd.
 */
static int
_write_to_eventfd(int eventfd)
{
	const uint64_t value = 1;
	int ret;

	if (eventfd == -1) {
		TPL_ERR("Invalid fd(-1)");
		return -1;
	}

	/* eventfd(2) requires writes of exactly 8 bytes. */
	ret = write(eventfd, &value, sizeof(uint64_t));
	if (ret == -1)
		TPL_ERR("failed to write to fd(%d)", eventfd);

	return ret;
}
3186 __tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend)
3188 TPL_ASSERT(backend);
3190 backend->type = TPL_BACKEND_WAYLAND_THREAD;
3191 backend->data = NULL;
3193 backend->init = __tpl_wl_egl_display_init;
3194 backend->fini = __tpl_wl_egl_display_fini;
3195 backend->query_config = __tpl_wl_egl_display_query_config;
3196 backend->filter_config = __tpl_wl_egl_display_filter_config;
3197 backend->get_window_info = __tpl_wl_egl_display_get_window_info;
3198 backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
3199 backend->get_buffer_from_native_pixmap =
3200 __tpl_wl_egl_display_get_buffer_from_native_pixmap;
3204 __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
3206 TPL_ASSERT(backend);
3208 backend->type = TPL_BACKEND_WAYLAND_THREAD;
3209 backend->data = NULL;
3211 backend->init = __tpl_wl_egl_surface_init;
3212 backend->fini = __tpl_wl_egl_surface_fini;
3213 backend->validate = __tpl_wl_egl_surface_validate;
3214 backend->cancel_dequeued_buffer =
3215 __tpl_wl_egl_surface_cancel_dequeued_buffer;
3216 backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
3217 backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
3218 backend->set_rotation_capability =
3219 __tpl_wl_egl_surface_set_rotation_capability;
3220 backend->set_post_interval =
3221 __tpl_wl_egl_surface_set_post_interval;
3223 __tpl_wl_egl_surface_get_size;
/*
 * tbm user-data destructor for a wl_egl_buffer (registered against
 * KEY_WL_EGL_BUFFER in _wl_egl_buffer_create). Detaches the buffer from
 * the surface's tracking array, destroys its wl_buffer proxy and any
 * pending fence-wait source, signals+closes outstanding sync eventfds
 * so no waiter blocks forever, frees damage rects, and finally frees
 * the struct itself.
 */
3227 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
3229 tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
3230 tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
3232 TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
3233 wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
/* Remove this buffer from the surface's bookkeeping array. */
3235 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3236 if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) {
3237 wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL;
3238 wl_egl_surface->buffer_cnt--;
3240 wl_egl_buffer->idx = -1;
3242 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
3244 wl_display_flush(wl_egl_display->wl_display);
3246 if (wl_egl_buffer->wl_buffer)
3247 wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
3248 (void *)wl_egl_buffer->wl_buffer);
3250 if (wl_egl_buffer->waiting_source) {
3251 tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
3252 wl_egl_buffer->waiting_source = NULL;
/* Signal, then close, any sync eventfds still owned by this buffer so
 * clients waiting on them are unblocked before the fds disappear. */
3255 if (wl_egl_buffer->commit_sync_fd != -1) {
3256 int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
3258 TPL_ERR("Failed to send commit_sync signal to fd(%d)",
3259 wl_egl_buffer->commit_sync_fd);
3260 close(wl_egl_buffer->commit_sync_fd);
3261 wl_egl_buffer->commit_sync_fd = -1;
3264 if (wl_egl_buffer->presentation_sync_fd != -1) {
3265 int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
3267 TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
3268 wl_egl_buffer->presentation_sync_fd);
3269 close(wl_egl_buffer->presentation_sync_fd);
3270 wl_egl_buffer->presentation_sync_fd = -1;
3273 if (wl_egl_buffer->rects) {
3274 free(wl_egl_buffer->rects);
3275 wl_egl_buffer->rects = NULL;
3276 wl_egl_buffer->num_rects = 0;
3279 wl_egl_buffer->tbm_surface = NULL;
3280 wl_egl_buffer->bo_name = -1;
3282 free(wl_egl_buffer);
/* Return the exported (global) name of the tbm_surface's first buffer
 * object — used throughout this file as a stable ID for logging.
 *
 * @param tbm_surface  surface whose bo[0] name is queried.
 * @return exported bo name from tbm_bo_export().
 */
3286 _get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
3288 return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
/* Debug helper: dump every occupied slot of the surface's buffers[]
 * array (index, pointers, bo name, lifecycle status) to the TPL log.
 *
 * Takes buffers_mutex for the whole walk so the array cannot change
 * underneath it.
 *
 * NOTE(review): the declaration of `idx` and a TPL_INFO line inside the
 * loop are elided from this excerpt — confirm against the full file.
 *
 * @param wl_egl_surface  surface whose buffer table is printed.
 */
3292 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
3296 tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
3297 TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)",
3298 wl_egl_surface, wl_egl_surface->buffer_cnt);
3299 for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
3300 tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
3301 if (wl_egl_buffer) { /* only occupied slots are reported */
3303 "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
3304 idx, wl_egl_buffer, wl_egl_buffer->tbm_surface,
3305 wl_egl_buffer->bo_name,
3306 status_to_string[wl_egl_buffer->status]) /* status_to_string presumably maps buffer status enum to text — confirm */;
3309 tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);