tpl_surface.c \
tpl_utils_hlist.c \
tpl_utils_map.c \
- tpl_utils_gthread.c
+ tpl_utils_gthread.c
# Wayland
if WITH_WAYLAND
libtpl_egl_la_SOURCES += tpl_wayland_egl.c \
tpl_wl_egl_thread.c \
- tpl_wayland_egl_thread.c \
tpl_wl_vk_thread.c \
wayland-vulkan/wayland-vulkan-protocol.c
endif
+++ /dev/null
-#include <sys/eventfd.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <wayland-client.h>
-#include <wayland-tbm-server.h>
-#include <wayland-tbm-client.h>
-#include <tdm_client.h>
-#include <glib.h>
-#include <glib-unix.h>
-#include <tizen-surface-client-protocol.h>
-#include <wayland-egl-backend.h>
-#include <presentation-time-client-protocol.h>
-#include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
-
-#include "tpl_utils.h"
-#include "tpl_internal.h"
-#include "wayland-egl-tizen/wayland-egl-tizen.h"
-#include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
-#include "tpl_wayland_egl_thread.h"
-#include "wayland-vulkan/wayland-vulkan-client-protocol.h"
-#include "tpl_utils.h"
-
-static int buffer_info_key;
-#define KEY_BUFFER_INFO (unsigned long)(&buffer_info_key)
-
-#define CLIENT_QUEUE_SIZE 3
-#define VK_CLIENT_QUEUE_SIZE 3
-
-/* backend name will be optimized */
-#define BACKEND "WL_VK_GL"
-
-typedef struct _twe_wl_disp_source twe_wl_disp_source;
-typedef struct _twe_wl_surf_source twe_wl_surf_source;
-typedef struct _twe_wl_buffer_info twe_wl_buffer_info;
-typedef struct _twe_tdm_source twe_tdm_source;
-typedef struct _twe_del_source twe_del_source;
-typedef struct _twe_fence_wait_source twe_fence_wait_source;
-
-struct _twe_thread_context {
- GThread *twe_thread;
- GMainLoop *twe_loop;
-
- int ref_cnt;
-
- tpl_bool_t use_wait_vblank;
- twe_tdm_source *tdm_source;
- twe_del_source *tdm_del_source;
-
- GMutex thread_mutex;
- GCond thread_cond;
-};
-
-struct _twe_thread {
- twe_thread_context *ctx;
- /* TODO : display list */
-};
-
-struct _twe_tdm_source {
- GSource gsource;
- gpointer tag;
- tdm_client *tdm_client;
- int tdm_fd;
-};
-
-struct feedback_info {
- struct wp_presentation_feedback *feedback;
- twe_wl_surf_source *surf_source;
-};
-
-struct _twe_wl_disp_source {
- GSource gsource;
- GPollFD gfd;
- struct wl_display *disp;
- struct wl_event_queue *ev_queue;
- struct wayland_tbm_client *wl_tbm_client;
- struct tizen_surface_shm *tss; /* used for surface buffer_flush */
- struct wp_presentation *presentation;
- struct zwp_linux_explicit_synchronization_v1 *explicit_sync;
- tpl_bool_t use_explicit_sync;
- struct {
- int min_buffer;
- int max_buffer;
- int present_modes;
- } surface_capabilities;
- struct wayland_vulkan *wl_vk_client;
- tpl_bool_t is_vulkan_dpy;
- tpl_bool_t prepared;
- twe_del_source *disp_del_source;
- twe_thread *thread;
- GMutex wl_event_mutex;
-
- int last_error; /* errno of the last wl_display error*/
- /* TODO : surface list */
-};
-
-struct _twe_del_source {
- GSource gsource;
- gpointer tag;
- int event_fd;
- void* target_source;
- void (*destroy_target_source_func)(void *);
-};
-
-
-struct sync_info {
- tbm_surface_h tbm_surface;
- int sync_fd;
-};
-
-struct _twe_wl_surf_source {
- GSource gsource;
- gpointer tag;
- int event_fd;
- struct wl_surface *surf;
- struct wl_egl_window *wl_egl_window;
- int latest_transform;
- int rotation;
- void *cb_data;
- int format;
- struct {
- int width, height;
- int buffer_count;
- int present_mode;
- } swapchain_properties;
- tpl_surface_cb_func_t rotate_cb;
- tpl_bool_t rotation_capability;
- tpl_list_t *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */
- tpl_list_t *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */
- tpl_list_t *fence_waiting_sources; /* Trace fence_wait_source from ENQUEUE to fence signaled */
- tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
- tpl_list_t *render_done_fences; /* for attaching to twe_thread with fences passed by enqueue */
- tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
- tbm_surface_h draw_done_buffer; /* for MAILBOX mode */
- int render_done_cnt;
-
- tdm_client_vblank *vblank;
- tpl_bool_t vblank_done;
- tpl_bool_t is_destroying;
- tpl_bool_t set_serial_is_used; /* Will be deprecated */
- unsigned int serial;
- struct tizen_surface_shm_flusher *tss_flusher;
- tbm_surface_queue_h tbm_queue;
- twe_wl_disp_source *disp_source;
- twe_del_source *surf_del_source;
-
- struct {
- GMutex mutex;
- int fd;
- } commit_sync;
-
- struct {
- GMutex mutex;
- int fd;
- } presentation_sync;
-
- GMutex surf_mutex;
-
- GMutex free_queue_mutex;
- GCond free_queue_cond;
-
- /* for waiting draw done */
- tpl_bool_t use_sync_fence;
-
- /* to use zwp_linux_surface_synchronization */
- tpl_bool_t use_surface_sync;
-
- int post_interval;
-
- struct zwp_linux_surface_synchronization_v1 *surface_sync;
-
-};
-
-struct _twe_wl_buffer_info {
- struct wl_proxy *wl_buffer;
- int dx, dy;
- int width, height;
- /* for wayland_tbm_client_set_buffer_transform */
- int w_transform;
- tpl_bool_t w_rotated;
- /* for wl_surface_set_buffer_transform */
- int transform;
- /* for damage region */
- int num_rects;
- int *rects;
- tpl_bool_t need_to_commit;
-
- /* for checking need release */
- tpl_bool_t need_to_release;
-
- /* for checking draw done */
- tpl_bool_t draw_done;
-
- /* for checking released from display server */
- tbm_fd sync_timeline;
- unsigned int sync_timestamp;
- tbm_fd sync_fd;
- tpl_bool_t is_vk_image;
-
- tbm_surface_h tbm_surface;
-
- twe_wl_surf_source *surf_source;
-
- /* for wayland_tbm_client_set_buffer_serial */
- unsigned int serial;
-
- /* to get release event via zwp_linux_buffer_release_v1 */
- struct zwp_linux_buffer_release_v1 *buffer_release;
-
- /* each buffers own its release_fence_fd, until it passes ownership
- * to it to EGL */
- int release_fence_fd;
-
- /* each buffers own its acquire_fence_fd. until it passes ownership
- * to it to SERVER */
- int acquire_fence_fd;
-
- int commit_sync_fd;
-
- struct wp_presentation_feedback *presentation_feedback;
- int presentation_sync_fd;
-
-};
-
-struct _twe_fence_wait_source {
- GSource gsource;
- gpointer tag;
- tbm_fd fence_fd;
- tbm_surface_h tbm_surface;
- twe_wl_surf_source *surf_source;
-};
-
-static twe_thread_context *_twe_ctx;
-static twe_tdm_source *
-_twe_thread_tdm_source_create(void);
-static void
-_twe_thread_tdm_source_destroy(void *source);
-twe_del_source *
-_twe_del_source_init(twe_thread_context *ctx, void *target_source);
-void
-_twe_del_source_fini(twe_del_source *source);
-static void
-_twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source,
- tbm_surface_h tbm_surface);
-static void
-_twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source);
-static void
-__cb_buffer_remove_from_list(void *data);
-static tpl_result_t
-_twe_surface_wait_vblank(twe_wl_surf_source *surf_source);
-static struct tizen_private *
-_get_tizen_private(struct wl_egl_window *);
-
-tpl_result_t
-_twe_thread_fence_wait_source_attach(twe_wl_surf_source *surf_source,
- tbm_surface_h tbm_surface, tbm_fd sync_fd);
-
-static int
-_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
-{
- return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
-}
-
-static gpointer
-_twe_thread_loop(gpointer data)
-{
- twe_thread_context *ctx = data;
-
- g_mutex_lock(&ctx->thread_mutex);
-
- if (ctx->use_wait_vblank) {
- twe_tdm_source *tdm_source = _twe_thread_tdm_source_create();
-
- if (tdm_source) {
- g_source_attach(&tdm_source->gsource,
- g_main_loop_get_context(ctx->twe_loop));
- }
-
- _twe_ctx->tdm_source = tdm_source;
-
- if (!ctx->tdm_source) {
- TPL_WARN("Failed to create tdm_source. TPL_WAIT_VLANK:DISABLED");
- }
- }
-
- g_cond_signal(&ctx->thread_cond);
- g_mutex_unlock(&ctx->thread_mutex);
-
- g_main_loop_run(ctx->twe_loop);
-
- return ctx;
-}
-
-static gboolean
-_twe_thread_del_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
-{
- twe_del_source *del_source = (twe_del_source *)source;
- tpl_result_t res = TPL_ERROR_NONE;
- GIOCondition cond;
-
- g_mutex_lock(&_twe_ctx->thread_mutex);
-
- cond = g_source_query_unix_fd(source, del_source->tag);
-
- if (cond & G_IO_IN) {
- ssize_t s;
- uint64_t u;
-
- s = read(del_source->event_fd, &u, sizeof(uint64_t));
- if (s != sizeof(uint64_t)) {
- TPL_ERR("Failed to read from event_fd(%d)",
- del_source->event_fd);
- res = TPL_ERROR_INVALID_CONNECTION;
- }
-
- if (del_source->destroy_target_source_func)
- del_source->destroy_target_source_func(del_source->target_source);
- }
-
- if (cond && !(cond & G_IO_IN)) {
- TPL_ERR("eventfd(%d) cannot wake up with other condition. cond(%d)",
- del_source->event_fd, cond);
- res = TPL_ERROR_INVALID_CONNECTION;
- }
-
- if (res != TPL_ERROR_NONE) {
- g_source_remove_unix_fd(source, del_source->tag);
-
- TPL_WARN("event_fd(%d) of del_source(%p) has been closed. it will be recreated.",
- del_source->event_fd, del_source);
-
- close(del_source->event_fd);
-
- del_source->event_fd = eventfd(0, EFD_CLOEXEC);
- if (del_source->event_fd < 0) {
- TPL_ERR("Failed to create eventfd. errno(%d)", errno);
- } else {
- del_source->tag = g_source_add_unix_fd(&del_source->gsource,
- del_source->event_fd,
- G_IO_IN);
- }
- TPL_DEBUG("[RECREATED] eventfd(%d) tag(%p)", del_source->event_fd, del_source->tag);
- }
-
- g_cond_signal(&_twe_ctx->thread_cond);
- g_mutex_unlock(&_twe_ctx->thread_mutex);
-
- return G_SOURCE_CONTINUE;
-}
-
-static void
-_twe_thread_del_source_finalize(GSource *source)
-{
- twe_del_source *del_source = (twe_del_source *)source;
-
- TPL_LOG_T(BACKEND, "gsource(%p) event_fd(%d)",
- source, del_source->event_fd);
-
- close(del_source->event_fd);
-
- del_source->tag = NULL;
- del_source->event_fd = -1;
-
- return;
-}
-
-static GSourceFuncs _twe_del_source_funcs = {
- .prepare = NULL,
- .check = NULL,
- .dispatch = _twe_thread_del_source_dispatch,
- .finalize = _twe_thread_del_source_finalize,
-};
-
-static void
-_twe_thread_del_source_trigger(twe_del_source *del_source)
-{
- uint64_t value = 1;
- int ret;
-
- ret = write(del_source->event_fd, &value, sizeof(uint64_t));
- if (ret == -1) {
- TPL_ERR("failed to send delete event. twe_del_source(%p)",
- del_source);
- return;
- }
-}
-
-twe_del_source *
-_twe_del_source_init(twe_thread_context *ctx, void *target_source)
-{
- twe_del_source *source = NULL;
-
- if (!ctx) {
- TPL_ERR("Invalid parameter. twe_thread_context is NULL");
- return NULL;
- }
-
- if (!target_source) {
- TPL_ERR("Invalid parameter. target_source is NULL");
- return NULL;
- }
-
- source = (twe_del_source *)g_source_new(&_twe_del_source_funcs,
- sizeof(twe_del_source));
- if (!source) {
- TPL_ERR("[THREAD] Failed to create twe_del_source.");
- return NULL;
- }
-
- source->event_fd = eventfd(0, EFD_CLOEXEC);
- if (source->event_fd < 0) {
- TPL_ERR("[THREAD] Failed to create eventfd. errno(%d)", errno);
- g_source_unref(&source->gsource);
- return NULL;
- }
-
- source->tag = g_source_add_unix_fd(&source->gsource,
- source->event_fd,
- G_IO_IN);
- source->target_source = target_source;
-
- g_source_attach(&source->gsource, g_main_loop_get_context(ctx->twe_loop));
-
- return source;
-}
-
-void
-_twe_del_source_fini(twe_del_source *source)
-{
- g_source_remove_unix_fd(&source->gsource, source->tag);
- g_source_destroy(&source->gsource);
- g_source_unref(&source->gsource);
-}
-
-static gboolean
-_twe_thread_tdm_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
-{
- twe_tdm_source *tdm_source = (twe_tdm_source *)source;
- tdm_error tdm_err = TDM_ERROR_NONE;
- GIOCondition cond;
-
- cond = g_source_query_unix_fd(source, tdm_source->tag);
-
- if (cond & G_IO_IN) {
- tdm_err = tdm_client_handle_events(tdm_source->tdm_client);
- }
-
- /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
- * When tdm_source is no longer available due to an unexpected situation,
- * twe_thread must remove it from the thread and destroy it.
- * In that case, tdm_vblank can no longer be used for surfaces and displays
- * that used this tdm_source. */
- if (tdm_err != TDM_ERROR_NONE) {
- TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
- tdm_err);
- TPL_WARN("tdm_source(%p) will be removed from thread.", tdm_source);
-
- g_source_remove_unix_fd(&tdm_source->gsource, tdm_source->tag);
- g_source_destroy(&tdm_source->gsource);
- g_source_unref(&tdm_source->gsource);
-
- _twe_ctx->tdm_source = NULL;
-
- if (_twe_ctx->tdm_del_source) {
- _twe_del_source_fini(_twe_ctx->tdm_del_source);
- _twe_ctx->tdm_del_source = NULL;
- }
-
- return G_SOURCE_REMOVE;
- }
-
- return G_SOURCE_CONTINUE;
-}
-
-static void
-_twe_thread_tdm_source_finalize(GSource *source)
-{
- twe_tdm_source *tdm_source = (twe_tdm_source *)source;
-
- TPL_LOG_T(BACKEND, "tdm_destroy| tdm_source(%p) tdm_client(%p)",
- tdm_source, tdm_source->tdm_client);
-
- if (tdm_source->tdm_client) {
- tdm_client_destroy(tdm_source->tdm_client);
- tdm_source->tdm_client = NULL;
- }
-
- tdm_source->tdm_fd = -1;
-}
-
-static GSourceFuncs _twe_tdm_funcs = {
- .prepare = NULL,
- .check = NULL,
- .dispatch = _twe_thread_tdm_source_dispatch,
- .finalize = _twe_thread_tdm_source_finalize,
-};
-
-static twe_tdm_source *
-_twe_thread_tdm_source_create(void)
-{
- twe_tdm_source *tdm_source = NULL;
- tdm_client *client = NULL;
- int tdm_fd = -1;
- tdm_error tdm_err = TDM_ERROR_NONE;
-
- client = tdm_client_create(&tdm_err);
- if (!client || tdm_err != TDM_ERROR_NONE) {
- TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
- return NULL;
- }
-
- tdm_err = tdm_client_get_fd(client, &tdm_fd);
- if (tdm_fd < 0 || tdm_err != TDM_ERROR_NONE) {
- TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
- tdm_client_destroy(client);
- return NULL;
- }
-
- tdm_source = (twe_tdm_source *)g_source_new(&_twe_tdm_funcs,
- sizeof(twe_tdm_source));
- if (!tdm_source) {
- TPL_ERR("Failed to create tdm_gsource\n");
- tdm_client_destroy(client);
- return NULL;
- }
-
- tdm_source->tdm_client = client;
- tdm_source->tdm_fd = tdm_fd;
- tdm_source->tag = g_source_add_unix_fd(&tdm_source->gsource,
- tdm_fd,
- G_IO_IN);
-
- TPL_LOG_T(BACKEND, "TPL_WAIT_VBLANK:DEFAULT_ENABLED");
- TPL_LOG_T(BACKEND, "tdm_source(%p) tdm_client(%p) tdm_fd(%d)",
- tdm_source, client, tdm_fd);
-
- return tdm_source;
-}
-
-static void
-_twe_thread_tdm_source_destroy(void *source)
-{
- twe_tdm_source *tdm_source = (twe_tdm_source *)source;
-
- _twe_ctx->tdm_source = NULL;
-
- g_source_remove_unix_fd(&tdm_source->gsource, tdm_source->tag);
- g_source_destroy(&tdm_source->gsource);
- g_source_unref(&tdm_source->gsource);
-}
-
-static int
-_write_to_eventfd(int eventfd)
-{
- uint64_t value = 1;
- int ret;
-
- if (eventfd == -1) {
- TPL_ERR("Invalid fd(-1)");
- return -1;
- }
-
- ret = write(eventfd, &value, sizeof(uint64_t));
- if (ret == -1) {
- TPL_ERR("failed to write to fd(%d)", eventfd);
- return ret;
- }
-
- return ret;
-}
-
-twe_thread*
-twe_thread_create(void)
-{
- twe_thread *thread = NULL;
- char *env = NULL;
-
- thread = calloc(1, sizeof(twe_thread));
- if (!thread) {
- TPL_ERR("Failed to allocate twe_thread");
- return NULL;
- }
-
- if (!_twe_ctx) {
- GMainContext *context;
-
- _twe_ctx = calloc(1, sizeof(twe_thread_context));
- if (!_twe_ctx) {
- TPL_ERR("Failed to allocate _twe_ctx");
- if (thread)
- free(thread);
- return NULL;
- }
-
- context = g_main_context_new();
- _twe_ctx->twe_loop = g_main_loop_new(context, FALSE);
- g_main_context_unref(context);
-
- g_mutex_init(&_twe_ctx->thread_mutex);
- g_cond_init(&_twe_ctx->thread_cond);
-
- _twe_ctx->use_wait_vblank = TPL_TRUE;
-
- env = tpl_getenv("TPL_WAIT_VBLANK");
- if (env && !atoi(env)) {
- _twe_ctx->use_wait_vblank = TPL_FALSE;
- }
-
- g_mutex_lock(&_twe_ctx->thread_mutex);
- _twe_ctx->twe_thread = g_thread_new("twe_thread", _twe_thread_loop,
- _twe_ctx);
- g_cond_wait(&_twe_ctx->thread_cond,
- &_twe_ctx->thread_mutex);
-
- if (_twe_ctx->tdm_source) {
- twe_tdm_source *tdm_source = _twe_ctx->tdm_source;
-
- _twe_ctx->tdm_del_source = _twe_del_source_init(_twe_ctx, tdm_source);
- if (_twe_ctx->tdm_del_source)
- _twe_ctx->tdm_del_source->destroy_target_source_func
- = _twe_thread_tdm_source_destroy;
- }
-
- g_mutex_unlock(&_twe_ctx->thread_mutex);
-
- _twe_ctx->ref_cnt = 0;
- }
-
- thread->ctx = _twe_ctx;
- _twe_ctx->ref_cnt++;
-
- TPL_LOG_T(BACKEND, "_twe_ctx(%p) twe_thread(%p)", _twe_ctx, thread);
- return thread;
-}
-
-void
-twe_thread_destroy(twe_thread* thread)
-{
- thread->ctx->ref_cnt--;
-
- if (thread->ctx->ref_cnt == 0) {
- twe_del_source *tdm_del_source = _twe_ctx->tdm_del_source;
-
- if (_twe_ctx->tdm_source) {
- g_mutex_lock(&_twe_ctx->thread_mutex);
-
- if (tdm_del_source) {
- _twe_thread_del_source_trigger(tdm_del_source);
- g_cond_wait(&_twe_ctx->thread_cond, &_twe_ctx->thread_mutex);
- }
-
- g_mutex_unlock(&_twe_ctx->thread_mutex);
- }
-
- if (tdm_del_source)
- _twe_del_source_fini(tdm_del_source);
-
- _twe_ctx->tdm_del_source = NULL;
-
- g_main_loop_quit(thread->ctx->twe_loop);
- g_thread_join(thread->ctx->twe_thread);
- g_main_loop_unref(thread->ctx->twe_loop);
-
- g_mutex_clear(&thread->ctx->thread_mutex);
- g_cond_clear(&thread->ctx->thread_cond);
-
- free(_twe_ctx);
- _twe_ctx = NULL;
- }
-
- TPL_LOG_T(BACKEND, "[THREAD DESTROY] twe_thread(%p)", thread);
-
- thread->ctx = NULL;
- free(thread);
-}
-
-static void
-_twe_display_print_err(twe_wl_disp_source *disp_source,
- const char *func_name)
-{
- int dpy_err;
- char buf[1024];
- strerror_r(errno, buf, sizeof(buf));
-
- if (disp_source->last_error == errno)
- return;
-
- TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
-
- dpy_err = wl_display_get_error(disp_source->disp);
- if (dpy_err == EPROTO) {
- const struct wl_interface *err_interface;
- uint32_t err_proxy_id, err_code;
- err_code = wl_display_get_protocol_error(disp_source->disp,
- &err_interface,
- &err_proxy_id);
- TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
- err_interface->name, err_code, err_proxy_id);
- }
-
- disp_source->last_error = errno;
-}
-
-static void
-_twe_print_buffer_list(twe_wl_surf_source *surf_source)
-{
- int count = 0;
- int idx = 0;
- tpl_list_node_t *node = NULL;
- tbm_surface_h tbm_surface = NULL;
-
- /* vblank waiting list */
- count = __tpl_list_get_count(surf_source->vblank_waiting_buffers);
- TPL_DEBUG("VBLANK WAITING BUFFERS | surf_source(%p) list(%p) count(%d)",
- surf_source, surf_source->vblank_waiting_buffers, count);
-
- while ((!node &&
- (node = __tpl_list_get_front_node(surf_source->vblank_waiting_buffers))) ||
- (node && (node = __tpl_list_node_next(node)))) {
- tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
- TPL_DEBUG("VBLANK WAITING BUFFERS | %d | tbm_surface(%p) bo(%d)",
- idx, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
- idx++;
- }
-
- idx = 0;
- node = NULL;
-
- /* in use buffers list */
- count = __tpl_list_get_count(surf_source->in_use_buffers);
- TPL_DEBUG("DEQUEUED BUFFERS | surf_source(%p) list(%p) count(%d)",
- surf_source, surf_source->in_use_buffers, count);
-
- while ((!node &&
- (node = __tpl_list_get_front_node(surf_source->in_use_buffers))) ||
- (node && (node = __tpl_list_node_next(node)))) {
- tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
- TPL_DEBUG("DEQUEUED BUFFERS | %d | tbm_surface(%p) bo(%d)",
- idx, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
- idx++;
- }
-
- idx = 0;
- node = NULL;
-
- /* committed buffers list */
- count = __tpl_list_get_count(surf_source->committed_buffers);
- TPL_DEBUG("COMMITTED BUFFERS | surf_source(%p) list(%p) count(%d)",
- surf_source, surf_source->committed_buffers, count);
-
- while ((!node &&
- (node = __tpl_list_get_front_node(surf_source->committed_buffers))) ||
- (node && (node = __tpl_list_node_next(node)))) {
- tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
- TPL_DEBUG("COMMITTED BUFFERS | %d | tbm_surface(%p) bo(%d)",
- idx, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
- idx++;
- }
-}
-
-static gboolean
-_twe_thread_wl_disp_prepare(GSource *source, gint *time)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
-
- /* If disp_source is already prepared, do nothing in this function. */
- if (disp_source->prepared)
- return FALSE;
-
- /* If there is a last_error, there is no need to poll,
- * so skip directly to dispatch.
- * prepare -> dispatch */
- if (disp_source->last_error)
- return TRUE;
-
- while (wl_display_prepare_read_queue(disp_source->disp,
- disp_source->ev_queue) != 0) {
- if (wl_display_dispatch_queue_pending(disp_source->disp,
- disp_source->ev_queue) == -1) {
- _twe_display_print_err(disp_source, "dispatch_queue_pending");
- }
- }
-
- disp_source->prepared = TPL_TRUE;
-
- wl_display_flush(disp_source->disp);
- *time = -1;
-
- return FALSE;
-}
-
-static gboolean
-_twe_thread_wl_disp_check(GSource *source)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
- gboolean ret = FALSE;
-
- if (!disp_source->prepared)
- return ret;
-
- /* If prepared, but last_error is set,
- * cancel_read is executed and FALSE is returned.
- * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
- * and skipping disp_check from prepare to disp_dispatch.
- * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
- if (disp_source->prepared && disp_source->last_error) {
- wl_display_cancel_read(disp_source->disp);
- return ret;
- }
-
- if (disp_source->gfd.revents & G_IO_IN) {
- if (wl_display_read_events(disp_source->disp) == -1)
- _twe_display_print_err(disp_source, "read_event.");
- ret = TRUE;
- } else {
- wl_display_cancel_read(disp_source->disp);
- ret = FALSE;
- }
-
- disp_source->prepared = TPL_FALSE;
-
- return ret;
-}
-
-static gboolean
-_twe_thread_wl_disp_dispatch(GSource *source, GSourceFunc cb, gpointer data)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
-
- /* If there is last_error, G_SOURCE_REMOVE should be returned
- * to remove the gsource from the main loop.
- * This is because disp_source is not valid since last_error was set.*/
- if (disp_source->last_error) {
- return G_SOURCE_REMOVE;
- }
-
- g_mutex_lock(&disp_source->wl_event_mutex);
- if (disp_source->gfd.revents & G_IO_IN) {
- if (wl_display_dispatch_queue_pending(disp_source->disp,
- disp_source->ev_queue) == -1) {
- _twe_display_print_err(disp_source, "dispatch_queue_pending");
- }
- }
-
- wl_display_flush(disp_source->disp);
- g_mutex_unlock(&disp_source->wl_event_mutex);
-
- return G_SOURCE_CONTINUE;
-}
-
-static void
-_twe_thread_wl_disp_finalize(GSource *source)
-{
- TPL_LOG_T(BACKEND, "finalize| disp_source(%p)", source);
-
- return;
-}
-
-static GSourceFuncs _twe_wl_disp_funcs = {
- .prepare = _twe_thread_wl_disp_prepare,
- .check = _twe_thread_wl_disp_check,
- .dispatch = _twe_thread_wl_disp_dispatch,
- .finalize = _twe_thread_wl_disp_finalize,
-};
-
-static struct wayland_tbm_client*
-_twe_display_init_wl_tbm_client(struct wl_display *display,
- struct wl_event_queue *ev_queue)
-{
- struct wl_proxy *wl_tbm = NULL;
- struct wayland_tbm_client *wl_tbm_client = NULL;
-
- wl_tbm_client = wayland_tbm_client_init(display);
- if (!wl_tbm_client) {
- TPL_ERR("Failed to initialize wl_tbm_client.");
- return NULL;
- }
-
- wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
- if (!wl_tbm) {
- TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", wl_tbm_client);
- wayland_tbm_client_deinit(wl_tbm_client);
- return NULL;
- }
-
- wl_proxy_set_queue(wl_tbm, ev_queue);
-
- TPL_LOG_T(BACKEND, "wl_tbm_client init| wl_tbm_client(%p)", wl_tbm_client);
- return wl_tbm_client;
-}
-
-static void
-_twe_display_fini_wl_tbm_client(struct wayland_tbm_client *wl_tbm_client)
-{
- struct wl_proxy *wl_tbm = NULL;
-
- wl_tbm = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
- if (wl_tbm) {
- wl_proxy_set_queue(wl_tbm, NULL);
- }
-
- TPL_LOG_T(BACKEND, "wl_tbm_client deinit| wl_tbm_client(%p)", wl_tbm_client);
- wayland_tbm_client_deinit(wl_tbm_client);
-}
-
-static void
-__cb_wl_vk_support_present_mode_listener(void *data,
- struct wayland_vulkan *wayland_vulkan,
- uint32_t mode)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)data;
-
- switch (mode) {
- case WAYLAND_VULKAN_PRESENT_MODE_TYPE_IMMEDIATE:
- disp_source->surface_capabilities.present_modes
- |= TPL_DISPLAY_PRESENT_MODE_IMMEDIATE;
- break;
- case WAYLAND_VULKAN_PRESENT_MODE_TYPE_MAILBOX:
- disp_source->surface_capabilities.present_modes
- |= TPL_DISPLAY_PRESENT_MODE_MAILBOX;
- break;
- case WAYLAND_VULKAN_PRESENT_MODE_TYPE_FIFO:
- disp_source->surface_capabilities.present_modes
- |= TPL_DISPLAY_PRESENT_MODE_FIFO;
- break;
- case WAYLAND_VULKAN_PRESENT_MODE_TYPE_FIFO_RELAXED:
- disp_source->surface_capabilities.present_modes
- |= TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED;
- break;
- default:
- TPL_WARN("server sent unknown present type: %d", mode);
- }
-}
-
-static struct wayland_vulkan_listener wl_vk_listener = {
- __cb_wl_vk_support_present_mode_listener,
-};
-
-#define IMPL_TIZEN_SURFACE_SHM_VERSION 2
-
-void
-__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
- uint32_t name, const char *interface,
- uint32_t version)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)data;
-
- if (!strcmp(interface, "tizen_surface_shm")) {
- disp_source->tss = wl_registry_bind(wl_registry,
- name,
- &tizen_surface_shm_interface,
- ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
- version : IMPL_TIZEN_SURFACE_SHM_VERSION));
- } else if (disp_source->is_vulkan_dpy
- && !strcmp(interface, "wayland_vulkan")) {
- disp_source->wl_vk_client =
- wl_registry_bind(wl_registry, name,
- &wayland_vulkan_interface,
- version);
- } else if (!strcmp(interface, wp_presentation_interface.name)) {
- disp_source->presentation =
- wl_registry_bind(wl_registry,
- name, &wp_presentation_interface, 1);
- TPL_DEBUG("bind wp_presentation_interface");
- } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
- char *env = tpl_getenv("TPL_EFS");
- if (env && atoi(env)) {
- disp_source->explicit_sync =
- wl_registry_bind(wl_registry, name,
- &zwp_linux_explicit_synchronization_v1_interface, 1);
- disp_source->use_explicit_sync = TPL_TRUE;
- TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
- } else {
- disp_source->use_explicit_sync = TPL_FALSE;
- }
- }
-}
-
-void
-__cb_wl_resistry_global_remove_callback(void *data,
- struct wl_registry *wl_registry,
- uint32_t name)
-{
-}
-
-static const struct wl_registry_listener registry_listener = {
- __cb_wl_resistry_global_callback,
- __cb_wl_resistry_global_remove_callback
-};
-
-static void
-_twe_display_wayland_init(twe_wl_disp_source *disp_source)
-{
- struct wl_registry *registry = NULL;
- struct wl_event_queue *queue = NULL;
- struct wl_display *display_wrapper = NULL;
- int ret;
-
- queue = wl_display_create_queue(disp_source->disp);
- if (!queue) {
- TPL_ERR("Failed to create wl_queue");
- goto fini;
- }
-
- display_wrapper = wl_proxy_create_wrapper(disp_source->disp);
- if (!display_wrapper) {
- TPL_ERR("Failed to create a proxy wrapper of wl_display");
- goto fini;
- }
-
- wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
-
- registry = wl_display_get_registry(display_wrapper);
- if (!registry) {
- TPL_ERR("Failed to create wl_registry");
- goto fini;
- }
-
- wl_proxy_wrapper_destroy(display_wrapper);
- display_wrapper = NULL;
-
- if (wl_registry_add_listener(registry, ®istry_listener,
- disp_source)) {
- TPL_ERR("Failed to wl_registry_add_listener");
- goto fini;
- }
-
- ret = wl_display_roundtrip_queue(disp_source->disp, queue);
- if (ret == -1) {
- _twe_display_print_err(disp_source, "roundtrip_queue");
- goto fini;
- }
-
- /* set tizen_surface_shm's queue as client's private queue */
- if (disp_source->tss) {
- wl_proxy_set_queue((struct wl_proxy *)disp_source->tss,
- disp_source->ev_queue);
- TPL_LOG_T(BACKEND, "tizen_surface_shm(%p) init.", disp_source->tss);
- }
-
- if (disp_source->wl_vk_client) {
- wayland_vulkan_add_listener(disp_source->wl_vk_client,
- &wl_vk_listener, disp_source);
-
- ret = wl_display_roundtrip_queue(disp_source->disp, queue);
- if (ret == -1) {
- _twe_display_print_err(disp_source, "roundtrip_queue");
- goto fini;
- }
-
- wl_proxy_set_queue((struct wl_proxy *)disp_source->wl_vk_client,
- disp_source->ev_queue);
- TPL_LOG_T(BACKEND, "wl_vk_client(%p) init.", disp_source->wl_vk_client);
- }
-
- if (disp_source->presentation) {
- wl_proxy_set_queue((struct wl_proxy *)disp_source->presentation,
- disp_source->ev_queue);
- TPL_LOG_T(BACKEND, "wp_presentation(%p) init.", disp_source->presentation);
- }
-
- if (disp_source->explicit_sync) {
- wl_proxy_set_queue((struct wl_proxy *)disp_source->explicit_sync,
- disp_source->ev_queue);
- TPL_LOG_T(BACKEND, "zwp_linux_explicit_synchronization_v1(%p) init.",
- disp_source->explicit_sync);
- }
-
-fini:
- if (display_wrapper)
- wl_proxy_wrapper_destroy(display_wrapper);
- if (registry)
- wl_registry_destroy(registry);
- if (queue)
- wl_event_queue_destroy(queue);
-}
-
-static void
-_twe_display_wayland_fini(twe_wl_disp_source *disp_source)
-{
- if (disp_source->tss) {
- TPL_LOG_T(BACKEND, "tizen_surface_shm(%p) fini.", disp_source->tss);
- tizen_surface_shm_destroy(disp_source->tss);
- disp_source->tss = NULL;
- }
-
- if (disp_source->wl_vk_client) {
- TPL_LOG_T(BACKEND, "wl_vk_client(%p) fini.", disp_source->wl_vk_client);
- wayland_vulkan_destroy(disp_source->wl_vk_client);
- disp_source->wl_vk_client = NULL;
- }
-
- if (disp_source->presentation) {
- TPL_LOG_T(BACKEND, "wp_presentation(%p) fini.", disp_source->presentation);
- wp_presentation_destroy(disp_source->presentation);
- disp_source->presentation = NULL;
- }
-
- if (disp_source->explicit_sync) {
- TPL_LOG_T(BACKEND, "zwp_linux_explicit_synchronization_v1(%p) fini.",
- disp_source->explicit_sync);
- zwp_linux_explicit_synchronization_v1_destroy(disp_source->explicit_sync);
- disp_source->explicit_sync = NULL;
- }
-}
-
-static void
-_twe_thread_wl_disp_source_destroy(void *source)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
- if (g_source_is_destroyed(&disp_source->gsource)) {
- TPL_ERR("disp_source(%p) already destroyed.", disp_source);
- return;
- }
-
- g_mutex_lock(&disp_source->wl_event_mutex);
-
- /* If disp_source is in prepared state, cancel it */
- if (disp_source->prepared) {
- wl_display_cancel_read(disp_source->disp);
- disp_source->prepared = TPL_FALSE;
- }
-
- if (wl_display_dispatch_queue_pending(disp_source->disp,
- disp_source->ev_queue) == -1) {
- _twe_display_print_err(disp_source, "dispatch_queue_pending");
- }
-
- wl_event_queue_destroy(disp_source->ev_queue);
- g_mutex_unlock(&disp_source->wl_event_mutex);
-
- TPL_INFO("[DISPLAY_DEL]", "twe_display(%p) wl_display(%p)",
- disp_source, disp_source->disp);
-
- g_mutex_clear(&disp_source->wl_event_mutex);
-
- g_source_remove_poll(&disp_source->gsource, &disp_source->gfd);
- g_source_destroy(&disp_source->gsource);
- g_source_unref(&disp_source->gsource);
-}
-
-twe_display_h
-twe_display_add(twe_thread* thread,
- struct wl_display *display,
- tpl_backend_type_t backend)
-{
- twe_thread_context *ctx = thread->ctx;
- twe_wl_disp_source *source;
- struct wayland_tbm_client *wl_tbm_client = NULL;
- struct wl_event_queue *ev_queue = NULL;
-
- ev_queue = wl_display_create_queue(display);
- if (!ev_queue) {
- TPL_ERR("Failed to create wl_event_queue.");
- return NULL;
- }
-
- wl_tbm_client = _twe_display_init_wl_tbm_client(display, ev_queue);
- if (!wl_tbm_client) {
- TPL_ERR("Failed to create wl_tbm_client.");
- wl_event_queue_destroy(ev_queue);
- return NULL;
- }
-
- source = (twe_wl_disp_source *)g_source_new(&_twe_wl_disp_funcs,
- sizeof(twe_wl_disp_source));
- if (!source) {
- TPL_ERR("Failed to create twe_wl_disp_source.");
- return NULL;
- }
-
- source->disp = display;
- source->last_error = 0;
- source->ev_queue = ev_queue;
- source->wl_tbm_client = wl_tbm_client;
- source->prepared = TPL_FALSE;
- source->gfd.fd = wl_display_get_fd(display);
- source->gfd.events = G_IO_IN | G_IO_ERR;
- source->gfd.revents = 0;
- g_mutex_init(&source->wl_event_mutex);
-
- if (backend == TPL_BACKEND_WAYLAND_VULKAN_WSI ||
- backend == TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD) {
- source->is_vulkan_dpy = TPL_TRUE;
-
- } else { /* wayland_egl backend */
- /* These are not used. It just be initialized. */
- source->is_vulkan_dpy = TPL_FALSE;
- }
-
- /* It will be changed to TPL_TRUE when zwp_linux_explicit_synchronization_v1
- * succeeds to bind. */
- source->use_explicit_sync = TPL_FALSE;
-
- source->surface_capabilities.min_buffer = 2;
- source->surface_capabilities.max_buffer = VK_CLIENT_QUEUE_SIZE;
- source->surface_capabilities.present_modes =
- TPL_DISPLAY_PRESENT_MODE_FIFO;
- _twe_display_wayland_init(source);
-
- source->disp_del_source = _twe_del_source_init(ctx, source);
- source->disp_del_source->destroy_target_source_func
- = _twe_thread_wl_disp_source_destroy;
-
- g_source_set_callback(&source->gsource, NULL, display, NULL);
- g_source_add_poll(&source->gsource, &source->gfd);
- g_source_attach(&source->gsource, g_main_loop_get_context(ctx->twe_loop));
-
- TPL_INFO("[DISPLAY_ADD]", "gsource(%p) ev_queue(%p) wl_display(%p)",
- source, source->ev_queue, display);
-
- return (twe_display_h)source;
-}
-
-tpl_result_t
-twe_display_del(twe_display_h twe_display)
-{
- gboolean is_destroyed = FALSE;
- twe_wl_disp_source *source = (twe_wl_disp_source *)twe_display;
- twe_del_source *disp_del_source = NULL;
-
- if (!source ||
- (is_destroyed = g_source_is_destroyed(&source->gsource))) {
- TPL_ERR("twe_display(%p) is invalid. | is_destroyed(%s)",
- twe_display, (is_destroyed ? "TRUE" : "FALSE"));
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- disp_del_source = source->disp_del_source;
-
- _twe_display_wayland_fini(source);
-
- _twe_display_fini_wl_tbm_client(source->wl_tbm_client);
- source->wl_tbm_client = NULL;
-
- g_mutex_lock(&_twe_ctx->thread_mutex);
-
- TPL_INFO("[DISPLAY_DEL]", "twe_display(%p) will be destroyed in thread",
- twe_display);
- _twe_thread_del_source_trigger(disp_del_source);
- g_cond_wait(&_twe_ctx->thread_cond, &_twe_ctx->thread_mutex);
- g_mutex_unlock(&_twe_ctx->thread_mutex);
-
- _twe_del_source_fini(disp_del_source);
-
- return TPL_ERROR_NONE;
-}
-
-tpl_result_t
-twe_display_lock(twe_display_h display)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)display;
- if (!disp_source || g_source_is_destroyed(&disp_source->gsource)) {
- TPL_ERR("Invalid parameter. display(%p)", display);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- g_mutex_lock(&disp_source->wl_event_mutex);
-
- return TPL_ERROR_NONE;
-}
-
-void
-twe_display_unlock(twe_display_h display)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)display;
- if (!disp_source || g_source_is_destroyed(&disp_source->gsource)) {
- TPL_ERR("Invalid parameter. display(%p)", display);
- return;
- }
-
- g_mutex_unlock(&disp_source->wl_event_mutex);
-}
-
-tpl_result_t
-twe_display_get_buffer_count(twe_display_h display,
- int *min, int *max)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)display;
- if (!disp_source || g_source_is_destroyed(&disp_source->gsource)) {
- TPL_ERR("Invalid parameter. display(%p)", display);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (min) *min = disp_source->surface_capabilities.min_buffer;
- if (max) *max = disp_source->surface_capabilities.max_buffer;
-
- return TPL_ERROR_NONE;
-}
-
-tpl_result_t
-twe_display_get_present_mode(twe_display_h display,
- int *present_modes)
-{
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)display;
- if (!disp_source || g_source_is_destroyed(&disp_source->gsource)) {
- TPL_ERR("Invalid parameter. display(%p)", display);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (present_modes) {
- *present_modes = TPL_DISPLAY_PRESENT_MODE_MAILBOX |
- TPL_DISPLAY_PRESENT_MODE_IMMEDIATE |
- TPL_DISPLAY_PRESENT_MODE_FIFO |
- TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED |
- disp_source->surface_capabilities.present_modes;
- }
-
- return TPL_ERROR_NONE;
-}
-
-static struct tizen_private *
-_get_tizen_private(struct wl_egl_window * wl_egl_window)
-{
- if (wl_egl_window && wl_egl_window->driver_private)
- return (struct tizen_private *)wl_egl_window->driver_private;
-
- return NULL;
-}
-
-static void
-__cb_destroy_callback(void *private)
-{
- struct tizen_private *tizen_private = (struct tizen_private *)private;
- twe_wl_surf_source *surf_source = NULL;
-
- if (!tizen_private) {
- TPL_LOG_T(BACKEND, "[DESTROY_CB] Already destroyed surface");
- return;
- }
-
- surf_source = (twe_wl_surf_source *)tizen_private->data;
- if (surf_source) {
- TPL_LOG_T(BACKEND, "[DESTROY_CB] wl_egl_window(%p) surf_source(%p)",
- surf_source->wl_egl_window, surf_source);
- g_mutex_lock(&surf_source->surf_mutex);
- surf_source->wl_egl_window->destroy_window_callback = NULL;
- surf_source->wl_egl_window->resize_callback = NULL;
- surf_source->wl_egl_window->driver_private = NULL;
- surf_source->wl_egl_window = NULL;
- surf_source->surf = NULL;
- surf_source->is_destroying = TPL_TRUE;
-
- tizen_private->set_window_serial_callback = NULL;
- tizen_private->rotate_callback = NULL;
- tizen_private->get_rotation_capability = NULL;
- tizen_private->data = NULL;
- free(tizen_private);
- tizen_private = NULL;
- g_mutex_unlock(&surf_source->surf_mutex);
- }
-
- if (tizen_private) {
- tizen_private->set_window_serial_callback = NULL;
- tizen_private->rotate_callback = NULL;
- tizen_private->get_rotation_capability = NULL;
- tizen_private->data = NULL;
- free(tizen_private);
- tizen_private = NULL;
- }
-}
-
-static void
-__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
-{
- TPL_ASSERT(private);
- TPL_ASSERT(wl_egl_window);
-
- struct tizen_private *tizen_private = (struct tizen_private *)private;
- twe_wl_surf_source *source = (twe_wl_surf_source *)tizen_private->data;
- int cur_w, cur_h, req_w, req_h, format;
-
- if (!source) {
- TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
- wl_egl_window);
- return;
- }
-
- format = tbm_surface_queue_get_format(source->tbm_queue);
- cur_w = tbm_surface_queue_get_width(source->tbm_queue);
- cur_h = tbm_surface_queue_get_height(source->tbm_queue);
- req_w = wl_egl_window->width;
- req_h = wl_egl_window->height;
-
- TPL_LOG_T(BACKEND, "[RESIZE_CB] wl_egl_window(%p) (%dx%d) -> (%dx%d)",
- wl_egl_window, cur_w, cur_h, req_w, req_h);
-
- if (tbm_surface_queue_reset(source->tbm_queue, req_w, req_h, format)
- != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to reset tbm_surface_queue(%p)", source->tbm_queue);
- return;
- }
-}
-
-static void
-__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
-{
- TPL_ASSERT(private);
- TPL_ASSERT(wl_egl_window);
-
- struct tizen_private *tizen_private = (struct tizen_private *)private;
- twe_wl_surf_source *source = (twe_wl_surf_source *)tizen_private->data;
- int rotation = tizen_private->rotation;
-
- if (!source) {
- TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
- wl_egl_window);
- return;
- }
-
- TPL_LOG_T(BACKEND, "[ROTATE_CB] wl_egl_window(%p) (%d) -> (%d)",
- wl_egl_window, source->rotation, rotation);
-
- source->rotation = rotation;
-
- if (source->rotate_cb)
- source->rotate_cb(source->cb_data);
-}
-
-static int
-__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
- void *private)
-{
- TPL_ASSERT(private);
- TPL_ASSERT(wl_egl_window);
-
- int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
- struct tizen_private *tizen_private = (struct tizen_private *)private;
- twe_wl_surf_source *source = (twe_wl_surf_source *)tizen_private->data;
-
- if (!source) {
- TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
- wl_egl_window);
- return rotation_capability;
- }
-
- if (source->rotation_capability == TPL_TRUE)
- rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
- else
- rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
-
-
- return rotation_capability;
-}
-
-static void
-__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
- void *private, unsigned int serial)
-{
- TPL_ASSERT(private);
- TPL_ASSERT(wl_egl_window);
-
- struct tizen_private *tizen_private = (struct tizen_private *)private;
- twe_wl_surf_source *source = (twe_wl_surf_source *)tizen_private->data;
-
- if (!source) {
- TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
- wl_egl_window);
- return;
- }
-
- source->set_serial_is_used = TPL_TRUE;
- source->serial = serial;
-}
-
-static int
-__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
-{
- TPL_ASSERT(private);
- TPL_ASSERT(wl_egl_window);
-
- struct tizen_private *tizen_private = (struct tizen_private *)private;
- twe_wl_surf_source *surf_source = NULL;
-
- int commit_sync_fd = -1;
-
- surf_source = (twe_wl_surf_source *)tizen_private->data;
- if (!surf_source) {
- TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
- return -1;
- }
-
- g_mutex_lock(&surf_source->commit_sync.mutex);
-
- if (surf_source->commit_sync.fd != -1) {
- commit_sync_fd = dup(surf_source->commit_sync.fd);
- TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
- surf_source->commit_sync.fd, commit_sync_fd);
- TPL_DEBUG("[DUP_COMMIT_SYNC] surf_source(%p) commit_sync_fd(%d) dup(%d)",
- surf_source, surf_source->commit_sync.fd, commit_sync_fd);
- g_mutex_unlock(&surf_source->commit_sync.mutex);
- return commit_sync_fd;
- }
-
- surf_source->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
- if (surf_source->commit_sync.fd == -1) {
- TPL_ERR("Failed to create commit_sync_fd. twe_surface(%p)", surf_source);
- g_mutex_unlock(&surf_source->commit_sync.mutex);
- return -1;
- }
-
- commit_sync_fd = dup(surf_source->commit_sync.fd);
-
- TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
- surf_source->commit_sync.fd, commit_sync_fd);
- TPL_DEBUG("[CREATE_COMMIT_SYNC] surf_source(%p) commit_sync_fd(%d)",
- surf_source, commit_sync_fd);
-
- g_mutex_unlock(&surf_source->commit_sync.mutex);
-
- return commit_sync_fd;
-}
-
-static int
-__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
-{
- TPL_ASSERT(private);
- TPL_ASSERT(wl_egl_window);
-
- struct tizen_private *tizen_private = (struct tizen_private *)private;
- twe_wl_surf_source *surf_source = NULL;
-
- int presentation_sync_fd = -1;
-
- surf_source = (twe_wl_surf_source *)tizen_private->data;
- if (!surf_source) {
- TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
- return -1;
- }
-
- g_mutex_lock(&surf_source->presentation_sync.mutex);
- if (surf_source->presentation_sync.fd != -1) {
- presentation_sync_fd = dup(surf_source->presentation_sync.fd);
- TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
- surf_source->presentation_sync.fd, presentation_sync_fd);
- TPL_DEBUG("[DUP_PRESENTATION_SYNC] surf_source(%p) presentation_sync_fd(%d) dup(%d)",
- surf_source, surf_source->presentation_sync.fd, presentation_sync_fd);
- g_mutex_unlock(&surf_source->presentation_sync.mutex);
- return presentation_sync_fd;
- }
-
- surf_source->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
- if (surf_source->presentation_sync.fd == -1) {
- TPL_ERR("Failed to create presentation_sync_fd. twe_surface(%p)", surf_source);
- g_mutex_unlock(&surf_source->presentation_sync.mutex);
- return -1;
- }
-
- presentation_sync_fd = dup(surf_source->presentation_sync.fd);
- TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
- surf_source->presentation_sync.fd, presentation_sync_fd);
- TPL_DEBUG("[CREATE_PRESENTATION_SYNC] surf_source(%p) presentation_sync_fd(%d) dup(%d)",
- surf_source, surf_source->presentation_sync.fd, presentation_sync_fd);
-
- g_mutex_unlock(&surf_source->presentation_sync.mutex);
-
- return presentation_sync_fd;
-}
-
-static void __cb_tss_flusher_flush_callback(void *data,
- struct tizen_surface_shm_flusher *tss_flusher)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
-
- TPL_LOG_T(BACKEND, "[FLUSH_CB] surf_source(%p)", surf_source);
-
- if (surf_source->disp_source->is_vulkan_dpy) {
- TPL_WARN("Vulkan do not support buffer flush");
- return;
- }
-
- tsq_err = tbm_surface_queue_flush(surf_source->tbm_queue);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to free flush tbm_queue(%p)", surf_source->tbm_queue);
- return;
- }
-}
-
-static void __cb_tss_flusher_free_flush_callback(void *data,
- struct tizen_surface_shm_flusher *tss_flusher)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
-
- TPL_LOG_T(BACKEND, "[FREE_FLUSH_CB] surf_source(%p)", surf_source);
-
- if (surf_source->disp_source->is_vulkan_dpy) {
- TPL_WARN("Vulkan do not support buffer flush");
- return;
- }
-
- tsq_err = tbm_surface_queue_free_flush(surf_source->tbm_queue);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to free flush tbm_queue(%p)", surf_source->tbm_queue);
- return;
- }
-}
-
-
-static const struct tizen_surface_shm_flusher_listener
-tss_flusher_listener = {
- __cb_tss_flusher_flush_callback,
- __cb_tss_flusher_free_flush_callback
-};
-
-void
-__cb_twe_buffer_free_callback(twe_wl_buffer_info *buf_info)
-{
- twe_wl_surf_source *surf_source = buf_info->surf_source;
- twe_wl_disp_source *disp_source = surf_source->disp_source;
-
- TPL_INFO("[BUFFER_FREE]", "buf_info(%p) wl_buffer(%p) tbm_surface(%p)",
- buf_info, buf_info->wl_buffer, buf_info->tbm_surface);
-
- wl_display_flush(disp_source->disp);
-
- if (buf_info->wl_buffer)
- wayland_tbm_client_destroy_buffer(disp_source->wl_tbm_client,
- (void *)buf_info->wl_buffer);
-
- if (buf_info->commit_sync_fd != -1) {
- int ret = _write_to_eventfd(buf_info->commit_sync_fd);
- if (ret == -1)
- TPL_ERR("Failed to send commit_sync signal to fd(%d)",
- buf_info->commit_sync_fd);
- close(buf_info->commit_sync_fd);
- buf_info->commit_sync_fd = -1;
- }
-
- if (buf_info->presentation_sync_fd != -1) {
- int ret = _write_to_eventfd(buf_info->presentation_sync_fd);
- if (ret == -1)
- TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
- buf_info->presentation_sync_fd);
- close(buf_info->presentation_sync_fd);
- buf_info->presentation_sync_fd = -1;
-
- if (buf_info->presentation_feedback)
- wp_presentation_feedback_destroy(buf_info->presentation_feedback);
- buf_info->presentation_feedback = NULL;
- }
-
- if (buf_info->sync_timeline != -1) {
- close(buf_info->sync_timeline);
- buf_info->sync_timeline = -1;
- }
-
- if (buf_info->rects) {
- free(buf_info->rects);
- buf_info->rects = NULL;
- buf_info->num_rects = 0;
- }
-
- buf_info->tbm_surface = NULL;
-
- free(buf_info);
-}
-
-static void
-__cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer)
-{
- twe_wl_buffer_info *buf_info = NULL;
- tbm_surface_h tbm_surface = (tbm_surface_h)data;
-
- if (tbm_surface_internal_is_valid(tbm_surface)) {
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
-
- if (buf_info && buf_info->need_to_release) {
- twe_wl_surf_source *surf_source = buf_info->surf_source;
- tbm_surface_queue_error_e tsq_err;
-
- if (buf_info->sync_fd == -1) {
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
- }
-
- if (surf_source->committed_buffers) {
- g_mutex_lock(&surf_source->surf_mutex);
- __tpl_list_remove_data(surf_source->committed_buffers,
- (void *)tbm_surface,
- TPL_FIRST, NULL);
- g_mutex_unlock(&surf_source->surf_mutex);
- }
-
- buf_info->need_to_release = TPL_FALSE;
-
- TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
-
- TPL_LOG_T(BACKEND, "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
- buf_info->wl_buffer, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
- tbm_surface_internal_unref(tbm_surface);
-
- }
- } else {
- TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
- }
-}
-
-static const struct wl_buffer_listener wl_buffer_release_listener = {
- (void *)__cb_buffer_release_callback,
-};
-
-static void
-__cb_buffer_fenced_release(void *data,
- struct zwp_linux_buffer_release_v1 *release, int32_t fence)
-{
- twe_wl_buffer_info *buf_info = NULL;
- tbm_surface_h tbm_surface = (tbm_surface_h)data;
-
- if (tbm_surface_internal_is_valid(tbm_surface)) {
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
-
- if (buf_info && buf_info->need_to_release) {
- twe_wl_surf_source *surf_source = buf_info->surf_source;
- tbm_surface_queue_error_e tsq_err;
-
- if (surf_source->committed_buffers) {
- g_mutex_lock(&surf_source->surf_mutex);
- __tpl_list_remove_data(surf_source->committed_buffers,
- (void *)tbm_surface,
- TPL_FIRST, NULL);
- g_mutex_unlock(&surf_source->surf_mutex);
- }
-
- buf_info->need_to_release = TPL_FALSE;
-
- zwp_linux_buffer_release_v1_destroy(buf_info->buffer_release);
- buf_info->buffer_release = NULL;
- buf_info->release_fence_fd = fence;
-
- TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
- _get_tbm_surface_bo_name(tbm_surface),
- fence);
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
-
- TPL_LOG_T(BACKEND,
- "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
- buf_info->wl_buffer, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface),
- fence);
-
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
-
- tbm_surface_internal_unref(tbm_surface);
- }
- } else {
- TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
- }
-}
-
-static void
-__cb_buffer_immediate_release(void *data,
- struct zwp_linux_buffer_release_v1 *release)
-{
- twe_wl_buffer_info *buf_info = NULL;
- tbm_surface_h tbm_surface = (tbm_surface_h)data;
-
- if (tbm_surface_internal_is_valid(tbm_surface)) {
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
-
- if (buf_info && buf_info->need_to_release) {
- twe_wl_surf_source *surf_source = buf_info->surf_source;
- tbm_surface_queue_error_e tsq_err;
-
- if (surf_source->committed_buffers) {
- g_mutex_lock(&surf_source->surf_mutex);
- __tpl_list_remove_data(surf_source->committed_buffers,
- (void *)tbm_surface,
- TPL_FIRST, NULL);
- g_mutex_unlock(&surf_source->surf_mutex);
- }
-
- buf_info->need_to_release = TPL_FALSE;
-
- zwp_linux_buffer_release_v1_destroy(buf_info->buffer_release);
- buf_info->buffer_release = NULL;
- buf_info->release_fence_fd = -1;
-
- TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
-
- TPL_LOG_T(BACKEND,
- "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)",
- buf_info->wl_buffer, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
-
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
-
- tbm_surface_internal_unref(tbm_surface);
- }
- } else {
- TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
- }
-}
-
-static const struct zwp_linux_buffer_release_v1_listener explicit_sync_release_listner = {
- __cb_buffer_fenced_release,
- __cb_buffer_immediate_release,
-};
-
-static void
-_twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source,
- tbm_surface_h tbm_surface)
-{
- twe_wl_buffer_info *buf_info = NULL;
- struct wl_egl_window *wl_egl_window = NULL;
- struct tizen_private *tizen_private = NULL;
-
- if (!surf_source || g_source_is_destroyed(&surf_source->gsource)) {
- TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
- return;
- }
-
- wl_egl_window = surf_source->wl_egl_window;
- tizen_private = _get_tizen_private(wl_egl_window);
-
- if (!tbm_surface || !tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
- return;
- }
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- /* If buf_info is already existed, reuse it. */
- if (buf_info) {
- if (tizen_private) {
- if (buf_info->w_transform != tizen_private->window_transform) {
- buf_info->w_transform = tizen_private->window_transform;
- buf_info->w_rotated = TPL_TRUE;
- }
-
- buf_info->transform = tizen_private->transform;
- buf_info->dx = wl_egl_window->dx;
- buf_info->dy = wl_egl_window->dy;
- if (surf_source->set_serial_is_used) {
- buf_info->serial = surf_source->serial;
- } else {
- ++tizen_private->serial;
- buf_info->serial = tizen_private->serial;
- }
- }
-
- if (buf_info->rects) {
- free(buf_info->rects);
- buf_info->rects = NULL;
- buf_info->num_rects = 0;
- }
-
- buf_info->draw_done = TPL_FALSE;
- buf_info->need_to_commit = TPL_TRUE;
- buf_info->sync_fd = -1;
- buf_info->acquire_fence_fd = -1;
- buf_info->commit_sync_fd = -1;
-
- buf_info->presentation_sync_fd = -1;
- buf_info->presentation_feedback = NULL;
-
- if (surf_source->in_use_buffers) {
- g_mutex_lock(&surf_source->surf_mutex);
- __tpl_list_push_back(surf_source->in_use_buffers,
- (void *)tbm_surface);
- g_mutex_unlock(&surf_source->surf_mutex);
- }
-
- TRACE_MARK("[SET_BUFFER_INFO] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
-
- TPL_LOG_T(BACKEND,
- "[REUSE_BUF] buf_info(%p) tbm_surface(%p) bo(%d) (%dx%d) "
- "transform(%d) w_transform(%d)",
- buf_info, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface),
- buf_info->width, buf_info->height,
- buf_info->transform, buf_info->w_transform);
- return;
- }
-
- buf_info = (twe_wl_buffer_info *)calloc(1, sizeof(twe_wl_buffer_info));
- if (!buf_info) {
- TPL_ERR("Failed to allocate memory for twe_wl_buffer_info.");
- return;
- }
-
- buf_info->wl_buffer =
- (struct wl_proxy *)wayland_tbm_client_create_buffer(
- surf_source->disp_source->wl_tbm_client, tbm_surface);
-
- if (!buf_info->wl_buffer) {
- TPL_ERR("Failed to create wl_buffer from tbm_surface(%p)",
- tbm_surface);
- free(buf_info);
- return;
- }
-
- if (wl_egl_window && tizen_private) {
- buf_info->dx = wl_egl_window->dx;
- buf_info->dy = wl_egl_window->dy;
- buf_info->width = wl_egl_window->width;
- buf_info->height = wl_egl_window->height;
-
- if (buf_info->w_transform != tizen_private->window_transform) {
- buf_info->w_transform = tizen_private->window_transform;
- buf_info->w_rotated = TPL_TRUE;
- }
-
- buf_info->transform = tizen_private->transform;
-
- if (surf_source->set_serial_is_used) {
- buf_info->serial = surf_source->serial;
- } else {
- ++tizen_private->serial;
- buf_info->serial = tizen_private->serial;
- }
-
- if (surf_source->in_use_buffers) {
- g_mutex_lock(&surf_source->surf_mutex);
- __tpl_list_push_back(surf_source->in_use_buffers,
- (void *)tbm_surface);
- g_mutex_unlock(&surf_source->surf_mutex);
- }
- } else {
- buf_info->dx = 0;
- buf_info->dy = 0;
- buf_info->width = surf_source->swapchain_properties.width;
- buf_info->height = surf_source->swapchain_properties.height;
- buf_info->w_transform = 0;
- buf_info->w_rotated = TPL_FALSE;
- buf_info->transform = 0;
- buf_info->serial = 0;
- }
-
- buf_info->sync_timestamp = 0;
- buf_info->surf_source = surf_source;
- buf_info->num_rects = 0;
- buf_info->rects = NULL;
- buf_info->need_to_commit = TPL_TRUE;
- buf_info->draw_done = TPL_FALSE;
- buf_info->tbm_surface = tbm_surface;
- buf_info->sync_fd = -1;
- buf_info->sync_timeline = -1;
- buf_info->is_vk_image = surf_source->disp_source->is_vulkan_dpy;
- buf_info->release_fence_fd = -1;
- buf_info->acquire_fence_fd = -1;
- buf_info->commit_sync_fd = -1;
- buf_info->presentation_sync_fd = -1;
- buf_info->presentation_feedback = NULL;
-
-
- wl_buffer_add_listener((void *)buf_info->wl_buffer,
- &wl_buffer_release_listener, tbm_surface);
-
- if (buf_info->is_vk_image) {
- buf_info->sync_timeline = tbm_sync_timeline_create();
- if (buf_info->sync_timeline == -1) {
- char buf[1024];
- strerror_r(errno, buf, sizeof(buf));
- TPL_WARN("Failed to create TBM sync timeline: %d(%s)", errno, buf);
- }
-
- wayland_tbm_client_set_sync_timeline(surf_source->disp_source->wl_tbm_client,
- (void *)buf_info->wl_buffer,
- buf_info->sync_timeline);
- }
-
- tbm_surface_internal_add_user_data(tbm_surface, KEY_BUFFER_INFO,
- (tbm_data_free)__cb_twe_buffer_free_callback);
- tbm_surface_internal_set_user_data(tbm_surface, KEY_BUFFER_INFO,
- buf_info);
-
- TRACE_MARK("[SET_BUFFER_INFO] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- TPL_INFO("[NEW_BUFFER_CREATED]",
- "buf_info(%p) tbm_surface(%p) bo(%d) (%dx%d) transform(%d) w_transform(%d)",
- buf_info, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface),
- buf_info->width, buf_info->height,
- buf_info->transform, buf_info->w_transform);
-}
-
-static void
-_twe_surface_cancel_dequeued_buffer(twe_wl_surf_source *surf_source,
- tbm_surface_h tbm_surface)
-{
- if (!surf_source) {
- TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
- return;
- }
-
- TPL_LOG_T(BACKEND,
- "[CANCEL_BUFFER] Stop tracking of canceled tbm_surface(%p)",
- tbm_surface);
-
- if (surf_source->in_use_buffers) {
- g_mutex_lock(&surf_source->surf_mutex);
- /* Stop tracking of this canceled tbm_surface */
- __tpl_list_remove_data(surf_source->in_use_buffers,
- (void *)tbm_surface, TPL_FIRST, NULL);
- g_mutex_unlock(&surf_source->surf_mutex);
- }
-}
-
-static void
-_twe_surface_trace_enqueue_buffer(twe_wl_surf_source *surf_source,
- tbm_surface_h tbm_surface)
-{
- twe_wl_buffer_info *buf_info = NULL;
-
- if (!surf_source) {
- TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
- return;
- }
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (buf_info) {
- g_mutex_lock(&surf_source->commit_sync.mutex);
- buf_info->commit_sync_fd = surf_source->commit_sync.fd;
- surf_source->commit_sync.fd = -1;
- TRACE_ASYNC_BEGIN(buf_info->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- g_mutex_unlock(&surf_source->commit_sync.mutex);
-
- g_mutex_lock(&surf_source->presentation_sync.mutex);
- buf_info->presentation_sync_fd = surf_source->presentation_sync.fd;
- surf_source->presentation_sync.fd = -1;
- g_mutex_unlock(&surf_source->presentation_sync.mutex);
- }
-
- if (surf_source->in_use_buffers) {
- g_mutex_lock(&surf_source->surf_mutex);
- /* Stop tracking of this canceled tbm_surface */
- __tpl_list_remove_data(surf_source->in_use_buffers,
- (void *)tbm_surface, TPL_FIRST, NULL);
- g_mutex_unlock(&surf_source->surf_mutex);
- }
-}
-
-static void
-__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
- void *data)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
-
- if (!surf_source || g_source_is_destroyed(&surf_source->gsource)) {
- TPL_ERR("Invalid parameter. surf_source(%p)", surf_source);
- return;
- }
-
- surf_source->swapchain_properties.width =
- tbm_surface_queue_get_width(tbm_queue);
- surf_source->swapchain_properties.height =
- tbm_surface_queue_get_height(tbm_queue);
- surf_source->swapchain_properties.buffer_count =
- tbm_surface_queue_get_size(tbm_queue);
- surf_source->format = tbm_surface_queue_get_format(tbm_queue);
-
- g_mutex_lock(&surf_source->free_queue_mutex);
- g_cond_signal(&surf_source->free_queue_cond);
- g_mutex_unlock(&surf_source->free_queue_mutex);
-
- TPL_LOG_T(BACKEND, "tbm_queue(%p) has been reset!", tbm_queue);
-}
-
-static void
-__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h surface_queue,
- void *data)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
- uint64_t value = 1;
- int ret;
-
- g_mutex_lock(&surf_source->surf_mutex);
-
- ret = write(surf_source->event_fd, &value, sizeof(uint64_t));
- if (ret == -1) {
- TPL_ERR("failed to send acquirable event. twe_wl_surf_source(%p)",
- surf_source);
- g_mutex_unlock(&surf_source->surf_mutex);
- return;
- }
-
- g_mutex_unlock(&surf_source->surf_mutex);
-}
-
-static void __cb_tbm_queue_trace_callback(tbm_surface_queue_h tbm_queue,
- tbm_surface_h tbm_surface,
- tbm_surface_queue_trace trace,
- void *data)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
-
- switch (trace) {
- case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
- _twe_surface_set_wl_buffer_info(surf_source, tbm_surface);
- break;
- case TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE:
- _twe_surface_cancel_dequeued_buffer(surf_source, tbm_surface);
- break;
- case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
- _twe_surface_trace_enqueue_buffer(surf_source, tbm_surface);
- break;
- default:
- break;
- }
-}
-
-static void __cb_tbm_queue_dequeueable_callback(tbm_surface_queue_h tbm_queue,
- void *data)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
-
- if (!surf_source || g_source_is_destroyed(&surf_source->gsource)) {
- TPL_ERR("Invalid parameter. surf_source(%p)", surf_source);
- return;
- }
-
- g_mutex_lock(&surf_source->free_queue_mutex);
-
- TPL_LOG_T(BACKEND, "[DEQUEUEABLE_CB] surf_source(%p) tbm_queue(%p)",
- surf_source, surf_source->tbm_queue);
-
- g_cond_signal(&surf_source->free_queue_cond);
- g_mutex_unlock(&surf_source->free_queue_mutex);
-}
-
-static void
-_twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source,
- tbm_surface_h tbm_surface);
-static void
-__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
- unsigned int sequence, unsigned int tv_sec,
- unsigned int tv_usec, void *user_data)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)user_data;
- twe_wl_disp_source *disp_source = NULL;
-
- if (!surf_source) {
- TPL_ERR("Invalid parameter. user_data(%p)", user_data);
- return;
- }
-
- if (g_source_is_destroyed(&surf_source->gsource)) {
- TPL_WARN("surf_source already destroyed.");
- return;
- }
-
- TRACE_ASYNC_END((int)surf_source, "WAIT_VBLANK");
-
- if (error == TDM_ERROR_TIMEOUT)
- TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. surf_source(%p)",
- surf_source);
-
- disp_source = surf_source->disp_source;
-
- surf_source->vblank_done = TPL_TRUE;
-
- g_mutex_lock(&surf_source->surf_mutex);
- if (!disp_source->is_vulkan_dpy) {
- if (surf_source->vblank_waiting_buffers) {
- tbm_surface_h tbm_surface = NULL;
- tbm_surface = (tbm_surface_h)__tpl_list_pop_front(
- surf_source->vblank_waiting_buffers,
- NULL);
- if (tbm_surface)
- _twe_thread_wl_surface_commit(surf_source, tbm_surface);
- }
- } else {
- switch (surf_source->swapchain_properties.present_mode) {
- case TPL_DISPLAY_PRESENT_MODE_MAILBOX:
- if (surf_source->draw_done_buffer) {
- _twe_thread_wl_vk_surface_commit(surf_source,
- surf_source->draw_done_buffer);
- surf_source->draw_done_buffer = NULL;
- }
- break;
-
- case TPL_DISPLAY_PRESENT_MODE_FIFO:
- case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED:
- if (surf_source->vblank_waiting_buffers) {
- tbm_surface_h tbm_surface = NULL;
- tbm_surface = (tbm_surface_h)__tpl_list_pop_front(
- surf_source->vblank_waiting_buffers,
- NULL);
- if (tbm_surface)
- _twe_thread_wl_vk_surface_commit(surf_source, tbm_surface);
- }
-
- break;
- }
- }
- g_mutex_unlock(&surf_source->surf_mutex);
-}
-
-static tdm_client_vblank*
-_twe_surface_create_vblank(tdm_client *tdm_client);
-
-static tpl_result_t
-_twe_surface_wait_vblank(twe_wl_surf_source *surf_source)
-{
- tdm_error tdm_err = TDM_ERROR_NONE;
-
- if (!_twe_ctx->tdm_source) {
- TPL_WARN("tdm_vblank feature is disabled.");
-
- if (surf_source->vblank) {
- tdm_client_vblank_destroy(surf_source->vblank);
- surf_source->vblank = NULL;
- surf_source->vblank_done = TPL_TRUE;
- }
-
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- if (!surf_source->vblank) {
- surf_source->vblank =
- _twe_surface_create_vblank(_twe_ctx->tdm_source->tdm_client);
- if (!surf_source->vblank) {
- TPL_WARN("Failed to create vblank. surf_source(%p)",
- surf_source);
- return TPL_ERROR_OUT_OF_MEMORY;
- }
- }
-
- tdm_err = tdm_client_vblank_wait(surf_source->vblank,
- surf_source->post_interval, /* TODO: interval */
- __cb_tdm_client_vblank,
- (void *)surf_source);
-
- if (tdm_err == TDM_ERROR_NONE) {
- surf_source->vblank_done = TPL_FALSE;
- TRACE_ASYNC_BEGIN((int)surf_source, "WAIT_VBLANK");
- } else {
- TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- return TPL_ERROR_NONE;
-}
-
-static void
-_twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source,
- tbm_surface_h tbm_surface)
-{
- twe_wl_buffer_info *buf_info = NULL;
- struct wl_surface *wl_surface = surf_source->surf;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- uint32_t version;
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (!buf_info) {
- TPL_ERR("Failed to get wl_buffer_info from tbm_surface(%p)",
- tbm_surface);
- return;
- }
-
- version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
- wl_surface_attach(wl_surface, (void *)buf_info->wl_buffer,
- 0, 0);
-
- if (buf_info->num_rects < 1 || buf_info->rects == NULL) {
- if (version < 4) {
- wl_surface_damage(wl_surface, 0, 0,
- surf_source->swapchain_properties.width,
- surf_source->swapchain_properties.height);
- } else {
- wl_surface_damage_buffer(wl_surface, 0, 0,
- surf_source->swapchain_properties.width,
- surf_source->swapchain_properties.height);
- }
- } else {
- int i;
- for (i = 0; i < buf_info->num_rects; i++) {
- if (version < 4) {
- wl_surface_damage(wl_surface,
- buf_info->rects[i * 4 + 0],
- buf_info->rects[i * 4 + 1],
- buf_info->rects[i * 4 + 2],
- buf_info->rects[i * 4 + 3]);
- } else {
- wl_surface_damage_buffer(wl_surface,
- buf_info->rects[i * 4 + 0],
- buf_info->rects[i * 4 + 1],
- buf_info->rects[i * 4 + 2],
- buf_info->rects[i * 4 + 3]);
- }
- }
- }
-
- /* Dependent on wl_buffer release event. */
- buf_info->need_to_release = TPL_TRUE;
-
- wl_surface_commit(wl_surface);
-
- wl_display_flush(surf_source->disp_source->disp);
-
- TRACE_MARK("[COMMIT] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
- TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
-
- buf_info->sync_timestamp++;
-
- TPL_LOG_T(BACKEND, "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)",
- buf_info->wl_buffer, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
-
- if (surf_source->swapchain_properties.present_mode
- == TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED ||
- surf_source->swapchain_properties.present_mode
- == TPL_DISPLAY_PRESENT_MODE_FIFO) {
- if ((_twe_ctx->tdm_source || surf_source->vblank) &&
- _twe_surface_wait_vblank(surf_source) != TPL_ERROR_NONE)
- TPL_ERR("Failed to set wait vblank");
- }
-
- if (surf_source->committed_buffers) {
- __tpl_list_push_back(surf_source->committed_buffers, tbm_surface);
- }
-
- /* Presented buffer's sync operating dependent on tdm timeline fence. */
- if (buf_info->sync_fd != -1) {
- TPL_LOG_T(BACKEND, "[RELEASE_IMMEDIATELY] tbm_surface(%p) bo(%d) sync_fd(%d)",
- tbm_surface, _get_tbm_surface_bo_name(tbm_surface),
- buf_info->sync_fd);
- TRACE_MARK("[RELEASE_IMMEDIATELY] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("Failed to release tbm_surface(%p) when vk_surface_commit.",
- tbm_surface);
- }
-}
-
-static void
-__cb_presentation_feedback_sync_output(void *data,
- struct wp_presentation_feedback *presentation_feedback,
- struct wl_output *output)
-{
- TPL_IGNORE(data);
- TPL_IGNORE(presentation_feedback);
- TPL_IGNORE(output);
-}
-
-static void
-__cb_presentation_feedback_presented(void *data,
- struct wp_presentation_feedback *presentation_feedback,
- uint32_t tv_sec_hi,
- uint32_t tv_sec_lo,
- uint32_t tv_nsec,
- uint32_t refresh_nsec,
- uint32_t seq_hi,
- uint32_t seq_lo,
- uint32_t flags)
-{
- TPL_IGNORE(tv_sec_hi);
- TPL_IGNORE(tv_sec_lo);
- TPL_IGNORE(tv_nsec);
- TPL_IGNORE(refresh_nsec);
- TPL_IGNORE(seq_hi);
- TPL_IGNORE(seq_lo);
- TPL_IGNORE(flags);
-
- tbm_surface_h tbm_surface = (tbm_surface_h)data;
- twe_wl_buffer_info *buf_info = NULL;
- twe_wl_surf_source *surf_source = NULL;
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (!buf_info) {
- TPL_ERR("Failed to get twe_wl_buffer_info from tbm_surface(%p)",
- tbm_surface);
- return;
- }
-
- surf_source = buf_info->surf_source;
-
- g_mutex_lock(&surf_source->presentation_sync.mutex);
-
- TPL_DEBUG("[FEEDBACK][PRESENTED] surf_source(%p) tbm_surface(%p) bo(%d)",
- surf_source, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
-
- if (buf_info->presentation_sync_fd != -1) {
- int ret = _write_to_eventfd(buf_info->presentation_sync_fd);
- if (ret == -1) {
- TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
- buf_info->presentation_sync_fd);
- }
-
- TRACE_ASYNC_END(buf_info->presentation_sync_fd,
- "[PRESENTATION_SYNC] bo(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
-
- close(buf_info->presentation_sync_fd);
- buf_info->presentation_sync_fd = -1;
- }
-
- if (buf_info->presentation_feedback)
- wp_presentation_feedback_destroy(buf_info->presentation_feedback);
-
- buf_info->presentation_feedback = NULL;
-
- __tpl_list_remove_data(surf_source->presentation_feedbacks, tbm_surface,
- TPL_FIRST, NULL);
-
- g_mutex_unlock(&surf_source->presentation_sync.mutex);
-}
-
-static void
-__cb_presentation_feedback_discarded(void *data,
- struct wp_presentation_feedback *presentation_feedback)
-{
- tbm_surface_h tbm_surface = (tbm_surface_h)data;
- twe_wl_buffer_info *buf_info = NULL;
- twe_wl_surf_source *surf_source = NULL;
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (!buf_info) {
- TPL_ERR("Failed to get twe_wl_buffer_info from tbm_surface(%p)",
- tbm_surface);
- return;
- }
-
- surf_source = buf_info->surf_source;
-
- g_mutex_lock(&surf_source->presentation_sync.mutex);
-
- TPL_DEBUG("[FEEDBACK][DISCARDED] surf_source(%p) tbm_surface(%p) bo(%d)",
- surf_source, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
-
- if (buf_info->presentation_sync_fd != -1) {
- int ret = _write_to_eventfd(buf_info->presentation_sync_fd);
- if (ret == -1) {
- TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
- buf_info->presentation_sync_fd);
- }
-
- TRACE_ASYNC_END(buf_info->presentation_sync_fd,
- "[PRESENTATION_SYNC] bo(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
-
- close(buf_info->presentation_sync_fd);
- buf_info->presentation_sync_fd = -1;
- }
-
- if (buf_info->presentation_feedback)
- wp_presentation_feedback_destroy(buf_info->presentation_feedback);
-
- buf_info->presentation_feedback = NULL;
-
- __tpl_list_remove_data(surf_source->presentation_feedbacks, tbm_surface,
- TPL_FIRST, NULL);
-
- g_mutex_unlock(&surf_source->presentation_sync.mutex);
-}
-
-static const struct wp_presentation_feedback_listener feedback_listener = {
- __cb_presentation_feedback_sync_output, /* sync_output feedback -*/
- __cb_presentation_feedback_presented,
- __cb_presentation_feedback_discarded
-};
-
-static void
-_twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source,
- tbm_surface_h tbm_surface)
-{
- twe_wl_buffer_info *buf_info = NULL;
- twe_wl_disp_source *disp_source = surf_source->disp_source;
- struct wl_surface *wl_surface = surf_source->surf;
- struct wl_egl_window *wl_egl_window = surf_source->wl_egl_window;
- uint32_t version;
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (!buf_info) {
- TPL_ERR("Failed to get twe_wl_buffer_info from tbm_surface(%p)",
- tbm_surface);
- return;
- }
-
- version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
-
- g_mutex_lock(&surf_source->presentation_sync.mutex);
- if (disp_source->presentation && buf_info->presentation_sync_fd != -1) {
- buf_info->presentation_feedback =
- wp_presentation_feedback(disp_source->presentation,
- wl_surface);
- wp_presentation_feedback_add_listener(buf_info->presentation_feedback,
- &feedback_listener, tbm_surface);
- __tpl_list_push_back(surf_source->presentation_feedbacks, tbm_surface);
- TRACE_ASYNC_BEGIN(buf_info->presentation_sync_fd,
- "[PRESENTATION_SYNC] bo(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- }
- g_mutex_unlock(&surf_source->presentation_sync.mutex);
-
- if (buf_info->w_rotated == TPL_TRUE) {
- wayland_tbm_client_set_buffer_transform(
- disp_source->wl_tbm_client,
- (void *)buf_info->wl_buffer,
- buf_info->w_transform);
- buf_info->w_rotated = TPL_FALSE;
- }
-
- if (surf_source->latest_transform != buf_info->transform) {
- surf_source->latest_transform = buf_info->transform;
- wl_surface_set_buffer_transform(wl_surface, buf_info->transform);
- }
-
- if (wl_egl_window) {
- wl_egl_window->attached_width = buf_info->width;
- wl_egl_window->attached_height = buf_info->height;
- }
-
- wl_surface_attach(wl_surface, (void *)buf_info->wl_buffer,
- buf_info->dx, buf_info->dy);
-
- if (buf_info->num_rects < 1 || buf_info->rects == NULL) {
- if (version < 4) {
- wl_surface_damage(wl_surface,
- buf_info->dx, buf_info->dy,
- buf_info->width, buf_info->height);
- } else {
- wl_surface_damage_buffer(wl_surface,
- 0, 0,
- buf_info->width, buf_info->height);
- }
- } else {
- int i;
- for (i = 0; i < buf_info->num_rects; i++) {
- int inverted_y =
- buf_info->height - (buf_info->rects[i * 4 + 1] +
- buf_info->rects[i * 4 + 3]);
- if (version < 4) {
- wl_surface_damage(wl_surface,
- buf_info->rects[i * 4 + 0],
- inverted_y,
- buf_info->rects[i * 4 + 2],
- buf_info->rects[i * 4 + 3]);
- } else {
- wl_surface_damage_buffer(wl_surface,
- buf_info->rects[i * 4 + 0],
- inverted_y,
- buf_info->rects[i * 4 + 2],
- buf_info->rects[i * 4 + 3]);
- }
- }
- }
- wayland_tbm_client_set_buffer_serial(disp_source->wl_tbm_client,
- (void *)buf_info->wl_buffer,
- buf_info->serial);
-
- buf_info->need_to_release = TPL_TRUE;
-
- if (surf_source->disp_source->use_explicit_sync &&
- surf_source->use_surface_sync) {
-
- zwp_linux_surface_synchronization_v1_set_acquire_fence(surf_source->surface_sync,
- buf_info->acquire_fence_fd);
- TPL_DEBUG("[SET_ACQUIRE_FENCE] surf_source(%p) tbm_surface(%p) acquire_fence(%d)",
- surf_source, tbm_surface, buf_info->acquire_fence_fd);
- close(buf_info->acquire_fence_fd);
- buf_info->acquire_fence_fd = -1;
-
- buf_info->buffer_release =
- zwp_linux_surface_synchronization_v1_get_release(surf_source->surface_sync);
- if (!buf_info->buffer_release) {
- TPL_ERR("Failed to get buffer_release. twe_surface(%p)", surf_source);
- } else {
- zwp_linux_buffer_release_v1_add_listener(
- buf_info->buffer_release, &explicit_sync_release_listner, tbm_surface);
- TPL_DEBUG("add explicit_sync_release_listener.");
- }
- }
-
- wl_surface_commit(wl_surface);
-
- wl_display_flush(surf_source->disp_source->disp);
-
- TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
-
- buf_info->need_to_commit = TPL_FALSE;
-
- TPL_LOG_T(BACKEND, "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)",
- buf_info->wl_buffer, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
-
- if ((_twe_ctx->tdm_source || surf_source->vblank) &&
- _twe_surface_wait_vblank(surf_source) != TPL_ERROR_NONE)
- TPL_ERR("Failed to set wait vblank.");
-
-
- if (surf_source->committed_buffers) {
- __tpl_list_push_back(surf_source->committed_buffers, tbm_surface);
- }
-
- g_mutex_lock(&surf_source->commit_sync.mutex);
-
- if (buf_info->commit_sync_fd != -1) {
- int ret = _write_to_eventfd(buf_info->commit_sync_fd);
- if (ret == -1) {
- TPL_ERR("Failed to send commit_sync signal to fd(%d)", buf_info->commit_sync_fd);
- }
-
- TRACE_ASYNC_END(buf_info->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- TPL_DEBUG("[COMMIT_SYNC][SEND] surf_source(%p) commit_sync_fd(%d)",
- surf_source, buf_info->commit_sync_fd);
-
- close(buf_info->commit_sync_fd);
- buf_info->commit_sync_fd = -1;
- }
-
- g_mutex_unlock(&surf_source->commit_sync.mutex);
-}
-
-/* The following function _twe_thread_wl_surface_acquire_and_commit can be
- * called in both situations.
- * One is when acquirable event is received from the main thread,
- * and the other is when __cb_tdm_client_vblank callback is called.
- * The reason for calling the next function in the two situations described
- * above is to make only one commit for one vblank.
- */
-static void
-_twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source)
-{
- twe_wl_disp_source *disp_source = surf_source->disp_source;
- tbm_surface_h tbm_surface = NULL;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- twe_wl_buffer_info *buf_info = NULL;
-
- if (surf_source->is_destroying) {
- TPL_WARN("surf_source(%p) native window is already destroyed.",
- surf_source);
- return;
- }
-
- /* If there are multiple buffers in the drity_queue of tbm_queue
- * as render done state, this function should decide whether
- * to commit or pending, depending on whether vblank_done
- * after acquire as much as possible. */
- while (tbm_surface_queue_can_acquire(surf_source->tbm_queue, 0)) {
-
- /* queue_acquire should be performed only when render_done_cnt
- * is greater than 0 when using sync_fence even in the case of
- * queue_can_acquire. */
- if (surf_source->use_sync_fence && !(surf_source->render_done_cnt > 0)) {
- return;
- }
-
- tsq_err = tbm_surface_queue_acquire(surf_source->tbm_queue, &tbm_surface);
- if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to acquire from tbm_queue(%p)",
- surf_source->tbm_queue);
- return;
- }
-
- surf_source->render_done_cnt--;
-
- tbm_surface_internal_ref(tbm_surface);
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
-
- if (!disp_source->is_vulkan_dpy) { /* wayland_egl */
- if (surf_source->vblank_done) {
- TPL_LOG_T(BACKEND, "[ACQ] tbm_surface(%p) bo(%d)",
- tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
-
- _twe_thread_wl_surface_commit(surf_source, tbm_surface);
- } else {
- /* If the current surface needs to wait for vblank
- * to commit after acquire, keep the acquired buffer
- * in the vblank_waiting_buffers list. */
- if (surf_source->vblank_waiting_buffers) {
- __tpl_list_push_back(surf_source->vblank_waiting_buffers,
- (void *)tbm_surface);
- TPL_LOG_T(BACKEND,
- "[ACQ][COMMIT_PENDING] tbm_surface(%p) bo(%d)",
- tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
- } else {
- _twe_thread_wl_surface_commit(surf_source, tbm_surface);
- }
- }
-
- } else { /* wayland_vulkan */
- TPL_LOG_T(BACKEND, "[ACQ] tbm_surface(%p) bo(%d)",
- tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
-
- switch (surf_source->swapchain_properties.present_mode) {
- case TPL_DISPLAY_PRESENT_MODE_IMMEDIATE:
- _twe_thread_wl_vk_surface_commit(surf_source, tbm_surface);
- break;
-
- case TPL_DISPLAY_PRESENT_MODE_MAILBOX:
- if (surf_source->draw_done_buffer) {
- TPL_LOG_T(BACKEND, "[SKIP] tbm_surface(%p) bo(%d)",
- tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
- tbm_surface_internal_unref(surf_source->draw_done_buffer);
- tbm_surface_queue_release(surf_source->tbm_queue,
- surf_source->draw_done_buffer);
- }
-
- surf_source->draw_done_buffer = tbm_surface;
-
- if (surf_source->vblank_done) {
- if ((_twe_ctx->tdm_source || surf_source->vblank) &&
- _twe_surface_wait_vblank(surf_source) != TPL_ERROR_NONE)
- TPL_ERR("Failed to set wait vblank");
- }
- break;
-
- case TPL_DISPLAY_PRESENT_MODE_FIFO:
- case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED:
- if (surf_source->vblank_done) {
- _twe_thread_wl_vk_surface_commit(surf_source, tbm_surface);
- } else {
- if (surf_source->vblank_waiting_buffers) {
- __tpl_list_push_back(surf_source->vblank_waiting_buffers,
- (void *)tbm_surface);
- } else {
- TPL_ERR("Invalid list. vblank_waiting_buffers is NULL.");
- }
- }
- break;
- }
- }
- }
-}
-
-static gboolean
-_twe_thread_wl_surface_dispatch(GSource *source, GSourceFunc cb, gpointer data)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)source;
- tpl_result_t res = TPL_ERROR_NONE;
- GIOCondition cond;
-
- g_mutex_lock(&surf_source->surf_mutex);
-
- cond = g_source_query_unix_fd(source, surf_source->tag);
-
- if (cond & G_IO_IN) {
- ssize_t s;
- uint64_t u;
-
- s = read(surf_source->event_fd, &u, sizeof(uint64_t));
- if (s != sizeof(uint64_t)) {
- TPL_ERR("Failed to read from event_fd(%d)",
- surf_source->event_fd);
- res = TPL_ERROR_INVALID_CONNECTION;
- }
-
- if (surf_source->use_sync_fence &&
- surf_source->render_done_fences) {
-
- while (__tpl_list_get_count(surf_source->render_done_fences)) {
- struct sync_info *sync = __tpl_list_pop_front(surf_source->render_done_fences,
- NULL);
- if (sync) {
- res = _twe_thread_fence_wait_source_attach(surf_source,
- sync->tbm_surface,
- sync->sync_fd);
- if (res != TPL_ERROR_NONE) {
- TPL_ERR("Failed to attach source with fence_fd(%d) result(%d)",
- sync->sync_fd, res);
- surf_source->use_sync_fence = TPL_FALSE;
- }
-
- sync->sync_fd = -1;
- sync->tbm_surface = NULL;
- free(sync);
- }
- }
- } else {
- _twe_thread_wl_surface_acquire_and_commit(surf_source);
- }
- }
-
- if (cond && !(cond & G_IO_IN)) {
- TPL_ERR("eventfd(%d) cannot wake up with other condition. cond(%d)",
- surf_source->event_fd, cond);
- res = TPL_ERROR_INVALID_CONNECTION;
- }
-
- if (res != TPL_ERROR_NONE) {
- g_source_remove_unix_fd(source, surf_source->tag);
- close(surf_source->event_fd);
-
- TPL_WARN("event_fd of surf_source(%p) has been closed. it will be recreated.",
- surf_source);
- surf_source->event_fd = eventfd(0, EFD_CLOEXEC);
- if (surf_source->event_fd < 0) {
- TPL_ERR("Failed to create eventfd. errno(%d)", errno);
- } else {
- surf_source->tag = g_source_add_unix_fd(&surf_source->gsource,
- surf_source->event_fd,
- G_IO_IN);
- }
- TPL_DEBUG("[RECREATED] eventfd(%d) tag(%p)", surf_source->event_fd, surf_source->tag);
- }
-
- g_mutex_unlock(&surf_source->surf_mutex);
-
- return G_SOURCE_CONTINUE;
-}
-
-static void
-_twe_thread_wl_surface_finalize(GSource *source)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)source;
-
- TPL_LOG_T(BACKEND, "gsource(%p) event_fd(%d)",
- source, surf_source->event_fd);
-
- close(surf_source->event_fd);
- surf_source->event_fd = -1;
-
- return;
-}
-
-static GSourceFuncs _twe_wl_surface_funcs = {
- .prepare = NULL,
- .check = NULL,
- .dispatch = _twe_thread_wl_surface_dispatch,
- .finalize = _twe_thread_wl_surface_finalize,
-};
-
-static void
-_twe_surface_buffer_flusher_init(twe_wl_surf_source *surf_source)
-{
- twe_wl_disp_source *disp_source = surf_source->disp_source;
-
- if (!disp_source->tss)
- return;
-
- surf_source->tss_flusher =
- tizen_surface_shm_get_flusher(disp_source->tss, surf_source->surf);
-
- tizen_surface_shm_flusher_add_listener(surf_source->tss_flusher,
- &tss_flusher_listener,
- surf_source);
- TPL_LOG_T(BACKEND,
- "tss_flusher init. surf_source(%p) tss_flusher(%p)",
- surf_source, surf_source->tss_flusher);
-}
-
-static void
-_twe_surface_buffer_flusher_fini(twe_wl_surf_source *surf_source)
-{
- if (surf_source->tss_flusher) {
- TPL_LOG_T(BACKEND,
- "tss_flusher fini. surf_source(%p) tss_flusher(%p)",
- surf_source, surf_source->tss_flusher);
- tizen_surface_shm_flusher_destroy(surf_source->tss_flusher);
- surf_source->tss_flusher = NULL;
- }
-}
-
-static tdm_client_vblank*
-_twe_surface_create_vblank(tdm_client *tdm_client)
-{
- tdm_client_vblank *vblank = NULL;
- tdm_client_output *tdm_output = NULL;
- tdm_error tdm_err = TDM_ERROR_NONE;
-
- if (!tdm_client) {
- TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
- return NULL;
- }
-
- tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
- if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
- TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
- return NULL;
- }
-
- vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
- if (!vblank || tdm_err != TDM_ERROR_NONE) {
- TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
- return NULL;
- }
-
- tdm_client_vblank_set_enable_fake(vblank, 1);
- tdm_client_vblank_set_sync(vblank, 0);
-
- TPL_LOG_T(BACKEND, "[VBLANK INIT] vblank(%p)", vblank);
-
- return vblank;
-}
-
-static tbm_surface_queue_h
-_twe_surface_create_tbm_queue(twe_wl_surf_source *source,
- struct wayland_tbm_client *wl_tbm_client,
- tpl_handle_t native_handle,
- int format, int num_buffers)
-{
- tbm_surface_queue_h tbm_queue = NULL;
- struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)native_handle;
- tbm_bufmgr bufmgr = NULL;
- unsigned int capability;
-
- if (!wl_tbm_client || !wl_egl_window) {
- TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_egl_window(%p)",
- wl_tbm_client, wl_egl_window);
- return NULL;
- }
-
- bufmgr = tbm_bufmgr_init(-1);
- capability = tbm_bufmgr_get_capability(bufmgr);
- tbm_bufmgr_deinit(bufmgr);
-
- if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
- tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
- wl_tbm_client,
- wl_egl_window->surface,
- num_buffers,
- wl_egl_window->width,
- wl_egl_window->height,
- format);
- } else {
- tbm_queue = wayland_tbm_client_create_surface_queue(
- wl_tbm_client,
- wl_egl_window->surface,
- num_buffers,
- wl_egl_window->width,
- wl_egl_window->height,
- format);
- }
-
- if (!tbm_queue) {
- TPL_ERR("Failed to create tbm_surface_queue.");
- return NULL;
- }
-
- if (tbm_surface_queue_set_modes(
- tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
- TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
- tbm_queue);
- tbm_surface_queue_destroy(tbm_queue);
- return NULL;
- }
-
- if (tbm_surface_queue_add_reset_cb(tbm_queue,
- __cb_tbm_queue_reset_callback,
- (void *)source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
- tbm_queue);
- tbm_surface_queue_destroy(tbm_queue);
- return NULL;
- }
-
- if (tbm_surface_queue_add_trace_cb(tbm_queue,
- __cb_tbm_queue_trace_callback,
- (void *)source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register trace callback to tbm_surface_queue(%p)",
- tbm_queue);
- tbm_surface_queue_destroy(tbm_queue);
- return NULL;
- }
-
- if (tbm_surface_queue_add_acquirable_cb(tbm_queue,
- __cb_tbm_queue_acquirable_callback,
- (void *)source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
- tbm_queue);
- tbm_surface_queue_destroy(tbm_queue);
- return NULL;
- }
-
- return tbm_queue;
-}
-
-
-tbm_surface_queue_h
-twe_surface_get_tbm_queue(twe_surface_h twe_surface)
-{
- twe_wl_surf_source *source = (twe_wl_surf_source *)twe_surface;
- if (!source) {
- TPL_ERR("Invalid parameters. twe_surface(%p)", source);
- return NULL;
- }
-
- if (!source->tbm_queue) {
- TPL_ERR("Invalid parameters. twe_surface(%p) tbm_queue(%p)",
- source, source->tbm_queue);
- return NULL;
- }
-
- return source->tbm_queue;
-}
-
-static void
-__cb_buffer_remove_from_list(void *data)
-{
- tbm_surface_h tbm_surface = (tbm_surface_h)data;
-
- if (tbm_surface && tbm_surface_internal_is_valid(tbm_surface))
- tbm_surface_internal_unref(tbm_surface);
-}
-
-static void
-_twe_thread_wl_surf_source_destroy(void *source)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)source;
- twe_wl_disp_source *disp_source = NULL;
- gboolean is_destroyed = FALSE;
-
- if (!surf_source || g_source_is_destroyed(&surf_source->gsource)) {
- TPL_ERR("twe_surface(%p) is already destroyed.", surf_source);
- return;
- }
-
- disp_source = surf_source->disp_source;
- if (!disp_source ||
- (is_destroyed = g_source_is_destroyed(&disp_source->gsource))) {
- TPL_ERR("twe_display(%p) is invalid. | is_destroyed(%s)",
- disp_source, (is_destroyed ? "TRUE" : "FALSE"));
- return;
- }
-
- g_mutex_lock(&disp_source->wl_event_mutex);
-
- g_mutex_lock(&surf_source->surf_mutex);
-
- g_mutex_lock(&surf_source->presentation_sync.mutex);
-
- TPL_INFO("[TWE_SURFACE_DESTROY]",
- "surf_source(%p) wl_egl_window(%p) wl_surface(%p)",
- surf_source, surf_source->wl_egl_window, surf_source->surf);
-
- if (disp_source->presentation && surf_source->presentation_feedbacks) {
- while (!__tpl_list_is_empty(surf_source->presentation_feedbacks)) {
- tbm_surface_h tbm_surface =
- __tpl_list_pop_front(surf_source->presentation_feedbacks, NULL);
- if (tbm_surface_internal_is_valid(tbm_surface)) {
- twe_wl_buffer_info *buf_info = NULL;
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (buf_info && buf_info->presentation_sync_fd != -1 &&
- buf_info->presentation_feedback) {
-
- _write_to_eventfd(buf_info->presentation_sync_fd);
- close(buf_info->presentation_sync_fd);
- buf_info->presentation_sync_fd = -1;
-
- wp_presentation_feedback_destroy(buf_info->presentation_feedback);
- buf_info->presentation_feedback = NULL;
- }
- }
- }
- }
-
- if (surf_source->presentation_sync.fd != -1) {
- _write_to_eventfd(surf_source->presentation_sync.fd);
- close(surf_source->presentation_sync.fd);
- surf_source->presentation_sync.fd = -1;
- }
- g_mutex_unlock(&surf_source->presentation_sync.mutex);
- g_mutex_clear(&surf_source->presentation_sync.mutex);
-
- if (surf_source->in_use_buffers) {
- __tpl_list_free(surf_source->in_use_buffers,
- (tpl_free_func_t)__cb_buffer_remove_from_list);
- surf_source->in_use_buffers = NULL;
- }
-
- if (surf_source->surface_sync) {
- TPL_INFO("[SURFACE_SYNC FINI]", "twe_wl_surf_source(%p) surface_sync(%p)",
- surf_source, surf_source->surface_sync);
- zwp_linux_surface_synchronization_v1_destroy(surf_source->surface_sync);
- surf_source->surface_sync = NULL;
- }
-
- if (surf_source->committed_buffers) {
- while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- tbm_surface_h tbm_surface =
- __tpl_list_pop_front(surf_source->committed_buffers,
- (tpl_free_func_t)__cb_buffer_remove_from_list);
-
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
- tbm_surface, tsq_err);
- }
- __tpl_list_free(surf_source->committed_buffers, NULL);
- surf_source->committed_buffers = NULL;
- }
-
- if (surf_source->vblank_waiting_buffers) {
- while (!__tpl_list_is_empty(surf_source->vblank_waiting_buffers)) {
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- tbm_surface_h tbm_surface =
- __tpl_list_pop_front(surf_source->vblank_waiting_buffers,
- (tpl_free_func_t)__cb_buffer_remove_from_list);
-
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
- tbm_surface, tsq_err);
- }
- __tpl_list_free(surf_source->vblank_waiting_buffers, NULL);
- surf_source->vblank_waiting_buffers = NULL;
- }
-
- if (surf_source->use_sync_fence && surf_source->fence_waiting_sources) {
- while (!__tpl_list_is_empty(surf_source->fence_waiting_sources)) {
- twe_fence_wait_source *wait_source =
- __tpl_list_pop_front(surf_source->fence_waiting_sources,
- NULL);
- if (wait_source && !g_source_is_destroyed(&wait_source->gsource)) {
- tbm_surface_internal_unref(wait_source->tbm_surface);
- wait_source->tbm_surface = NULL;
-
- close(wait_source->fence_fd);
- wait_source->fence_fd = -1;
-
- g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag);
- g_source_destroy(&wait_source->gsource);
- g_source_unref(&wait_source->gsource);
- }
- }
- }
-
- _twe_surface_buffer_flusher_fini(surf_source);
-
- if (surf_source->tbm_queue) {
- tbm_surface_queue_destroy(surf_source->tbm_queue);
- surf_source->tbm_queue = NULL;
- }
-
- if (surf_source->vblank) {
- TPL_INFO("[VBLANK FINI]",
- "twe_wl_surf_source(%p) vblank(%p)",
- surf_source, surf_source->vblank);
- tdm_client_vblank_destroy(surf_source->vblank);
- surf_source->vblank = NULL;
- }
-
- surf_source->cb_data = NULL;
- surf_source->rotate_cb = NULL;
-
- if (surf_source->wl_egl_window) {
- struct tizen_private *tizen_private = NULL;
- TPL_INFO("[WINDOW_FINI]", "twe_surface(%p) wl_egl_window(%p) wl_surface(%p)",
- surf_source, surf_source->wl_egl_window, surf_source->surf);
- tizen_private = _get_tizen_private(surf_source->wl_egl_window);
- if (tizen_private) {
- tizen_private->set_window_serial_callback = NULL;
- tizen_private->rotate_callback = NULL;
- tizen_private->get_rotation_capability = NULL;
- tizen_private->data = NULL;
- free(tizen_private);
- }
-
- surf_source->wl_egl_window->destroy_window_callback = NULL;
- surf_source->wl_egl_window->resize_callback = NULL;
- surf_source->wl_egl_window->driver_private = NULL;
- surf_source->wl_egl_window = NULL;
- surf_source->surf = NULL;
- }
-
- g_mutex_lock(&surf_source->commit_sync.mutex);
- g_mutex_unlock(&surf_source->commit_sync.mutex);
- g_mutex_clear(&surf_source->commit_sync.mutex);
-
- g_mutex_unlock(&surf_source->surf_mutex);
- g_mutex_clear(&surf_source->surf_mutex);
-
- g_mutex_unlock(&disp_source->wl_event_mutex);
-
- g_cond_clear(&surf_source->free_queue_cond);
- g_mutex_clear(&surf_source->free_queue_mutex);
-
- g_source_remove_unix_fd(&surf_source->gsource, surf_source->tag);
- g_source_destroy(&surf_source->gsource);
- g_source_unref(&surf_source->gsource);
-}
-
-twe_surface_h
-twe_surface_add(twe_thread* thread,
- twe_display_h twe_display,
- tpl_handle_t native_handle,
- int format, int num_buffers)
-{
- twe_thread_context *ctx = thread->ctx;
- twe_wl_surf_source *source = NULL;
- twe_wl_disp_source *disp_source = (twe_wl_disp_source *)twe_display;
- gboolean is_destroyed = FALSE;
- tbm_surface_queue_h tbm_queue = NULL;
-
- if (!twe_display ||
- (is_destroyed = g_source_is_destroyed(&disp_source->gsource))) {
- TPL_ERR("twe_display(%p) is invalid. | is_destroyed(%s)",
- twe_display, (is_destroyed ? "TRUE" : "FALSE"));
- return NULL;
- }
-
- source = (twe_wl_surf_source *)g_source_new(&_twe_wl_surface_funcs,
- sizeof(twe_wl_surf_source));
- if (!source) {
- TPL_ERR("[THREAD] Failed to create twe_wl_surf_source");
- return NULL;
- }
-
- source->event_fd = eventfd(0, EFD_CLOEXEC);
- if (source->event_fd < 0) {
- TPL_ERR("[THREAD] Failed to create eventfd. errno(%d)", errno);
- g_source_unref(&source->gsource);
- return NULL;
- }
-
- if (!disp_source->is_vulkan_dpy &&
- !(tbm_queue = _twe_surface_create_tbm_queue(source,
- disp_source->wl_tbm_client,
- native_handle,
- format, num_buffers))) {
- TPL_ERR("Failed to create tbm_surface_queue.");
- g_source_unref(&source->gsource);
- return NULL;
- }
-
- source->tag = g_source_add_unix_fd(&source->gsource,
- source->event_fd,
- G_IO_IN);
- source->tbm_queue = tbm_queue;
- source->disp_source = (twe_wl_disp_source *)twe_display;
- source->latest_transform = 0;
- source->rotation = 0;
- source->rotation_capability = TPL_FALSE;
- source->vblank = NULL;
- source->vblank_done = TPL_TRUE;
- source->is_destroying = TPL_FALSE;
- source->committed_buffers = __tpl_list_alloc();
- source->in_use_buffers = __tpl_list_alloc();
- source->fence_waiting_sources = __tpl_list_alloc();
- source->render_done_fences = __tpl_list_alloc();
- source->render_done_cnt = 0;
-
- source->cb_data = NULL;
- source->rotate_cb = NULL;
- source->format = format;
- source->use_sync_fence = TPL_FALSE;
- source->use_surface_sync = TPL_FALSE;
-
- /* for vulkan swapchain */
- source->vblank_waiting_buffers = NULL;
- source->draw_done_buffer = NULL;
-
- source->set_serial_is_used = TPL_FALSE;
- source->serial = 0;
-
- source->post_interval = 1;
-
- source->commit_sync.fd = -1;
- g_mutex_init(&source->commit_sync.mutex);
-
- source->presentation_sync.fd = -1;
- g_mutex_init(&source->presentation_sync.mutex);
- if (disp_source->presentation)
- source->presentation_feedbacks = __tpl_list_alloc();
- else
- source->presentation_feedbacks = NULL;
-
- if (!disp_source->is_vulkan_dpy) {
- struct wl_egl_window *wl_egl_window =
- (struct wl_egl_window *)native_handle;
- struct tizen_private *private = NULL;
-
- if (wl_egl_window->driver_private)
- private = (struct tizen_private *)wl_egl_window->driver_private;
- else {
- private = tizen_private_create();
- wl_egl_window->driver_private = (void *)private;
- }
-
- if (private) {
- private->data = (void *)source;
- private->rotate_callback = (void *)__cb_rotate_callback;
- private->get_rotation_capability = (void *)
- __cb_get_rotation_capability;
- private->set_window_serial_callback = (void *)
- __cb_set_window_serial_callback;
- private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
- private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
-
- wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
- wl_egl_window->resize_callback = (void *)__cb_resize_callback;
- }
-
- source->wl_egl_window = wl_egl_window;
- source->surf = wl_egl_window->surface;
- source->vblank_waiting_buffers = __tpl_list_alloc();
-
- } else {
- struct wl_surface *wl_surf = (struct wl_surface *)native_handle;
-
- source->wl_egl_window = NULL;
- source->surf = wl_surf;
- }
-
- _twe_surface_buffer_flusher_init(source);
-
- if (disp_source->explicit_sync && disp_source->use_explicit_sync) {
- source->surface_sync =
- zwp_linux_explicit_synchronization_v1_get_synchronization(
- disp_source->explicit_sync, source->surf);
- if (!source->surface_sync) {
- TPL_WARN("Failed to create surf_sync. | surf_source(%p)", source);
- } else {
- source->use_surface_sync = TPL_TRUE;
- }
- }
-
- source->surf_del_source = _twe_del_source_init(ctx, source);
- if (source->surf_del_source) {
- source->surf_del_source->destroy_target_source_func
- = _twe_thread_wl_surf_source_destroy;
- }
-
- g_source_attach(&source->gsource, g_main_loop_get_context(ctx->twe_loop));
-
- g_mutex_init(&source->surf_mutex);
-
- g_mutex_init(&source->free_queue_mutex);
- g_cond_init(&source->free_queue_cond);
-
- TPL_INFO("[SURFACE_ADD]", "gsource(%p) wl_surface(%p) event_fd(%d)",
- source, source->surf, source->event_fd);
-
- return (twe_surface_h)source;
-}
-
-tpl_result_t
-twe_surface_del(twe_surface_h twe_surface)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
- twe_wl_disp_source *disp_source = NULL;
- twe_del_source *surf_del_source = NULL;
- gboolean is_destroyed = FALSE;
-
- if (!surf_source ||
- (is_destroyed = g_source_is_destroyed(&surf_source->gsource))) {
- TPL_ERR("twe_surface(%p) is invalid. | is_destroyed(%s)",
- twe_surface, (is_destroyed ? "TRUE" : "FALSE"));
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- disp_source = surf_source->disp_source;
- if (!disp_source ||
- (is_destroyed = g_source_is_destroyed(&disp_source->gsource))) {
- TPL_ERR("twe_display(%p) is invalid. | is_destroyed(%s)",
- disp_source, (is_destroyed ? "TRUE" : "FALSE"));
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (surf_source->use_sync_fence && surf_source->fence_waiting_sources) {
- TPL_DEBUG("twe_surface(%p) is waiting for all fences to be signaled.",
- surf_source);
- while (!__tpl_list_is_empty(surf_source->fence_waiting_sources)) {
- __tpl_util_sys_yield();
- }
- }
-
- TPL_INFO("[SURFACE_DEL]", "twe_surface(%p) will be destroyed in thread",
- twe_surface);
- surf_del_source = surf_source->surf_del_source;
-
- g_mutex_lock(&_twe_ctx->thread_mutex);
-
- _twe_thread_del_source_trigger(surf_del_source);
- g_cond_wait(&_twe_ctx->thread_cond,
- &_twe_ctx->thread_mutex);
- g_mutex_unlock(&_twe_ctx->thread_mutex);
-
- _twe_del_source_fini(surf_del_source);
-
- return TPL_ERROR_NONE;
-}
-
-tpl_result_t
-twe_surface_create_swapchain(twe_surface_h twe_surface,
- int width, int height, int format,
- int buffer_count, int present_mode)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
- twe_wl_disp_source *disp_source = NULL;
- tbm_bufmgr bufmgr = NULL;
- unsigned int capability;
-
- if (surf_source->tbm_queue) {
- TPL_LOG_B(BACKEND, "[REUSE SWAPCHAIN] surf_source(%p) tbm_queue(%p)",
- surf_source, surf_source->tbm_queue);
- return TPL_ERROR_NONE;
- }
-
- disp_source = surf_source->disp_source;
-
- TPL_ASSERT(disp_source);
-
- if ((buffer_count < disp_source->surface_capabilities.min_buffer)
- || (buffer_count > disp_source->surface_capabilities.max_buffer)) {
- TPL_ERR("Invalid buffer_count(%d)! min_buffer(%d) max_buffer(%d)",
- buffer_count,
- disp_source->surface_capabilities.min_buffer,
- disp_source->surface_capabilities.max_buffer);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if ((present_mode & disp_source->surface_capabilities.present_modes) == 0) {
- /* server not supported current mode check client mode */
- switch (present_mode) {
- case TPL_DISPLAY_PRESENT_MODE_FIFO:
- case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED:
- case TPL_DISPLAY_PRESENT_MODE_MAILBOX:
- case TPL_DISPLAY_PRESENT_MODE_IMMEDIATE:
- break;
- default:
- TPL_ERR("Unsupported present mode: %d", present_mode);
- return TPL_ERROR_INVALID_PARAMETER;
- }
- }
-
- bufmgr = tbm_bufmgr_init(-1);
- capability = tbm_bufmgr_get_capability(bufmgr);
- tbm_bufmgr_deinit(bufmgr);
-
- if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
- surf_source->tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
- disp_source->wl_tbm_client,
- surf_source->surf,
- buffer_count,
- width, height,
- TBM_FORMAT_ARGB8888);
- } else {
- surf_source->tbm_queue = wayland_tbm_client_create_surface_queue(
- disp_source->wl_tbm_client,
- surf_source->surf,
- buffer_count,
- width, height,
- TBM_FORMAT_ARGB8888);
- }
-
- if (!surf_source->tbm_queue) {
- TPL_ERR("TBM surface queue creation failed!");
- return TPL_ERROR_OUT_OF_MEMORY;
- }
-
- if (tbm_surface_queue_add_reset_cb(surf_source->tbm_queue,
- __cb_tbm_queue_reset_callback,
- surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
- surf_source->tbm_queue);
- tbm_surface_queue_destroy(surf_source->tbm_queue);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- if (tbm_surface_queue_set_modes(surf_source->tbm_queue,
- TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
- TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
- surf_source->tbm_queue);
- tbm_surface_queue_destroy(surf_source->tbm_queue);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- if (tbm_surface_queue_add_trace_cb(surf_source->tbm_queue,
- __cb_tbm_queue_trace_callback,
- (void *)surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register trace callback to tbm_surface_queue(%p)",
- surf_source->tbm_queue);
- tbm_surface_queue_destroy(surf_source->tbm_queue);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- if (tbm_surface_queue_add_acquirable_cb(surf_source->tbm_queue,
- __cb_tbm_queue_acquirable_callback,
- (void *)surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
- surf_source->tbm_queue);
- tbm_surface_queue_destroy(surf_source->tbm_queue);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- if (tbm_surface_queue_add_dequeuable_cb(surf_source->tbm_queue,
- __cb_tbm_queue_dequeueable_callback,
- (void *)surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register dequeueable callback to tbm_surface_queue(%p)",
- surf_source->tbm_queue);
- tbm_surface_queue_destroy(surf_source->tbm_queue);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- if (present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO
- || present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED) {
- surf_source->vblank_waiting_buffers = __tpl_list_alloc();
- }
-
- surf_source->format = format;
- surf_source->swapchain_properties.width = width;
- surf_source->swapchain_properties.height = height;
- surf_source->swapchain_properties.present_mode = present_mode;
- surf_source->swapchain_properties.buffer_count = buffer_count;
-
- TPL_LOG_T(BACKEND, "[SWAPCHAIN_CREATE][1/2] twe_surface(%p) tbm_queue(%p)",
- twe_surface, surf_source->tbm_queue);
- TPL_LOG_T(BACKEND,
- "[SWAPCHAIN_CREATE][2/2] w(%d) h(%d) f(%d) p(%d) b_cnt(%d)",
- width, height, format, present_mode, buffer_count);
-
- return TPL_ERROR_NONE;
-}
-
-tpl_result_t
-twe_surface_destroy_swapchain(twe_surface_h twe_surface)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
-
- TPL_LOG_T(BACKEND, "[SWAPCHAIN_DESTROY] twe_surface(%p) tbm_queue(%p)",
- twe_surface, surf_source->tbm_queue);
-
- /* Waiting for vblank to commit all draw done buffers.*/
- while (surf_source->vblank_waiting_buffers &&
- !__tpl_list_is_empty(surf_source->vblank_waiting_buffers)) {
- __tpl_util_sys_yield();
- }
-
- if (surf_source->committed_buffers) {
- g_mutex_lock(&surf_source->surf_mutex);
- while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- tbm_surface_h tbm_surface =
- __tpl_list_pop_front(surf_source->committed_buffers,
- (tpl_free_func_t)__cb_buffer_remove_from_list);
-
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
- tbm_surface, tsq_err);
- }
- g_mutex_unlock(&surf_source->surf_mutex);
- }
-
- if (surf_source->tbm_queue) {
- tbm_surface_queue_destroy(surf_source->tbm_queue);
- surf_source->tbm_queue = NULL;
- }
-
- return TPL_ERROR_NONE;
-}
-
-tpl_result_t
-twe_surface_get_swapchain_buffers(twe_surface_h twe_surface,
- tbm_surface_h *surfaces,
- int *buffer_count)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
- twe_wl_disp_source *disp_source = NULL;
- int ret = 1;
-
- if (!buffer_count) {
- TPL_ERR("Invalid parameter. buffer_count is NULL.");
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (!surfaces) {
- *buffer_count = tbm_surface_queue_get_size(surf_source->tbm_queue);
- return TPL_ERROR_NONE;
- }
-
- disp_source = surf_source->disp_source;
-
- ret = wayland_tbm_client_queue_get_surfaces(
- disp_source->wl_tbm_client,
- surf_source->tbm_queue,
- surfaces, buffer_count);
- if (!ret) {
- TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
- disp_source->wl_tbm_client, surf_source->tbm_queue);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- return TPL_ERROR_NONE;
-}
-
-tpl_result_t
-twe_surface_set_rotate_callback(twe_surface_h twe_surface,
- void *data, tpl_surface_cb_func_t rotate_cb)
-{
- twe_wl_surf_source *source = (twe_wl_surf_source *)twe_surface;
- if (!source) {
- TPL_ERR("Invalid parameter. twe_surface is NULL.");
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (!data || !rotate_cb) {
- TPL_ERR("Invalid parameter. data(%p) rotate_cb(%p)",
- data, rotate_cb);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- source->cb_data = data;
- source->rotate_cb = rotate_cb;
-
- return TPL_ERROR_NONE;
-}
-
-int
-twe_surface_get_rotation(twe_surface_h twe_surface)
-{
- twe_wl_surf_source *source = (twe_wl_surf_source *)twe_surface;
- if (!source) {
- TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
- return -1;
- }
-
- return source->rotation;
-}
-
-void
-twe_surface_set_rotation_capablity(twe_surface_h twe_surface, tpl_bool_t set)
-{
- twe_wl_surf_source *source = (twe_wl_surf_source *)twe_surface;
- if (!source) {
- TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
- return;
- }
-
- TPL_LOG_T(BACKEND, "twe_surface(%p) rotation capability set to [%s]",
- source, (set ? "TRUE" : "FALSE"));
-
- source->rotation_capability = set;
-}
-
-tpl_result_t
-twe_surface_set_damage_region(tbm_surface_h tbm_surface,
- int num_rects,
- const int *rects)
-{
- twe_wl_buffer_info *buf_info = NULL;
-
- if (!tbm_surface || !tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (num_rects == 0 || rects == NULL) {
- return TPL_ERROR_NONE;
- }
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (!buf_info) {
- TPL_ERR("Failed to get twe_wl_buffer_info from tbm_surface(%p)",
- tbm_surface);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- /* Destroy old region if there are old region info. */
- if (buf_info->rects != NULL) {
- free(buf_info->rects);
- buf_info->rects = NULL;
- buf_info->num_rects = 0;
- }
-
- buf_info->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
- buf_info->num_rects = num_rects;
-
- if (!buf_info->rects) {
- TPL_ERR("Failed to allocate memory fo damage rects info.");
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- memcpy((char *)buf_info->rects, (char *)rects, sizeof(int) * 4 * num_rects);
-
- return TPL_ERROR_NONE;
-}
-
-tpl_bool_t
-twe_surface_check_activated(twe_surface_h twe_surface)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
- twe_wl_disp_source *disp_source = surf_source->disp_source;
-
- return wayland_tbm_client_queue_check_activate(disp_source->wl_tbm_client,
- surf_source->tbm_queue);
-}
-
-tpl_bool_t
-twe_surface_check_commit_needed(twe_surface_h twe_surface,
- tbm_surface_h tbm_surface)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
- twe_wl_buffer_info *buf_info = NULL;
-
- if (!surf_source) {
- TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
- return TPL_FALSE;
- }
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (!buf_info) {
- TPL_ERR("Failed to get buf_info from tbm_surface(%p).", tbm_surface);
- return TPL_FALSE;
- }
-
- return buf_info->need_to_commit;
-}
-
-static gboolean
-_twe_thread_fence_wait_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
-{
- twe_fence_wait_source *wait_source = (twe_fence_wait_source *)source;
- twe_wl_surf_source *surf_source = wait_source->surf_source;
- tbm_surface_h tbm_surface = wait_source->tbm_surface;
- GIOCondition cond = g_source_query_unix_fd(source, wait_source->tag);
-
- if (cond & G_IO_IN) {
- TPL_LOG_T(BACKEND, "[RENDER DONE] wait_source(%p) tbm_surface(%p) fence_fd(%d)",
- wait_source, tbm_surface, wait_source->fence_fd);
- } else {
- /* When some io errors occur, it is not considered as a critical error.
- * There may be problems with the screen, but it does not affect the operation. */
- TPL_WARN("Invalid GIOCondition occured. fd(%d) cond(%d)",
- wait_source->fence_fd, cond);
- }
-
- surf_source->render_done_cnt++;
-
- TRACE_ASYNC_END((int)wait_source, "FENCE WAIT fd(%d)", wait_source->fence_fd);
-
- g_mutex_lock(&surf_source->surf_mutex);
- /* Since this source is going to be removed, acquire_and_commit must be
- * executed even in a situation other than G_IO_IN.
- * Nevertheless, there may be room for improvement. */
- _twe_thread_wl_surface_acquire_and_commit(surf_source);
- tbm_surface_internal_unref(tbm_surface);
-
- __tpl_list_remove_data(surf_source->fence_waiting_sources,
- (void *)wait_source, TPL_FIRST, NULL);
- g_mutex_unlock(&surf_source->surf_mutex);
-
- /* This source is used only once and does not allow reuse.
- * So finalize will be executed immediately. */
- g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag);
- g_source_destroy(&wait_source->gsource);
- g_source_unref(&wait_source->gsource);
-
- return G_SOURCE_REMOVE;
-}
-
-static void
-_twe_thread_fence_wait_source_finalize(GSource *source)
-{
- twe_fence_wait_source *wait_source = (twe_fence_wait_source *)source;
-
- TPL_DEBUG("[FINALIZE] wait_source(%p) fence_fd(%d)",
- wait_source, wait_source->fence_fd);
-
- close(wait_source->fence_fd);
-
- wait_source->fence_fd = -1;
- wait_source->surf_source = NULL;
- wait_source->tbm_surface = NULL;
- wait_source->tag = NULL;
-}
-
-static GSourceFuncs _twe_fence_wait_source_funcs = {
- .prepare = NULL,
- .check = NULL,
- .dispatch = _twe_thread_fence_wait_source_dispatch,
- .finalize = _twe_thread_fence_wait_source_finalize,
-};
-
-tpl_result_t
-_twe_thread_fence_wait_source_attach(twe_wl_surf_source *surf_source,
- tbm_surface_h tbm_surface, tbm_fd sync_fd)
-{
- twe_fence_wait_source *wait_source = NULL;
-
- wait_source = (twe_fence_wait_source *)g_source_new(&_twe_fence_wait_source_funcs,
- sizeof(twe_fence_wait_source));
- if (!wait_source) {
- TPL_ERR("[WAIT_SOURCE] Failed to create GSource");
- return TPL_ERROR_OUT_OF_MEMORY;
- }
-
- TRACE_ASYNC_BEGIN((int)wait_source, "FENCE WAIT fd(%d)", sync_fd);
-
- tbm_surface_internal_ref(tbm_surface);
-
- wait_source->fence_fd = sync_fd;
- wait_source->surf_source = surf_source;
- wait_source->tbm_surface = tbm_surface;
-
- wait_source->tag = g_source_add_unix_fd(&wait_source->gsource,
- wait_source->fence_fd,
- G_IO_IN);
-
- /* When waiting is over, it will be removed from the list. */
- __tpl_list_push_back(surf_source->fence_waiting_sources, (void *)wait_source);
-
- g_source_attach(&wait_source->gsource, g_main_loop_get_context(_twe_ctx->twe_loop));
-
- TPL_LOG_T(BACKEND, "fence_wait_source(%p) attached | tbm_surface(%p) fence_fd(%d)",
- wait_source, tbm_surface, sync_fd);
-
- return TPL_ERROR_NONE;
-}
-
-tpl_result_t
-twe_surface_set_sync_fd(twe_surface_h twe_surface,
- tbm_surface_h tbm_surface, tbm_fd sync_fd)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
- tpl_result_t ret = TPL_ERROR_NONE;
- twe_wl_buffer_info *buf_info = NULL;
-
- if (!surf_source) {
- TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (!tbm_surface || !tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (!buf_info) {
- TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (surf_source->use_surface_sync) {
- if (buf_info->acquire_fence_fd != -1)
- close(buf_info->acquire_fence_fd);
- buf_info->acquire_fence_fd = sync_fd;
- } else {
- /* The sync_info being pushed will be popped when surface_dispatch
- * is called and attached to the twe_thread. */
- struct sync_info *sync = (struct sync_info *)calloc(1, sizeof(struct sync_info));
- if (sync) {
- sync->sync_fd = sync_fd;
- sync->tbm_surface = tbm_surface;
-
- if (surf_source->render_done_fences) {
- g_mutex_lock(&surf_source->surf_mutex);
- __tpl_list_push_back(surf_source->render_done_fences,
- (void *)sync);
- surf_source->use_sync_fence = TPL_TRUE;
- TPL_DEBUG("[SET_SYNC_FD] surf_source(%p) tbm_surface(%p) sync_fd(%d)",
- surf_source, tbm_surface, sync_fd);
- g_mutex_unlock(&surf_source->surf_mutex);
- } else {
- surf_source->use_sync_fence = TPL_FALSE;
- free(sync);
- }
- }
- }
-
- return ret;
-}
-
-tbm_fd
-twe_surface_create_sync_fd(tbm_surface_h tbm_surface)
-{
- twe_wl_buffer_info *buf_info = NULL;
- tbm_fd sync_fd = -1;
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
-
- if (buf_info) {
- char name[32];
- snprintf(name, 32, "%d",
- _get_tbm_surface_bo_name(tbm_surface));
- sync_fd = tbm_sync_fence_create(buf_info->sync_timeline,
- name,
- buf_info->sync_timestamp);
- if (sync_fd == -1) {
- char buf[1024];
- strerror_r(errno, buf, sizeof(buf));
- TPL_ERR("Failed to create TBM sync fence: %d(%s)", errno, buf);
- }
-
- buf_info->sync_fd = sync_fd;
- }
-
- return sync_fd;
-}
-
-tbm_fd
-twe_surface_get_buffer_release_fence_fd(twe_surface_h twe_surface,
- tbm_surface_h tbm_surface)
-{
- twe_wl_buffer_info *buf_info = NULL;
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
- tbm_fd release_fence_fd = -1;
-
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (surf_source->use_surface_sync &&
- surf_source->disp_source->use_explicit_sync &&
- buf_info) {
- release_fence_fd = buf_info->release_fence_fd;
- TPL_DEBUG("surf_source(%p) buf_info(%p) release_fence_fd(%d)",
- surf_source, buf_info, release_fence_fd);
- }
-
- return release_fence_fd;
-}
-
-tpl_result_t
-twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
- twe_wl_disp_source *disp_source = NULL;
- gint64 end_time;
-
- disp_source = surf_source->disp_source;
-
- if (timeout_ns != UINT64_MAX)
- end_time = g_get_monotonic_time() + (timeout_ns / 1000);
-
- while (!tbm_surface_queue_can_dequeue(surf_source->tbm_queue, 0)) {
- gboolean ret = FALSE;
-
- g_mutex_unlock(&disp_source->wl_event_mutex);
-
- /* wait until dequeueable */
- g_mutex_lock(&surf_source->free_queue_mutex);
-
- if (timeout_ns != UINT64_MAX) {
- ret = g_cond_wait_until(&surf_source->free_queue_cond,
- &surf_source->free_queue_mutex,
- end_time);
- if (ret == FALSE) {
- TPL_WARN("time out to wait dequeueable.");
- g_mutex_lock(&disp_source->wl_event_mutex);
- g_mutex_unlock(&surf_source->free_queue_mutex);
- return TPL_ERROR_TIME_OUT;
- }
- } else {
- g_cond_wait(&surf_source->free_queue_cond,
- &surf_source->free_queue_mutex);
- }
- g_mutex_unlock(&surf_source->free_queue_mutex);
- g_mutex_lock(&disp_source->wl_event_mutex);
- }
-
- return TPL_ERROR_NONE;
-}
-
-tpl_result_t
-twe_surface_queue_force_flush(twe_surface_h twe_surface)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
-
- _twe_print_buffer_list(twe_surface);
-
- if ((tsq_err = tbm_surface_queue_flush(surf_source->tbm_queue))
- != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("[TIMEOUT_RESET] Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
- surf_source->tbm_queue, tsq_err);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- g_mutex_lock(&surf_source->surf_mutex);
- if (surf_source->committed_buffers) {
- while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
- tbm_surface_h tbm_surface =
- __tpl_list_pop_front(surf_source->committed_buffers,
- (tpl_free_func_t)__cb_buffer_remove_from_list);
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
- tbm_surface, tsq_err);
- }
- }
- g_mutex_unlock(&surf_source->surf_mutex);
-
- TPL_LOG_T(BACKEND,
- "[FORCE_FLUSH] surf_source(%p) tbm_queue(%p)",
- surf_source, surf_source->tbm_queue);
-
- return TPL_ERROR_NONE;
-}
-
-
-tpl_bool_t
-twe_check_native_handle_is_wl_display(tpl_handle_t display)
-{
- struct wl_interface *wl_egl_native_dpy = *(void **) display;
-
- if (!wl_egl_native_dpy) {
- TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy);
- return TPL_FALSE;
- }
-
- /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
- is a memory address pointing the structure of wl_display_interface. */
- if (wl_egl_native_dpy == &wl_display_interface)
- return TPL_TRUE;
-
- if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
- strlen(wl_display_interface.name)) == 0) {
- return TPL_TRUE;
- }
-
- return TPL_FALSE;
-}
-
-tpl_result_t
-twe_get_native_window_info(tpl_handle_t window, int *width, int *height,
- tbm_format *format, int a_size)
-{
- struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window;
- if (!wl_egl_window) {
- TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (width) *width = wl_egl_window->width;
- if (height) *height = wl_egl_window->height;
- if (format) {
- struct tizen_private *tizen_private = _get_tizen_private(wl_egl_window);
- if (tizen_private && tizen_private->data) {
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)tizen_private->data;
- *format = surf_source->format;
- } else {
- if (a_size == 8)
- *format = TBM_FORMAT_ARGB8888;
- else
- *format = TBM_FORMAT_XRGB8888;
- }
- }
-
- return TPL_ERROR_NONE;
-}
-
-tbm_surface_h
-twe_get_native_buffer_from_pixmap(tpl_handle_t pixmap)
-{
- tbm_surface_h tbm_surface = NULL;
-
- if (!pixmap) {
- TPL_ERR("Invalid parameter. tpl_handle_t(%p)", pixmap);
- return NULL;
- }
-
- tbm_surface = wayland_tbm_server_get_surface(NULL,
- (struct wl_resource *)pixmap);
- if (!tbm_surface) {
- TPL_ERR("Failed to get tbm_surface from wayland_tbm.");
- return NULL;
- }
-
- return tbm_surface;
-}
-
-tpl_result_t
-twe_surface_set_post_interval(twe_surface_h twe_surface, int post_interval)
-{
- twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
-
- surf_source->post_interval = post_interval;
-
- TPL_LOG_T(BACKEND, "surf_source(%p) post_interval(%d)",
- surf_source, surf_source->post_interval);
-
- return TPL_ERROR_NONE;
-}
+++ /dev/null
-#include <tbm_surface.h>
-#include <tbm_surface_queue.h>
-#include <wayland-client.h>
-
-#include "tpl.h"
-
-typedef struct _twe_thread twe_thread;
-typedef struct _twe_thread_context twe_thread_context;
-
-typedef void* twe_display_h;
-typedef void* twe_surface_h;
-
-twe_thread*
-twe_thread_create(void);
-
-void
-twe_thread_destroy(twe_thread* thread);
-
-twe_display_h
-twe_display_add(twe_thread* thread,
- struct wl_display *display,
- tpl_backend_type_t backend);
-
-tpl_result_t
-twe_display_del(twe_display_h display);
-
-tpl_result_t
-twe_display_lock(twe_display_h display);
-
-void
-twe_display_unlock(twe_display_h display);
-
-tpl_result_t
-twe_display_get_buffer_count(twe_display_h display,
- int *min,
- int *max);
-
-tpl_result_t
-twe_display_get_present_mode(twe_display_h display,
- int *present_modes);
-
-twe_surface_h
-twe_surface_add(twe_thread* thread,
- twe_display_h twe_display,
- tpl_handle_t native_handle,
- int format, int num_buffers);
-
-tpl_result_t
-twe_surface_del(twe_surface_h twe_surface);
-
-tpl_result_t
-twe_surface_create_swapchain(twe_surface_h twe_surface,
- int width, int height, int format,
- int buffer_count, int present_mode);
-tpl_result_t
-twe_surface_destroy_swapchain(twe_surface_h twe_surface);
-
-tpl_result_t
-twe_surface_get_swapchain_buffers(twe_surface_h twe_surface,
- tbm_surface_h *surfaces,
- int *buffer_count);
-
-tbm_surface_queue_h
-twe_surface_get_tbm_queue(twe_surface_h twe_surface);
-
-tpl_result_t
-twe_surface_set_rotate_callback(twe_surface_h twe_surface,
- void *data, tpl_surface_cb_func_t rotate_cb);
-
-int
-twe_surface_get_rotation(twe_surface_h twe_surface);
-
-void
-twe_surface_set_rotation_capablity(twe_surface_h twe_surface, tpl_bool_t set);
-
-tpl_bool_t
-twe_surface_check_activated(twe_surface_h twe_surface);
-
-tpl_bool_t
-twe_surface_check_commit_needed(twe_surface_h twe_surface,
- tbm_surface_h tbm_surface);
-
-tpl_result_t
-twe_surface_set_damage_region(tbm_surface_h tbm_surface,
- int num_rects, const int *rects);
-
-tpl_result_t
-twe_surface_set_sync_fd(twe_surface_h twe_surface,
- tbm_surface_h tbm_surface, tbm_fd sync_fd);
-
-tbm_fd
-twe_surface_create_sync_fd(tbm_surface_h tbm_surface);
-
-tbm_fd
-twe_surface_get_buffer_release_fence_fd(twe_surface_h twe_surface,
- tbm_surface_h tbm_surface);
-
-tpl_result_t
-twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns);
-
-tpl_result_t
-twe_surface_queue_force_flush(twe_surface_h twe_surface);
-
-tpl_bool_t
-twe_check_native_handle_is_wl_display(tpl_handle_t display);
-
-tpl_result_t
-twe_get_native_window_info(tpl_handle_t window, int *width, int *height, tbm_format *format, int a_size);
-
-tbm_surface_h
-twe_get_native_buffer_from_pixmap(tpl_handle_t pixmap);
-
-tpl_result_t
-twe_surface_set_post_interval(twe_surface_h twe_surface, int post_interval);
+++ /dev/null
-#define inline __inline__
-
-#undef inline
-
-#include "tpl_internal.h"
-
-#include <string.h>
-#include <fcntl.h>
-#include <unistd.h>
-
-#include <tbm_bufmgr.h>
-#include <tbm_surface.h>
-#include <tbm_surface_internal.h>
-#include <tbm_surface_queue.h>
-
-#include "tpl_wayland_egl_thread.h"
-
-/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
-#define CLIENT_QUEUE_SIZE 3
-
-typedef struct _tpl_wayland_egl_display tpl_wayland_egl_display_t;
-typedef struct _tpl_wayland_egl_surface tpl_wayland_egl_surface_t;
-
-struct _tpl_wayland_egl_display {
- twe_thread *wl_egl_thread;
- twe_display_h twe_display;
-};
-
-struct _tpl_wayland_egl_surface {
- tpl_object_t base;
- twe_surface_h twe_surface;
- tbm_surface_queue_h tbm_queue;
- tpl_bool_t is_activated;
- tpl_bool_t reset; /* TRUE if queue reseted by external */
- tpl_bool_t need_to_enqueue;
-};
-
-static tpl_result_t
-__tpl_wl_egl_display_init(tpl_display_t *display)
-{
- tpl_wayland_egl_display_t *wayland_egl_display = NULL;
-
- TPL_ASSERT(display);
-
- /* Do not allow default display in wayland. */
- if (!display->native_handle) {
- TPL_ERR("Invalid native handle for display.");
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- wayland_egl_display = (tpl_wayland_egl_display_t *) calloc(1,
- sizeof(tpl_wayland_egl_display_t));
- if (!wayland_egl_display) {
- TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_display_t.");
- return TPL_ERROR_OUT_OF_MEMORY;
- }
-
- display->backend.data = wayland_egl_display;
- display->bufmgr_fd = -1;
-
- if (twe_check_native_handle_is_wl_display(display->native_handle)) {
- wayland_egl_display->wl_egl_thread = twe_thread_create();
- if (!wayland_egl_display->wl_egl_thread) {
- TPL_ERR("Failed to create twe_thread.");
- goto free_display;
- }
-
- wayland_egl_display->twe_display =
- twe_display_add(wayland_egl_display->wl_egl_thread,
- display->native_handle,
- display->backend.type);
- if (!wayland_egl_display->twe_display) {
- TPL_ERR("Failed to add native_display(%p) to thread(%p)",
- display->native_handle,
- wayland_egl_display->wl_egl_thread);
- goto free_display;
- }
-
- } else {
- TPL_ERR("Invalid native handle for display.");
- goto free_display;
- }
-
- TPL_LOG_T("WL_EGL",
- "[INIT DISPLAY] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)",
- wayland_egl_display,
- wayland_egl_display->wl_egl_thread,
- wayland_egl_display->twe_display);
-
- return TPL_ERROR_NONE;
-
-free_display:
- if (wayland_egl_display->twe_display)
- twe_display_del(wayland_egl_display->twe_display);
- if (wayland_egl_display->wl_egl_thread)
- twe_thread_destroy(wayland_egl_display->wl_egl_thread);
- wayland_egl_display->wl_egl_thread = NULL;
- wayland_egl_display->twe_display = NULL;
-
- free(wayland_egl_display);
- display->backend.data = NULL;
- return TPL_ERROR_INVALID_OPERATION;
-}
-
-static void
-__tpl_wl_egl_display_fini(tpl_display_t *display)
-{
- tpl_wayland_egl_display_t *wayland_egl_display;
-
- TPL_ASSERT(display);
-
- wayland_egl_display = (tpl_wayland_egl_display_t *)display->backend.data;
- if (wayland_egl_display) {
-
- TPL_LOG_T("WL_EGL",
- "[FINI] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)",
- wayland_egl_display,
- wayland_egl_display->wl_egl_thread,
- wayland_egl_display->twe_display);
-
- if (wayland_egl_display->twe_display) {
- tpl_result_t ret = TPL_ERROR_NONE;
- ret = twe_display_del(wayland_egl_display->twe_display);
- if (ret != TPL_ERROR_NONE)
- TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)",
- wayland_egl_display->twe_display,
- wayland_egl_display->wl_egl_thread);
- wayland_egl_display->twe_display = NULL;
- }
-
- if (wayland_egl_display->wl_egl_thread) {
- twe_thread_destroy(wayland_egl_display->wl_egl_thread);
- wayland_egl_display->wl_egl_thread = NULL;
- }
-
- free(wayland_egl_display);
- }
-
- display->backend.data = NULL;
-}
-
-static tpl_result_t
-__tpl_wl_egl_display_query_config(tpl_display_t *display,
- tpl_surface_type_t surface_type,
- int red_size, int green_size,
- int blue_size, int alpha_size,
- int color_depth, int *native_visual_id,
- tpl_bool_t *is_slow)
-{
- TPL_ASSERT(display);
-
- if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
- green_size == 8 && blue_size == 8 &&
- (color_depth == 32 || color_depth == 24)) {
-
- if (alpha_size == 8) {
- if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
- if (is_slow) *is_slow = TPL_FALSE;
- return TPL_ERROR_NONE;
- }
- if (alpha_size == 0) {
- if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
- if (is_slow) *is_slow = TPL_FALSE;
- return TPL_ERROR_NONE;
- }
- }
-
- return TPL_ERROR_INVALID_PARAMETER;
-}
-
-static tpl_result_t
-__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
- int alpha_size)
-{
- TPL_IGNORE(display);
- TPL_IGNORE(visual_id);
- TPL_IGNORE(alpha_size);
- return TPL_ERROR_NONE;
-}
-
-static tpl_result_t
-__tpl_wl_egl_display_get_window_info(tpl_display_t *display,
- tpl_handle_t window, int *width,
- int *height, tbm_format *format,
- int depth, int a_size)
-{
- tpl_result_t ret = TPL_ERROR_NONE;
-
- TPL_ASSERT(display);
- TPL_ASSERT(window);
-
- if ((ret = twe_get_native_window_info(window, width, height, format, a_size))
- != TPL_ERROR_NONE) {
- TPL_ERR("Failed to get size info of native_window(%p)", window);
- }
-
- return ret;
-}
-
-static tpl_result_t
-__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
- tpl_handle_t pixmap, int *width,
- int *height, tbm_format *format)
-{
- tbm_surface_h tbm_surface = NULL;
-
- tbm_surface = twe_get_native_buffer_from_pixmap(pixmap);
- if (!tbm_surface) {
- TPL_ERR("Failed to get tbm_surface_h from native pixmap.");
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- if (width) *width = tbm_surface_get_width(tbm_surface);
- if (height) *height = tbm_surface_get_height(tbm_surface);
- if (format) *format = tbm_surface_get_format(tbm_surface);
-
- return TPL_ERROR_NONE;
-}
-
-static tbm_surface_h
-__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
-{
- tbm_surface_h tbm_surface = NULL;
-
- TPL_ASSERT(pixmap);
-
- tbm_surface = twe_get_native_buffer_from_pixmap(pixmap);
- if (!tbm_surface) {
- TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
- return NULL;
- }
-
- return tbm_surface;
-}
-
-static void
-__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue,
- void *data)
-{
- tpl_surface_t *surface = NULL;
- tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
- tpl_bool_t is_activated = TPL_FALSE;
- int width, height;
-
- surface = (tpl_surface_t *)data;
- TPL_CHECK_ON_NULL_RETURN(surface);
-
- wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
- TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface);
-
- /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
- * the changed window size at the next frame. */
- width = tbm_surface_queue_get_width(surface_queue);
- height = tbm_surface_queue_get_height(surface_queue);
- if (surface->width != width || surface->height != height) {
- TPL_LOG_T("WL_EGL",
- "[QUEUE_RESIZE_CB] wayland_egl_surface(%p) tbm_queue(%p) (%dx%d)",
- wayland_egl_surface, surface_queue, width, height);
- }
-
- /* When queue_reset_callback is called, if is_activated is different from
- * its previous state change the reset flag to TPL_TRUE to get a new buffer
- * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
- is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface);
- if (wayland_egl_surface->is_activated != is_activated) {
- if (is_activated) {
- TPL_LOG_T("WL_EGL",
- "[ACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)",
- wayland_egl_surface, surface_queue);
- } else {
- TPL_LOG_T("WL_EGL",
- "[DEACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)",
- wayland_egl_surface, surface_queue);
- }
- }
-
- wayland_egl_surface->reset = TPL_TRUE;
-
- if (surface->reset_cb)
- surface->reset_cb(surface->reset_data);
-}
-
-void __cb_window_rotate_callback(void *data)
-{
- tpl_surface_t *surface = (tpl_surface_t *)data;
- tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
- int rotation;
-
- if (!surface) {
- TPL_ERR("Inavlid parameter. surface is NULL.");
- return;
- }
-
- wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
- if (!wayland_egl_surface) {
- TPL_ERR("Invalid parameter. surface->backend.data is NULL");
- return;
- }
-
- rotation = twe_surface_get_rotation(wayland_egl_surface->twe_surface);
-
- surface->rotation = rotation;
-}
-
-static tpl_result_t
-__tpl_wl_egl_surface_init(tpl_surface_t *surface)
-{
- tpl_wayland_egl_display_t *wayland_egl_display = NULL;
- tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
- tbm_surface_queue_h tbm_queue = NULL;
- twe_surface_h twe_surface = NULL;
- tpl_result_t ret = TPL_ERROR_NONE;
-
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
- TPL_ASSERT(surface->native_handle);
-
- wayland_egl_display =
- (tpl_wayland_egl_display_t *)surface->display->backend.data;
- if (!wayland_egl_display) {
- TPL_ERR("Invalid parameter. wayland_egl_display(%p)",
- wayland_egl_display);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- wayland_egl_surface = (tpl_wayland_egl_surface_t *) calloc(1,
- sizeof(tpl_wayland_egl_surface_t));
- if (!wayland_egl_surface) {
- TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_surface_t.");
- return TPL_ERROR_OUT_OF_MEMORY;
- }
-
- surface->backend.data = (void *)wayland_egl_surface;
-
- if (__tpl_object_init(&wayland_egl_surface->base,
- TPL_OBJECT_SURFACE,
- NULL) != TPL_ERROR_NONE) {
- TPL_ERR("Failed to initialize backend surface's base object!");
- goto object_init_fail;
- }
-
- twe_surface = twe_surface_add(wayland_egl_display->wl_egl_thread,
- wayland_egl_display->twe_display,
- surface->native_handle,
- surface->format, surface->num_buffers);
- if (!twe_surface) {
- TPL_ERR("Failed to add native_window(%p) to thread(%p)",
- surface->native_handle, wayland_egl_display->wl_egl_thread);
- goto create_twe_surface_fail;
- }
-
- tbm_queue = twe_surface_get_tbm_queue(twe_surface);
- if (!tbm_queue) {
- TPL_ERR("Failed to get tbm_queue from twe_surface(%p)", twe_surface);
- goto queue_create_fail;
- }
-
- /* Set reset_callback to tbm_queue */
- if (tbm_surface_queue_add_reset_cb(tbm_queue,
- __cb_tbm_surface_queue_reset_callback,
- (void *)surface)) {
- TPL_ERR("TBM surface queue add reset cb failed!");
- goto add_reset_cb_fail;
- }
-
- wayland_egl_surface->reset = TPL_FALSE;
- wayland_egl_surface->twe_surface = twe_surface;
- wayland_egl_surface->tbm_queue = tbm_queue;
- wayland_egl_surface->is_activated = TPL_FALSE;
- wayland_egl_surface->need_to_enqueue = TPL_TRUE;
-
- surface->width = tbm_surface_queue_get_width(tbm_queue);
- surface->height = tbm_surface_queue_get_height(tbm_queue);
- surface->rotation = twe_surface_get_rotation(twe_surface);
-
- ret = twe_surface_set_rotate_callback(twe_surface, (void *)surface,
- (tpl_surface_cb_func_t)__cb_window_rotate_callback);
- if (ret != TPL_ERROR_NONE) {
- TPL_WARN("Failed to register rotate callback.");
- }
-
- TPL_LOG_T("WL_EGL",
- "[INIT1/2]tpl_surface(%p) tpl_wayland_egl_surface(%p) twe_surface(%p)",
- surface, wayland_egl_surface, twe_surface);
- TPL_LOG_T("WL_EGL",
- "[INIT2/2]size(%dx%d)rot(%d)|tbm_queue(%p)|native_window(%p)",
- surface->width, surface->height, surface->rotation,
- tbm_queue, surface->native_handle);
-
- return TPL_ERROR_NONE;
-
-add_reset_cb_fail:
-queue_create_fail:
- twe_surface_del(twe_surface);
-create_twe_surface_fail:
-object_init_fail:
- free(wayland_egl_surface);
- surface->backend.data = NULL;
- return TPL_ERROR_INVALID_OPERATION;
-}
-
-static void
-__tpl_wl_egl_surface_fini(tpl_surface_t *surface)
-{
- tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
- tpl_wayland_egl_display_t *wayland_egl_display = NULL;
-
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->display);
-
- wayland_egl_surface = (tpl_wayland_egl_surface_t *) surface->backend.data;
- TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface);
-
- TPL_OBJECT_LOCK(wayland_egl_surface);
-
- wayland_egl_display = (tpl_wayland_egl_display_t *)
- surface->display->backend.data;
-
- if (wayland_egl_display == NULL) {
- TPL_ERR("check failed: wayland_egl_display == NULL");
- TPL_OBJECT_UNLOCK(wayland_egl_surface);
- return;
- }
-
- if (surface->type == TPL_SURFACE_TYPE_WINDOW) {
- TPL_LOG_T("WL_EGL",
- "[FINI] wayland_egl_surface(%p) native_window(%p) twe_surface(%p)",
- wayland_egl_surface, surface->native_handle,
- wayland_egl_surface->twe_surface);
-
- if (twe_surface_del(wayland_egl_surface->twe_surface)
- != TPL_ERROR_NONE) {
- TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)",
- wayland_egl_surface->twe_surface,
- wayland_egl_display->wl_egl_thread);
- }
-
- wayland_egl_surface->twe_surface = NULL;
- wayland_egl_surface->tbm_queue = NULL;
- }
-
- TPL_OBJECT_UNLOCK(wayland_egl_surface);
- __tpl_object_fini(&wayland_egl_surface->base);
- free(wayland_egl_surface);
- surface->backend.data = NULL;
-}
-
-static tpl_result_t
-__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
- tpl_bool_t set)
-{
- tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
-
- if (!surface) {
- TPL_ERR("Invalid parameter. tpl_surface(%p)", surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
- if (!wayland_egl_surface) {
- TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)",
- surface, wayland_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (!wayland_egl_surface->twe_surface) {
- TPL_ERR("Invalid parameter. wayland_egl_surface(%p) twe_surface(%p)",
- wayland_egl_surface, wayland_egl_surface->twe_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- twe_surface_set_rotation_capablity(wayland_egl_surface->twe_surface,
- set);
-
- return TPL_ERROR_NONE;
-}
-
-static tpl_result_t
-__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
- int post_interval)
-{
- tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
-
- if (!surface) {
- TPL_ERR("Invalid parameter. tpl_surface(%p)", surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
- if (!wayland_egl_surface) {
- TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)",
- surface, wayland_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (!wayland_egl_surface->twe_surface) {
- TPL_ERR("Invalid parameter. wayland_egl_surface(%p) twe_surface(%p)",
- wayland_egl_surface, wayland_egl_surface->twe_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- twe_surface_set_post_interval(wayland_egl_surface->twe_surface,
- post_interval);
-
- return TPL_ERROR_NONE;
-}
-
-static tpl_result_t
-__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
- tbm_surface_h tbm_surface,
- int num_rects, const int *rects, tbm_fd sync_fence)
-{
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(tbm_surface);
- TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
-
- tpl_wayland_egl_surface_t *wayland_egl_surface =
- (tpl_wayland_egl_surface_t *) surface->backend.data;
- tbm_surface_queue_error_e tsq_err;
- tpl_result_t ret = TPL_ERROR_NONE;
- int bo_name = 0;
-
- TPL_OBJECT_LOCK(wayland_egl_surface);
-
- bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
-
- if (!wayland_egl_surface) {
- TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)",
- surface, wayland_egl_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wayland_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (!tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
- tbm_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wayland_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
-
- TPL_LOG_T("WL_EGL",
- "[ENQ] wayland_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)",
- wayland_egl_surface, tbm_surface, bo_name, sync_fence);
-
- /* If there are received region information,
- * save it to buf_info in tbm_surface user_data using below API. */
- if (num_rects && rects) {
- ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects);
- if (ret != TPL_ERROR_NONE) {
- TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)",
- num_rects, rects);
- }
- }
-
- if (!wayland_egl_surface->need_to_enqueue ||
- !twe_surface_check_commit_needed(wayland_egl_surface->twe_surface,
- tbm_surface)) {
- TPL_LOG_T("WL_EGL",
- "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
- ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wayland_egl_surface);
- return TPL_ERROR_NONE;
- }
-
- /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
- * commit if surface->frontbuffer that is already set and the tbm_surface
- * client want to enqueue are the same.
- */
- if (surface->is_frontbuffer_mode) {
- /* The first buffer to be activated in frontbuffer mode must be
- * committed. Subsequence frames do not need to be committed because
- * the buffer is already displayed.
- */
- if (surface->frontbuffer == tbm_surface)
- wayland_egl_surface->need_to_enqueue = TPL_FALSE;
-
- if (sync_fence != -1) {
- close(sync_fence);
- sync_fence = -1;
- }
- }
-
- if (sync_fence != -1) {
- ret = twe_surface_set_sync_fd(wayland_egl_surface->twe_surface,
- tbm_surface, sync_fence);
- if (ret != TPL_ERROR_NONE) {
- TPL_WARN("Failed to set sync fd (%d). But it will continue.",
- sync_fence);
- }
- }
-
- tsq_err = tbm_surface_queue_enqueue(wayland_egl_surface->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- tbm_surface_internal_unref(tbm_surface);
- TPL_ERR("Failed to enqueue tbm_surface(%p). tpl_surface(%p) tsq_err=%d",
- tbm_surface, surface, tsq_err);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wayland_egl_surface);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- tbm_surface_internal_unref(tbm_surface);
-
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wayland_egl_surface);
-
- return TPL_ERROR_NONE;
-}
-
-static tpl_bool_t
-__tpl_wl_egl_surface_validate(tpl_surface_t *surface)
-{
- tpl_bool_t retval = TPL_TRUE;
-
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
-
- tpl_wayland_egl_surface_t *wayland_egl_surface =
- (tpl_wayland_egl_surface_t *)surface->backend.data;
-
- retval = !(wayland_egl_surface->reset);
-
- return retval;
-}
-
-static tpl_result_t
-__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
- tbm_surface_h tbm_surface)
-{
- tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
-
- wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
- if (!wayland_egl_surface) {
- TPL_ERR("Invalid backend surface. surface(%p) wayland_egl_surface(%p)",
- surface, wayland_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (!tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- tbm_surface_internal_unref(tbm_surface);
-
- tsq_err = tbm_surface_queue_cancel_dequeue(wayland_egl_surface->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
- tbm_surface, surface);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
- surface, tbm_surface);
-
- return TPL_ERROR_NONE;
-}
-
-#define CAN_DEQUEUE_TIMEOUT_MS 10000
-
-static tbm_surface_h
-__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
- tbm_fd *sync_fence)
-{
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(surface->display->backend.data);
- TPL_OBJECT_CHECK_RETURN(surface, NULL);
-
- tbm_surface_h tbm_surface = NULL;
- tpl_wayland_egl_surface_t *wayland_egl_surface =
- (tpl_wayland_egl_surface_t *)surface->backend.data;
- tpl_wayland_egl_display_t *wayland_egl_display =
- (tpl_wayland_egl_display_t *)surface->display->backend.data;
- tbm_surface_queue_error_e tsq_err = 0;
- int is_activated = 0;
- int bo_name = 0;
- tpl_result_t lock_ret = TPL_FALSE;
-
- TPL_OBJECT_UNLOCK(surface);
- tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
- wayland_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
- TPL_OBJECT_LOCK(surface);
-
- /* After the can dequeue state, call twe_display_lock to prevent other
- * events from being processed in wayland_egl_thread
- * during below dequeue procedure. */
- lock_ret = twe_display_lock(wayland_egl_display->twe_display);
-
- if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
- TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
- wayland_egl_surface->tbm_queue, surface);
- if (twe_surface_queue_force_flush(wayland_egl_surface->twe_surface)
- != TPL_ERROR_NONE) {
- TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
- wayland_egl_surface->tbm_queue, surface);
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wayland_egl_display->twe_display);
- return NULL;
- } else {
- tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- }
- }
-
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
- wayland_egl_surface->tbm_queue, surface);
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wayland_egl_display->twe_display);
- return NULL;
- }
-
- /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
- * below function [wayland_tbm_client_queue_check_activate()].
- * This function has to be called before tbm_surface_queue_dequeue()
- * in order to know what state the buffer will be dequeued next.
- *
- * ACTIVATED state means non-composite mode. Client can get buffers which
- can be displayed directly(without compositing).
- * DEACTIVATED state means composite mode. Client's buffer will be displayed
- by compositor(E20) with compositing.
- */
- is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface);
- wayland_egl_surface->is_activated = is_activated;
-
- surface->width = tbm_surface_queue_get_width(wayland_egl_surface->tbm_queue);
- surface->height = tbm_surface_queue_get_height(wayland_egl_surface->tbm_queue);
-
- if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
- /* If surface->frontbuffer is already set in frontbuffer mode,
- * it will return that frontbuffer if it is still activated,
- * otherwise dequeue the new buffer after initializing
- * surface->frontbuffer to NULL. */
- if (is_activated && !wayland_egl_surface->reset) {
- TPL_LOG_T("WL_EGL",
- "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
- surface->frontbuffer,
- tbm_bo_export(tbm_surface_internal_get_bo(
- surface->frontbuffer, 0)));
- TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
- "[DEQ]~[ENQ] BO_NAME:%d",
- tbm_bo_export(tbm_surface_internal_get_bo(
- surface->frontbuffer, 0)));
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wayland_egl_display->twe_display);
- return surface->frontbuffer;
- } else {
- surface->frontbuffer = NULL;
- wayland_egl_surface->need_to_enqueue = TPL_TRUE;
- }
- } else {
- surface->frontbuffer = NULL;
- }
-
- tsq_err = tbm_surface_queue_dequeue(wayland_egl_surface->tbm_queue,
- &tbm_surface);
- if (!tbm_surface) {
- TPL_ERR("Failed to dequeue from tbm_queue(%p) surface(%p)| tsq_err = %d",
- wayland_egl_surface->tbm_queue, surface, tsq_err);
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wayland_egl_display->twe_display);
- return NULL;
- }
-
- tbm_surface_internal_ref(tbm_surface);
-
- /* If twe_surface_get_buffer_release_fence_fd return -1,
- * the tbm_surface can be used immediately.
- * If not, user(EGL) have to wait until signaled. */
- if (sync_fence) {
- *sync_fence = twe_surface_get_buffer_release_fence_fd(
- wayland_egl_surface->twe_surface, tbm_surface);
- }
-
- bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
-
- if (surface->is_frontbuffer_mode && is_activated)
- surface->frontbuffer = tbm_surface;
-
- wayland_egl_surface->reset = TPL_FALSE;
-
- TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name);
- TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)",
- tbm_surface, bo_name, sync_fence ? *sync_fence : -1);
-
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wayland_egl_display->twe_display);
-
- return tbm_surface;
-}
-
-void
-__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
-{
- tpl_wayland_egl_surface_t *wayland_egl_surface =
- (tpl_wayland_egl_surface_t *)surface->backend.data;
-
- if (width)
- *width = tbm_surface_queue_get_width(wayland_egl_surface->tbm_queue);
- if (height)
- *height = tbm_surface_queue_get_height(wayland_egl_surface->tbm_queue);
-}
-
-
-tpl_bool_t
-__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
-{
- if (!native_dpy) return TPL_FALSE;
-
- if (twe_check_native_handle_is_wl_display(native_dpy))
- return TPL_TRUE;
-
- return TPL_FALSE;
-}
-
-void
-__tpl_display_init_backend_wl_egl_thread_legacy(tpl_display_backend_t *backend)
-{
- TPL_ASSERT(backend);
-
- backend->type = TPL_BACKEND_WAYLAND_THREAD;
- backend->data = NULL;
-
- backend->init = __tpl_wl_egl_display_init;
- backend->fini = __tpl_wl_egl_display_fini;
- backend->query_config = __tpl_wl_egl_display_query_config;
- backend->filter_config = __tpl_wl_egl_display_filter_config;
- backend->get_window_info = __tpl_wl_egl_display_get_window_info;
- backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
- backend->get_buffer_from_native_pixmap =
- __tpl_wl_egl_display_get_buffer_from_native_pixmap;
-}
-
-void
-__tpl_surface_init_backend_wl_egl_thread_legacy(tpl_surface_backend_t *backend)
-{
- TPL_ASSERT(backend);
-
- backend->type = TPL_BACKEND_WAYLAND_THREAD;
- backend->data = NULL;
-
- backend->init = __tpl_wl_egl_surface_init;
- backend->fini = __tpl_wl_egl_surface_fini;
- backend->validate = __tpl_wl_egl_surface_validate;
- backend->cancel_dequeued_buffer =
- __tpl_wl_egl_surface_cancel_dequeued_buffer;
- backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
- backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
- backend->set_rotation_capability =
- __tpl_wl_egl_surface_set_rotation_capability;
- backend->set_post_interval =
- __tpl_wl_egl_surface_set_post_interval;
- backend->get_size =
- __tpl_wl_egl_surface_get_size;
-}
-
--- /dev/null
+#include <sys/eventfd.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <wayland-client.h>
+#include <wayland-tbm-server.h>
+#include <wayland-tbm-client.h>
+#include <tdm_client.h>
+#include <glib.h>
+#include <glib-unix.h>
+#include <tizen-surface-client-protocol.h>
+#include <wayland-egl-backend.h>
+#include <presentation-time-client-protocol.h>
+#include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
+
+#include "tpl_utils.h"
+#include "tpl_internal.h"
+#include "wayland-egl-tizen/wayland-egl-tizen.h"
+#include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
+#include "tpl_wayland_egl_thread.h"
+#include "wayland-vulkan/wayland-vulkan-client-protocol.h"
+#include "tpl_utils.h"
+
+static int buffer_info_key;
+#define KEY_BUFFER_INFO (unsigned long)(&buffer_info_key)
+
+#define CLIENT_QUEUE_SIZE 3
+#define VK_CLIENT_QUEUE_SIZE 3
+
+/* backend name will be optimized */
+#define BACKEND "WL_VK_GL"
+
+typedef struct _twe_wl_disp_source twe_wl_disp_source;
+typedef struct _twe_wl_surf_source twe_wl_surf_source;
+typedef struct _twe_wl_buffer_info twe_wl_buffer_info;
+typedef struct _twe_tdm_source twe_tdm_source;
+typedef struct _twe_del_source twe_del_source;
+typedef struct _twe_fence_wait_source twe_fence_wait_source;
+
+struct _twe_thread_context {
+ GThread *twe_thread;
+ GMainLoop *twe_loop;
+
+ int ref_cnt;
+
+ tpl_bool_t use_wait_vblank;
+ twe_tdm_source *tdm_source;
+ twe_del_source *tdm_del_source;
+
+ GMutex thread_mutex;
+ GCond thread_cond;
+};
+
+struct _twe_thread {
+ twe_thread_context *ctx;
+ /* TODO : display list */
+};
+
+struct _twe_tdm_source {
+ GSource gsource;
+ gpointer tag;
+ tdm_client *tdm_client;
+ int tdm_fd;
+};
+
+struct feedback_info {
+ struct wp_presentation_feedback *feedback;
+ twe_wl_surf_source *surf_source;
+};
+
+struct _twe_wl_disp_source {
+ GSource gsource;
+ GPollFD gfd;
+ struct wl_display *disp;
+ struct wl_event_queue *ev_queue;
+ struct wayland_tbm_client *wl_tbm_client;
+ struct tizen_surface_shm *tss; /* used for surface buffer_flush */
+ struct wp_presentation *presentation;
+ struct zwp_linux_explicit_synchronization_v1 *explicit_sync;
+ tpl_bool_t use_explicit_sync;
+ struct {
+ int min_buffer;
+ int max_buffer;
+ int present_modes;
+ } surface_capabilities;
+ struct wayland_vulkan *wl_vk_client;
+ tpl_bool_t is_vulkan_dpy;
+ tpl_bool_t prepared;
+ twe_del_source *disp_del_source;
+ twe_thread *thread;
+ GMutex wl_event_mutex;
+
+ int last_error; /* errno of the last wl_display error*/
+ /* TODO : surface list */
+};
+
+struct _twe_del_source {
+ GSource gsource;
+ gpointer tag;
+ int event_fd;
+ void* target_source;
+ void (*destroy_target_source_func)(void *);
+};
+
+
+struct sync_info {
+ tbm_surface_h tbm_surface;
+ int sync_fd;
+};
+
+struct _twe_wl_surf_source {
+ GSource gsource;
+ gpointer tag;
+ int event_fd;
+ struct wl_surface *surf;
+ struct wl_egl_window *wl_egl_window;
+ int latest_transform;
+ int rotation;
+ void *cb_data;
+ int format;
+ struct {
+ int width, height;
+ int buffer_count;
+ int present_mode;
+ } swapchain_properties;
+ tpl_surface_cb_func_t rotate_cb;
+ tpl_bool_t rotation_capability;
+ tpl_list_t *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */
+ tpl_list_t *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */
+ tpl_list_t *fence_waiting_sources; /* Trace fence_wait_source from ENQUEUE to fence signaled */
+ tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
+ tpl_list_t *render_done_fences; /* for attaching to twe_thread with fences passed by enqueue */
+ tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
+ tbm_surface_h draw_done_buffer; /* for MAILBOX mode */
+ int render_done_cnt;
+
+ tdm_client_vblank *vblank;
+ tpl_bool_t vblank_done;
+ tpl_bool_t is_destroying;
+ tpl_bool_t set_serial_is_used; /* Will be deprecated */
+ unsigned int serial;
+ struct tizen_surface_shm_flusher *tss_flusher;
+ tbm_surface_queue_h tbm_queue;
+ twe_wl_disp_source *disp_source;
+ twe_del_source *surf_del_source;
+
+ struct {
+ GMutex mutex;
+ int fd;
+ } commit_sync;
+
+ struct {
+ GMutex mutex;
+ int fd;
+ } presentation_sync;
+
+ GMutex surf_mutex;
+
+ GMutex free_queue_mutex;
+ GCond free_queue_cond;
+
+ /* for waiting draw done */
+ tpl_bool_t use_sync_fence;
+
+ /* to use zwp_linux_surface_synchronization */
+ tpl_bool_t use_surface_sync;
+
+ int post_interval;
+
+ struct zwp_linux_surface_synchronization_v1 *surface_sync;
+
+};
+
+struct _twe_wl_buffer_info {
+ struct wl_proxy *wl_buffer;
+ int dx, dy;
+ int width, height;
+ /* for wayland_tbm_client_set_buffer_transform */
+ int w_transform;
+ tpl_bool_t w_rotated;
+ /* for wl_surface_set_buffer_transform */
+ int transform;
+ /* for damage region */
+ int num_rects;
+ int *rects;
+ tpl_bool_t need_to_commit;
+
+ /* for checking need release */
+ tpl_bool_t need_to_release;
+
+ /* for checking draw done */
+ tpl_bool_t draw_done;
+
+ /* for checking released from display server */
+ tbm_fd sync_timeline;
+ unsigned int sync_timestamp;
+ tbm_fd sync_fd;
+ tpl_bool_t is_vk_image;
+
+ tbm_surface_h tbm_surface;
+
+ twe_wl_surf_source *surf_source;
+
+ /* for wayland_tbm_client_set_buffer_serial */
+ unsigned int serial;
+
+ /* to get release event via zwp_linux_buffer_release_v1 */
+ struct zwp_linux_buffer_release_v1 *buffer_release;
+
+ /* each buffers own its release_fence_fd, until it passes ownership
+ * to it to EGL */
+ int release_fence_fd;
+
+ /* each buffers own its acquire_fence_fd. until it passes ownership
+ * to it to SERVER */
+ int acquire_fence_fd;
+
+ int commit_sync_fd;
+
+ struct wp_presentation_feedback *presentation_feedback;
+ int presentation_sync_fd;
+
+};
+
+struct _twe_fence_wait_source {
+ GSource gsource;
+ gpointer tag;
+ tbm_fd fence_fd;
+ tbm_surface_h tbm_surface;
+ twe_wl_surf_source *surf_source;
+};
+
+static twe_thread_context *_twe_ctx;
+static twe_tdm_source *
+_twe_thread_tdm_source_create(void);
+static void
+_twe_thread_tdm_source_destroy(void *source);
+twe_del_source *
+_twe_del_source_init(twe_thread_context *ctx, void *target_source);
+void
+_twe_del_source_fini(twe_del_source *source);
+static void
+_twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source,
+ tbm_surface_h tbm_surface);
+static void
+_twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source);
+static void
+__cb_buffer_remove_from_list(void *data);
+static tpl_result_t
+_twe_surface_wait_vblank(twe_wl_surf_source *surf_source);
+static struct tizen_private *
+_get_tizen_private(struct wl_egl_window *);
+
+tpl_result_t
+_twe_thread_fence_wait_source_attach(twe_wl_surf_source *surf_source,
+ tbm_surface_h tbm_surface, tbm_fd sync_fd);
+
+static int
+_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
+{
+ return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+}
+
+static gpointer
+_twe_thread_loop(gpointer data)
+{
+ twe_thread_context *ctx = data;
+
+ g_mutex_lock(&ctx->thread_mutex);
+
+ if (ctx->use_wait_vblank) {
+ twe_tdm_source *tdm_source = _twe_thread_tdm_source_create();
+
+ if (tdm_source) {
+ g_source_attach(&tdm_source->gsource,
+ g_main_loop_get_context(ctx->twe_loop));
+ }
+
+ _twe_ctx->tdm_source = tdm_source;
+
+ if (!ctx->tdm_source) {
+ TPL_WARN("Failed to create tdm_source. TPL_WAIT_VLANK:DISABLED");
+ }
+ }
+
+ g_cond_signal(&ctx->thread_cond);
+ g_mutex_unlock(&ctx->thread_mutex);
+
+ g_main_loop_run(ctx->twe_loop);
+
+ return ctx;
+}
+
+static gboolean
+_twe_thread_del_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
+{
+ twe_del_source *del_source = (twe_del_source *)source;
+ tpl_result_t res = TPL_ERROR_NONE;
+ GIOCondition cond;
+
+ g_mutex_lock(&_twe_ctx->thread_mutex);
+
+ cond = g_source_query_unix_fd(source, del_source->tag);
+
+ if (cond & G_IO_IN) {
+ ssize_t s;
+ uint64_t u;
+
+ s = read(del_source->event_fd, &u, sizeof(uint64_t));
+ if (s != sizeof(uint64_t)) {
+ TPL_ERR("Failed to read from event_fd(%d)",
+ del_source->event_fd);
+ res = TPL_ERROR_INVALID_CONNECTION;
+ }
+
+ if (del_source->destroy_target_source_func)
+ del_source->destroy_target_source_func(del_source->target_source);
+ }
+
+ if (cond && !(cond & G_IO_IN)) {
+ TPL_ERR("eventfd(%d) cannot wake up with other condition. cond(%d)",
+ del_source->event_fd, cond);
+ res = TPL_ERROR_INVALID_CONNECTION;
+ }
+
+ if (res != TPL_ERROR_NONE) {
+ g_source_remove_unix_fd(source, del_source->tag);
+
+ TPL_WARN("event_fd(%d) of del_source(%p) has been closed. it will be recreated.",
+ del_source->event_fd, del_source);
+
+ close(del_source->event_fd);
+
+ del_source->event_fd = eventfd(0, EFD_CLOEXEC);
+ if (del_source->event_fd < 0) {
+ TPL_ERR("Failed to create eventfd. errno(%d)", errno);
+ } else {
+ del_source->tag = g_source_add_unix_fd(&del_source->gsource,
+ del_source->event_fd,
+ G_IO_IN);
+ }
+ TPL_DEBUG("[RECREATED] eventfd(%d) tag(%p)", del_source->event_fd, del_source->tag);
+ }
+
+ g_cond_signal(&_twe_ctx->thread_cond);
+ g_mutex_unlock(&_twe_ctx->thread_mutex);
+
+ return G_SOURCE_CONTINUE;
+}
+
+static void
+_twe_thread_del_source_finalize(GSource *source)
+{
+ twe_del_source *del_source = (twe_del_source *)source;
+
+ TPL_LOG_T(BACKEND, "gsource(%p) event_fd(%d)",
+ source, del_source->event_fd);
+
+ close(del_source->event_fd);
+
+ del_source->tag = NULL;
+ del_source->event_fd = -1;
+
+ return;
+}
+
+static GSourceFuncs _twe_del_source_funcs = {
+ .prepare = NULL,
+ .check = NULL,
+ .dispatch = _twe_thread_del_source_dispatch,
+ .finalize = _twe_thread_del_source_finalize,
+};
+
+/* Wake the twe_thread so it destroys del_source->target_source.
+ * The eventfd counter value is unused; the write is only a doorbell for
+ * _twe_thread_del_source_dispatch(). */
+static void
+_twe_thread_del_source_trigger(twe_del_source *del_source)
+{
+	uint64_t value = 1;
+	ssize_t ret; /* fix: write() returns ssize_t, was truncated to int */
+
+	ret = write(del_source->event_fd, &value, sizeof(uint64_t));
+	if (ret == -1) {
+		TPL_ERR("failed to send delete event. twe_del_source(%p)",
+				del_source);
+		return;
+	}
+}
+
+/* Create a del_source: an eventfd-backed GSource attached to the twe_thread
+ * main loop. Writing to its eventfd (via _twe_thread_del_source_trigger)
+ * makes the thread call destroy_target_source_func(target_source); the
+ * caller is expected to set destroy_target_source_func afterwards.
+ *
+ * ctx           : thread context owning the main loop (must not be NULL).
+ * target_source : opaque source to destroy from the thread side.
+ * Returns the new del_source, or NULL on failure. */
+twe_del_source *
+_twe_del_source_init(twe_thread_context *ctx, void *target_source)
+{
+ twe_del_source *source = NULL;
+
+ if (!ctx) {
+ TPL_ERR("Invalid parameter. twe_thread_context is NULL");
+ return NULL;
+ }
+
+ if (!target_source) {
+ TPL_ERR("Invalid parameter. target_source is NULL");
+ return NULL;
+ }
+
+ source = (twe_del_source *)g_source_new(&_twe_del_source_funcs,
+ sizeof(twe_del_source));
+ if (!source) {
+ TPL_ERR("[THREAD] Failed to create twe_del_source.");
+ return NULL;
+ }
+
+ source->event_fd = eventfd(0, EFD_CLOEXEC);
+ if (source->event_fd < 0) {
+ TPL_ERR("[THREAD] Failed to create eventfd. errno(%d)", errno);
+ g_source_unref(&source->gsource);
+ return NULL;
+ }
+
+ /* Poll the eventfd for readability; dispatch fires on G_IO_IN. */
+ source->tag = g_source_add_unix_fd(&source->gsource,
+ source->event_fd,
+ G_IO_IN);
+ source->target_source = target_source;
+
+ g_source_attach(&source->gsource, g_main_loop_get_context(ctx->twe_loop));
+
+ return source;
+}
+
+/* Detach and release a del_source created by _twe_del_source_init().
+ * The eventfd itself is closed by the GSource finalize callback. */
+void
+_twe_del_source_fini(twe_del_source *source)
+{
+	/* fix: guard against NULL -- _twe_del_source_init() can fail and
+	 * callers may hold a NULL del_source pointer. */
+	if (!source)
+		return;
+
+	g_source_remove_unix_fd(&source->gsource, source->tag);
+	g_source_destroy(&source->gsource);
+	g_source_unref(&source->gsource);
+}
+
+/* Dispatch callback for the tdm_source: drains pending tdm client events
+ * when the tdm fd becomes readable. On an unrecoverable tdm error the
+ * source removes itself (and its companion del_source) from the loop. */
+static gboolean
+_twe_thread_tdm_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
+{
+ twe_tdm_source *tdm_source = (twe_tdm_source *)source;
+ tdm_error tdm_err = TDM_ERROR_NONE;
+ GIOCondition cond;
+
+ cond = g_source_query_unix_fd(source, tdm_source->tag);
+
+ if (cond & G_IO_IN) {
+ tdm_err = tdm_client_handle_events(tdm_source->tdm_client);
+ }
+
+ /* If an error occurs in tdm_client_handle_events, it cannot be recovered.
+ * When tdm_source is no longer available due to an unexpected situation,
+ * twe_thread must remove it from the thread and destroy it.
+ * In that case, tdm_vblank can no longer be used for surfaces and displays
+ * that used this tdm_source. */
+ if (tdm_err != TDM_ERROR_NONE) {
+ TPL_ERR("Error occured in tdm_client_handle_events. tdm_err(%d)",
+ tdm_err);
+ TPL_WARN("tdm_source(%p) will be removed from thread.", tdm_source);
+
+ g_source_remove_unix_fd(&tdm_source->gsource, tdm_source->tag);
+ g_source_destroy(&tdm_source->gsource);
+ g_source_unref(&tdm_source->gsource);
+
+ /* The global context must not keep pointing at the dead source. */
+ _twe_ctx->tdm_source = NULL;
+
+ if (_twe_ctx->tdm_del_source) {
+ _twe_del_source_fini(_twe_ctx->tdm_del_source);
+ _twe_ctx->tdm_del_source = NULL;
+ }
+
+ return G_SOURCE_REMOVE;
+ }
+
+ return G_SOURCE_CONTINUE;
+}
+
+/* GSource finalize for the tdm_source: destroys the owned tdm_client.
+ * The tdm fd belongs to the tdm_client, so it is not closed here. */
+static void
+_twe_thread_tdm_source_finalize(GSource *source)
+{
+ twe_tdm_source *tdm_source = (twe_tdm_source *)source;
+
+ TPL_LOG_T(BACKEND, "tdm_destroy| tdm_source(%p) tdm_client(%p)",
+ tdm_source, tdm_source->tdm_client);
+
+ if (tdm_source->tdm_client) {
+ tdm_client_destroy(tdm_source->tdm_client);
+ tdm_source->tdm_client = NULL;
+ }
+
+ tdm_source->tdm_fd = -1;
+}
+
+/* GSource vtable for the tdm vblank event source. */
+static GSourceFuncs _twe_tdm_funcs = {
+ .prepare = NULL,
+ .check = NULL,
+ .dispatch = _twe_thread_tdm_source_dispatch,
+ .finalize = _twe_thread_tdm_source_finalize,
+};
+
+/* Create the tdm_source: connects a tdm_client and wraps its fd in a
+ * GSource polled for G_IO_IN so vblank events are handled on the thread.
+ * Note: the caller is responsible for attaching the source to a context.
+ * Returns NULL on any failure (tdm client connect, fd query, allocation). */
+static twe_tdm_source *
+_twe_thread_tdm_source_create(void)
+{
+ twe_tdm_source *tdm_source = NULL;
+ tdm_client *client = NULL;
+ int tdm_fd = -1;
+ tdm_error tdm_err = TDM_ERROR_NONE;
+
+ client = tdm_client_create(&tdm_err);
+ if (!client || tdm_err != TDM_ERROR_NONE) {
+ TPL_ERR("TDM_ERROR:%d Failed to create tdm_client\n", tdm_err);
+ return NULL;
+ }
+
+ tdm_err = tdm_client_get_fd(client, &tdm_fd);
+ if (tdm_fd < 0 || tdm_err != TDM_ERROR_NONE) {
+ TPL_ERR("TDM_ERROR:%d Failed to get tdm_client fd\n", tdm_err);
+ tdm_client_destroy(client);
+ return NULL;
+ }
+
+ tdm_source = (twe_tdm_source *)g_source_new(&_twe_tdm_funcs,
+ sizeof(twe_tdm_source));
+ if (!tdm_source) {
+ TPL_ERR("Failed to create tdm_gsource\n");
+ tdm_client_destroy(client);
+ return NULL;
+ }
+
+ /* Ownership of client passes to the source; finalize destroys it. */
+ tdm_source->tdm_client = client;
+ tdm_source->tdm_fd = tdm_fd;
+ tdm_source->tag = g_source_add_unix_fd(&tdm_source->gsource,
+ tdm_fd,
+ G_IO_IN);
+
+ TPL_LOG_T(BACKEND, "TPL_WAIT_VBLANK:DEFAULT_ENABLED");
+ TPL_LOG_T(BACKEND, "tdm_source(%p) tdm_client(%p) tdm_fd(%d)",
+ tdm_source, client, tdm_fd);
+
+ return tdm_source;
+}
+
+/* Destroy callback for the tdm_source; installed as
+ * destroy_target_source_func of the tdm del_source, so it runs on the
+ * twe_thread when the del_source eventfd is triggered. */
+static void
+_twe_thread_tdm_source_destroy(void *source)
+{
+ twe_tdm_source *tdm_source = (twe_tdm_source *)source;
+
+ _twe_ctx->tdm_source = NULL;
+
+ g_source_remove_unix_fd(&tdm_source->gsource, tdm_source->tag);
+ g_source_destroy(&tdm_source->gsource);
+ g_source_unref(&tdm_source->gsource);
+}
+
+/* Write the value 1 to an eventfd to wake its poller.
+ * Returns the number of bytes written (sizeof(uint64_t) on success),
+ * or -1 on invalid fd / write failure. */
+static int
+_write_to_eventfd(int eventfd)
+{
+	uint64_t value = 1;
+	ssize_t ret; /* fix: write() returns ssize_t, was truncated to int */
+
+	if (eventfd == -1) {
+		TPL_ERR("Invalid fd(-1)");
+		return -1;
+	}
+
+	ret = write(eventfd, &value, sizeof(uint64_t));
+	if (ret == -1) {
+		TPL_ERR("failed to write to fd(%d)", eventfd);
+		return -1;
+	}
+
+	return (int)ret;
+}
+
+/* Create (or attach to) the process-wide twe_thread.
+ *
+ * The first caller allocates the shared _twe_ctx, spawns the "twe_thread"
+ * GThread with its own GMainContext, and blocks until the thread signals
+ * that startup finished. Later callers only take a reference on the shared
+ * context. Returns a twe_thread handle, or NULL on allocation failure. */
+twe_thread*
+twe_thread_create(void)
+{
+	twe_thread *thread = NULL;
+	char *env = NULL;
+
+	thread = calloc(1, sizeof(twe_thread));
+	if (!thread) {
+		TPL_ERR("Failed to allocate twe_thread");
+		return NULL;
+	}
+
+	if (!_twe_ctx) {
+		GMainContext *context;
+
+		_twe_ctx = calloc(1, sizeof(twe_thread_context));
+		if (!_twe_ctx) {
+			TPL_ERR("Failed to allocate _twe_ctx");
+			/* fix: thread is known non-NULL here; redundant guard removed */
+			free(thread);
+			return NULL;
+		}
+
+		context = g_main_context_new();
+		_twe_ctx->twe_loop = g_main_loop_new(context, FALSE);
+		g_main_context_unref(context); /* the loop now owns the context */
+
+		g_mutex_init(&_twe_ctx->thread_mutex);
+		g_cond_init(&_twe_ctx->thread_cond);
+
+		/* vblank waiting defaults to enabled; TPL_WAIT_VBLANK=0 disables. */
+		_twe_ctx->use_wait_vblank = TPL_TRUE;
+
+		env = tpl_getenv("TPL_WAIT_VBLANK");
+		if (env && !atoi(env)) {
+			_twe_ctx->use_wait_vblank = TPL_FALSE;
+		}
+
+		g_mutex_lock(&_twe_ctx->thread_mutex);
+		_twe_ctx->twe_thread = g_thread_new("twe_thread", _twe_thread_loop,
+											_twe_ctx);
+		/* NOTE(review): bare g_cond_wait() without a predicate loop can
+		 * return on spurious wakeups -- confirm the startup handshake
+		 * with _twe_thread_loop tolerates that. */
+		g_cond_wait(&_twe_ctx->thread_cond,
+					&_twe_ctx->thread_mutex);
+
+		if (_twe_ctx->tdm_source) {
+			twe_tdm_source *tdm_source = _twe_ctx->tdm_source;
+
+			/* Register a del_source so the tdm_source can be destroyed
+			 * from inside the thread at shutdown. */
+			_twe_ctx->tdm_del_source = _twe_del_source_init(_twe_ctx, tdm_source);
+			if (_twe_ctx->tdm_del_source)
+				_twe_ctx->tdm_del_source->destroy_target_source_func
+					= _twe_thread_tdm_source_destroy;
+		}
+
+		g_mutex_unlock(&_twe_ctx->thread_mutex);
+
+		_twe_ctx->ref_cnt = 0;
+	}
+
+	thread->ctx = _twe_ctx;
+	_twe_ctx->ref_cnt++;
+
+	TPL_LOG_T(BACKEND, "_twe_ctx(%p) twe_thread(%p)", _twe_ctx, thread);
+	return thread;
+}
+
+/* Drop a reference on the shared twe_thread context; the last reference
+ * tears everything down: the tdm_source is destroyed on the thread via its
+ * del_source, then the main loop is quit and the GThread joined. */
+void
+twe_thread_destroy(twe_thread* thread)
+{
+ thread->ctx->ref_cnt--;
+
+ if (thread->ctx->ref_cnt == 0) {
+ twe_del_source *tdm_del_source = _twe_ctx->tdm_del_source;
+
+ if (_twe_ctx->tdm_source) {
+ g_mutex_lock(&_twe_ctx->thread_mutex);
+
+ if (tdm_del_source) {
+ /* Ask the thread to destroy the tdm_source and wait until
+ * the dispatch callback signals completion. */
+ _twe_thread_del_source_trigger(tdm_del_source);
+ g_cond_wait(&_twe_ctx->thread_cond, &_twe_ctx->thread_mutex);
+ }
+
+ g_mutex_unlock(&_twe_ctx->thread_mutex);
+ }
+
+ if (tdm_del_source)
+ _twe_del_source_fini(tdm_del_source);
+
+ _twe_ctx->tdm_del_source = NULL;
+
+ /* Stop the loop, join the worker thread, then free the context. */
+ g_main_loop_quit(thread->ctx->twe_loop);
+ g_thread_join(thread->ctx->twe_thread);
+ g_main_loop_unref(thread->ctx->twe_loop);
+
+ g_mutex_clear(&thread->ctx->thread_mutex);
+ g_cond_clear(&thread->ctx->thread_cond);
+
+ free(_twe_ctx);
+ _twe_ctx = NULL;
+ }
+
+ TPL_LOG_T(BACKEND, "[THREAD DESTROY] twe_thread(%p)", thread);
+
+ thread->ctx = NULL;
+ free(thread);
+}
+
+static void
+_twe_display_print_err(twe_wl_disp_source *disp_source,
+ const char *func_name)
+{
+ int dpy_err;
+ char buf[1024];
+ strerror_r(errno, buf, sizeof(buf));
+
+ if (disp_source->last_error == errno)
+ return;
+
+ TPL_ERR("falied to %s. error:%d(%s)", func_name, errno, buf);
+
+ dpy_err = wl_display_get_error(disp_source->disp);
+ if (dpy_err == EPROTO) {
+ const struct wl_interface *err_interface;
+ uint32_t err_proxy_id, err_code;
+ err_code = wl_display_get_protocol_error(disp_source->disp,
+ &err_interface,
+ &err_proxy_id);
+ TPL_ERR("[Protocol Error] interface: %s, error_code: %d, proxy_id: %d",
+ err_interface->name, err_code, err_proxy_id);
+ }
+
+ disp_source->last_error = errno;
+}
+
+/* Dump one buffer list under a label prefix: a header line with the list
+ * pointer and count, then one line per tbm_surface with its bo name. */
+static void
+_twe_print_buffer_sublist(twe_wl_surf_source *surf_source,
+						  const char *tag, tpl_list_t *list)
+{
+	int count = __tpl_list_get_count(list);
+	int idx = 0;
+	tpl_list_node_t *node = NULL;
+	tbm_surface_h tbm_surface = NULL;
+
+	TPL_DEBUG("%s | surf_source(%p) list(%p) count(%d)",
+			  tag, surf_source, list, count);
+
+	/* First iteration fetches the front node, later ones advance it. */
+	while ((!node && (node = __tpl_list_get_front_node(list))) ||
+		   (node && (node = __tpl_list_node_next(node)))) {
+		tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
+		TPL_DEBUG("%s | %d | tbm_surface(%p) bo(%d)",
+				  tag, idx, tbm_surface,
+				  _get_tbm_surface_bo_name(tbm_surface));
+		idx++;
+	}
+}
+
+/* Debug dump of all three per-surface buffer lists: buffers waiting for
+ * vblank, buffers dequeued by the client, and buffers already committed.
+ * fix: the three identical hand-rolled loops are deduplicated into
+ * _twe_print_buffer_sublist(). */
+static void
+_twe_print_buffer_list(twe_wl_surf_source *surf_source)
+{
+	_twe_print_buffer_sublist(surf_source, "VBLANK WAITING BUFFERS",
+							  surf_source->vblank_waiting_buffers);
+	_twe_print_buffer_sublist(surf_source, "DEQUEUED BUFFERS",
+							  surf_source->in_use_buffers);
+	_twe_print_buffer_sublist(surf_source, "COMMITTED BUFFERS",
+							  surf_source->committed_buffers);
+}
+
+/* GSource prepare for the display source. Announces read intent to
+ * libwayland (wl_display_prepare_read_queue) and flushes outgoing requests
+ * before the loop polls the display fd. Returning TRUE skips the poll and
+ * goes straight to dispatch. */
+static gboolean
+_twe_thread_wl_disp_prepare(GSource *source, gint *time)
+{
+ twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
+
+ /* If disp_source is already prepared, do nothing in this function. */
+ if (disp_source->prepared)
+ return FALSE;
+
+ /* If there is a last_error, there is no need to poll,
+ * so skip directly to dispatch.
+ * prepare -> dispatch */
+ if (disp_source->last_error)
+ return TRUE;
+
+ /* prepare_read fails while events are pending; dispatch them first. */
+ while (wl_display_prepare_read_queue(disp_source->disp,
+ disp_source->ev_queue) != 0) {
+ if (wl_display_dispatch_queue_pending(disp_source->disp,
+ disp_source->ev_queue) == -1) {
+ _twe_display_print_err(disp_source, "dispatch_queue_pending");
+ }
+ }
+
+ disp_source->prepared = TPL_TRUE;
+
+ wl_display_flush(disp_source->disp);
+ /* No timeout: block in poll until the display fd is readable. */
+ *time = -1;
+
+ return FALSE;
+}
+
+/* GSource check for the display source. Completes the read intent taken in
+ * prepare: reads events when the fd is readable, otherwise cancels the
+ * read (libwayland requires exactly one of read/cancel per prepare). */
+static gboolean
+_twe_thread_wl_disp_check(GSource *source)
+{
+ twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
+ gboolean ret = FALSE;
+
+ if (!disp_source->prepared)
+ return ret;
+
+ /* If prepared, but last_error is set,
+ * cancel_read is executed and FALSE is returned.
+ * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
+ * and skipping disp_check from prepare to disp_dispatch.
+ * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
+ if (disp_source->prepared && disp_source->last_error) {
+ wl_display_cancel_read(disp_source->disp);
+ return ret;
+ }
+
+ if (disp_source->gfd.revents & G_IO_IN) {
+ if (wl_display_read_events(disp_source->disp) == -1)
+ _twe_display_print_err(disp_source, "read_event.");
+ ret = TRUE;
+ } else {
+ wl_display_cancel_read(disp_source->disp);
+ ret = FALSE;
+ }
+
+ disp_source->prepared = TPL_FALSE;
+
+ return ret;
+}
+
+/* GSource dispatch for the display source. Dispatches events read in
+ * check onto the private queue under wl_event_mutex, then flushes.
+ * Removes the source when a fatal display error was recorded. */
+static gboolean
+_twe_thread_wl_disp_dispatch(GSource *source, GSourceFunc cb, gpointer data)
+{
+ twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
+
+ /* If there is last_error, G_SOURCE_REMOVE should be returned
+ * to remove the gsource from the main loop.
+ * This is because disp_source is not valid since last_error was set.*/
+ if (disp_source->last_error) {
+ return G_SOURCE_REMOVE;
+ }
+
+ g_mutex_lock(&disp_source->wl_event_mutex);
+ if (disp_source->gfd.revents & G_IO_IN) {
+ if (wl_display_dispatch_queue_pending(disp_source->disp,
+ disp_source->ev_queue) == -1) {
+ _twe_display_print_err(disp_source, "dispatch_queue_pending");
+ }
+ }
+
+ wl_display_flush(disp_source->disp);
+ g_mutex_unlock(&disp_source->wl_event_mutex);
+
+ return G_SOURCE_CONTINUE;
+}
+
+/* GSource finalize for the display source. The wl_display and queue are
+ * owned elsewhere, so only a teardown trace is emitted here. */
+static void
+_twe_thread_wl_disp_finalize(GSource *source)
+{
+	TPL_LOG_T(BACKEND, "finalize| disp_source(%p)", source);
+}
+
+/* GSource vtable implementing the libwayland prepare/read/dispatch cycle. */
+static GSourceFuncs _twe_wl_disp_funcs = {
+ .prepare = _twe_thread_wl_disp_prepare,
+ .check = _twe_thread_wl_disp_check,
+ .dispatch = _twe_thread_wl_disp_dispatch,
+ .finalize = _twe_thread_wl_disp_finalize,
+};
+
+/* Initialize a wayland-tbm client for the display and route its wl_tbm
+ * proxy events to the twe_thread's private event queue.
+ * Returns the client, or NULL on failure. */
+static struct wayland_tbm_client*
+_twe_display_init_wl_tbm_client(struct wl_display *display,
+								struct wl_event_queue *ev_queue)
+{
+	struct wayland_tbm_client *tbm_client;
+	struct wl_proxy *tbm_proxy;
+
+	tbm_client = wayland_tbm_client_init(display);
+	if (!tbm_client) {
+		TPL_ERR("Failed to initialize wl_tbm_client.");
+		return NULL;
+	}
+
+	tbm_proxy = (struct wl_proxy *)wayland_tbm_client_get_wl_tbm(tbm_client);
+	if (!tbm_proxy) {
+		TPL_ERR("Failed to get wl_tbm from wl_tbm_client(%p)", tbm_client);
+		wayland_tbm_client_deinit(tbm_client);
+		return NULL;
+	}
+
+	wl_proxy_set_queue(tbm_proxy, ev_queue);
+
+	TPL_LOG_T(BACKEND, "wl_tbm_client init| wl_tbm_client(%p)", tbm_client);
+	return tbm_client;
+}
+
+/* Tear down the wayland-tbm client: detach its wl_tbm proxy from the
+ * private queue first, then deinit the client. */
+static void
+_twe_display_fini_wl_tbm_client(struct wayland_tbm_client *wl_tbm_client)
+{
+	struct wl_proxy *tbm_proxy =
+		(struct wl_proxy *)wayland_tbm_client_get_wl_tbm(wl_tbm_client);
+
+	if (tbm_proxy)
+		wl_proxy_set_queue(tbm_proxy, NULL);
+
+	TPL_LOG_T(BACKEND, "wl_tbm_client deinit| wl_tbm_client(%p)", wl_tbm_client);
+	wayland_tbm_client_deinit(wl_tbm_client);
+}
+
+/* wayland_vulkan listener: accumulates the present modes advertised by the
+ * server into the display's surface_capabilities bitmask. */
+static void
+__cb_wl_vk_support_present_mode_listener(void *data,
+ struct wayland_vulkan *wayland_vulkan,
+ uint32_t mode)
+{
+ twe_wl_disp_source *disp_source = (twe_wl_disp_source *)data;
+
+ switch (mode) {
+ case WAYLAND_VULKAN_PRESENT_MODE_TYPE_IMMEDIATE:
+ disp_source->surface_capabilities.present_modes
+ |= TPL_DISPLAY_PRESENT_MODE_IMMEDIATE;
+ break;
+ case WAYLAND_VULKAN_PRESENT_MODE_TYPE_MAILBOX:
+ disp_source->surface_capabilities.present_modes
+ |= TPL_DISPLAY_PRESENT_MODE_MAILBOX;
+ break;
+ case WAYLAND_VULKAN_PRESENT_MODE_TYPE_FIFO:
+ disp_source->surface_capabilities.present_modes
+ |= TPL_DISPLAY_PRESENT_MODE_FIFO;
+ break;
+ case WAYLAND_VULKAN_PRESENT_MODE_TYPE_FIFO_RELAXED:
+ disp_source->surface_capabilities.present_modes
+ |= TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED;
+ break;
+ default:
+ /* Unknown modes are ignored (forward compatibility). */
+ TPL_WARN("server sent unknown present type: %d", mode);
+ }
+}
+
+/* Listener for the wayland_vulkan global (present mode advertisement). */
+static struct wayland_vulkan_listener wl_vk_listener = {
+ __cb_wl_vk_support_present_mode_listener,
+};
+
+#define IMPL_TIZEN_SURFACE_SHM_VERSION 2
+
+/* wl_registry global handler: binds the protocol globals this backend
+ * uses (tizen_surface_shm, wayland_vulkan for vulkan displays,
+ * wp_presentation, and optionally explicit sync when TPL_EFS is set).
+ * Note: "resistry" is a historical typo kept because the symbol is
+ * referenced by the registry_listener table. */
+void
+__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
+ uint32_t name, const char *interface,
+ uint32_t version)
+{
+ twe_wl_disp_source *disp_source = (twe_wl_disp_source *)data;
+
+ if (!strcmp(interface, "tizen_surface_shm")) {
+ /* Bind at most the version this implementation supports. */
+ disp_source->tss = wl_registry_bind(wl_registry,
+ name,
+ &tizen_surface_shm_interface,
+ ((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
+ version : IMPL_TIZEN_SURFACE_SHM_VERSION));
+ } else if (disp_source->is_vulkan_dpy
+ && !strcmp(interface, "wayland_vulkan")) {
+ disp_source->wl_vk_client =
+ wl_registry_bind(wl_registry, name,
+ &wayland_vulkan_interface,
+ version);
+ } else if (!strcmp(interface, wp_presentation_interface.name)) {
+ disp_source->presentation =
+ wl_registry_bind(wl_registry,
+ name, &wp_presentation_interface, 1);
+ TPL_DEBUG("bind wp_presentation_interface");
+ } else if (strcmp(interface, "zwp_linux_explicit_synchronization_v1") == 0) {
+ /* Explicit sync is opt-in via the TPL_EFS environment variable. */
+ char *env = tpl_getenv("TPL_EFS");
+ if (env && atoi(env)) {
+ disp_source->explicit_sync =
+ wl_registry_bind(wl_registry, name,
+ &zwp_linux_explicit_synchronization_v1_interface, 1);
+ disp_source->use_explicit_sync = TPL_TRUE;
+ TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
+ } else {
+ disp_source->use_explicit_sync = TPL_FALSE;
+ }
+ }
+}
+
+/* wl_registry global_remove handler: intentionally a no-op; this backend
+ * does not react to globals disappearing at runtime. */
+void
+__cb_wl_resistry_global_remove_callback(void *data,
+ struct wl_registry *wl_registry,
+ uint32_t name)
+{
+}
+
+/* Registry listener used during _twe_display_wayland_init(). */
+static const struct wl_registry_listener registry_listener = {
+ __cb_wl_resistry_global_callback,
+ __cb_wl_resistry_global_remove_callback
+};
+
+/* Discover and bind the wayland globals for this display.
+ *
+ * Uses a temporary queue plus a wl_display proxy wrapper so the roundtrips
+ * here do not disturb the caller's queues; successfully-bound globals are
+ * then moved onto the display's private ev_queue. All temporaries are
+ * released at the fini label regardless of which step failed. */
+static void
+_twe_display_wayland_init(twe_wl_disp_source *disp_source)
+{
+ struct wl_registry *registry = NULL;
+ struct wl_event_queue *queue = NULL;
+ struct wl_display *display_wrapper = NULL;
+ int ret;
+
+ queue = wl_display_create_queue(disp_source->disp);
+ if (!queue) {
+ TPL_ERR("Failed to create wl_queue");
+ goto fini;
+ }
+
+ display_wrapper = wl_proxy_create_wrapper(disp_source->disp);
+ if (!display_wrapper) {
+ TPL_ERR("Failed to create a proxy wrapper of wl_display");
+ goto fini;
+ }
+
+ wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
+
+ registry = wl_display_get_registry(display_wrapper);
+ if (!registry) {
+ TPL_ERR("Failed to create wl_registry");
+ goto fini;
+ }
+
+ /* The wrapper is only needed to create the registry on our queue. */
+ wl_proxy_wrapper_destroy(display_wrapper);
+ display_wrapper = NULL;
+
+ if (wl_registry_add_listener(registry, &registry_listener,
+ disp_source)) {
+ TPL_ERR("Failed to wl_registry_add_listener");
+ goto fini;
+ }
+
+ /* Roundtrip so the registry listener binds all globals. */
+ ret = wl_display_roundtrip_queue(disp_source->disp, queue);
+ if (ret == -1) {
+ _twe_display_print_err(disp_source, "roundtrip_queue");
+ goto fini;
+ }
+
+ /* set tizen_surface_shm's queue as client's private queue */
+ if (disp_source->tss) {
+ wl_proxy_set_queue((struct wl_proxy *)disp_source->tss,
+ disp_source->ev_queue);
+ TPL_LOG_T(BACKEND, "tizen_surface_shm(%p) init.", disp_source->tss);
+ }
+
+ if (disp_source->wl_vk_client) {
+ wayland_vulkan_add_listener(disp_source->wl_vk_client,
+ &wl_vk_listener, disp_source);
+
+ /* Second roundtrip collects the advertised present modes. */
+ ret = wl_display_roundtrip_queue(disp_source->disp, queue);
+ if (ret == -1) {
+ _twe_display_print_err(disp_source, "roundtrip_queue");
+ goto fini;
+ }
+
+ wl_proxy_set_queue((struct wl_proxy *)disp_source->wl_vk_client,
+ disp_source->ev_queue);
+ TPL_LOG_T(BACKEND, "wl_vk_client(%p) init.", disp_source->wl_vk_client);
+ }
+
+ if (disp_source->presentation) {
+ wl_proxy_set_queue((struct wl_proxy *)disp_source->presentation,
+ disp_source->ev_queue);
+ TPL_LOG_T(BACKEND, "wp_presentation(%p) init.", disp_source->presentation);
+ }
+
+ if (disp_source->explicit_sync) {
+ wl_proxy_set_queue((struct wl_proxy *)disp_source->explicit_sync,
+ disp_source->ev_queue);
+ TPL_LOG_T(BACKEND, "zwp_linux_explicit_synchronization_v1(%p) init.",
+ disp_source->explicit_sync);
+ }
+
+fini:
+ if (display_wrapper)
+ wl_proxy_wrapper_destroy(display_wrapper);
+ if (registry)
+ wl_registry_destroy(registry);
+ if (queue)
+ wl_event_queue_destroy(queue);
+}
+
+/* Destroy the protocol globals bound in _twe_display_wayland_init().
+ * Each pointer is cleared after destruction so the teardown is idempotent. */
+static void
+_twe_display_wayland_fini(twe_wl_disp_source *disp_source)
+{
+ if (disp_source->tss) {
+ TPL_LOG_T(BACKEND, "tizen_surface_shm(%p) fini.", disp_source->tss);
+ tizen_surface_shm_destroy(disp_source->tss);
+ disp_source->tss = NULL;
+ }
+
+ if (disp_source->wl_vk_client) {
+ TPL_LOG_T(BACKEND, "wl_vk_client(%p) fini.", disp_source->wl_vk_client);
+ wayland_vulkan_destroy(disp_source->wl_vk_client);
+ disp_source->wl_vk_client = NULL;
+ }
+
+ if (disp_source->presentation) {
+ TPL_LOG_T(BACKEND, "wp_presentation(%p) fini.", disp_source->presentation);
+ wp_presentation_destroy(disp_source->presentation);
+ disp_source->presentation = NULL;
+ }
+
+ if (disp_source->explicit_sync) {
+ TPL_LOG_T(BACKEND, "zwp_linux_explicit_synchronization_v1(%p) fini.",
+ disp_source->explicit_sync);
+ zwp_linux_explicit_synchronization_v1_destroy(disp_source->explicit_sync);
+ disp_source->explicit_sync = NULL;
+ }
+}
+
+/* Destroy callback for the display source; installed as
+ * destroy_target_source_func of the display's del_source, so it runs on
+ * the twe_thread. Cancels any outstanding read intent, drains pending
+ * events and destroys the private queue before removing the GSource. */
+static void
+_twe_thread_wl_disp_source_destroy(void *source)
+{
+ twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source;
+ if (g_source_is_destroyed(&disp_source->gsource)) {
+ TPL_ERR("disp_source(%p) already destroyed.", disp_source);
+ return;
+ }
+
+ g_mutex_lock(&disp_source->wl_event_mutex);
+
+ /* If disp_source is in prepared state, cancel it */
+ if (disp_source->prepared) {
+ wl_display_cancel_read(disp_source->disp);
+ disp_source->prepared = TPL_FALSE;
+ }
+
+ if (wl_display_dispatch_queue_pending(disp_source->disp,
+ disp_source->ev_queue) == -1) {
+ _twe_display_print_err(disp_source, "dispatch_queue_pending");
+ }
+
+ wl_event_queue_destroy(disp_source->ev_queue);
+ g_mutex_unlock(&disp_source->wl_event_mutex);
+
+ TPL_INFO("[DISPLAY_DEL]", "twe_display(%p) wl_display(%p)",
+ disp_source, disp_source->disp);
+
+ g_mutex_clear(&disp_source->wl_event_mutex);
+
+ g_source_remove_poll(&disp_source->gsource, &disp_source->gfd);
+ g_source_destroy(&disp_source->gsource);
+ g_source_unref(&disp_source->gsource);
+}
+
+/* Register a wl_display with the twe_thread.
+ *
+ * Creates a private wl_event_queue and wayland-tbm client for the display,
+ * wraps them in a twe_wl_disp_source and attaches it to the thread's main
+ * loop so wayland events are dispatched there.
+ * Returns an opaque twe_display_h, or NULL on failure. */
+twe_display_h
+twe_display_add(twe_thread* thread,
+				struct wl_display *display,
+				tpl_backend_type_t backend)
+{
+	twe_thread_context *ctx = thread->ctx;
+	twe_wl_disp_source *source;
+	struct wayland_tbm_client *wl_tbm_client = NULL;
+	struct wl_event_queue *ev_queue = NULL;
+
+	ev_queue = wl_display_create_queue(display);
+	if (!ev_queue) {
+		TPL_ERR("Failed to create wl_event_queue.");
+		return NULL;
+	}
+
+	wl_tbm_client = _twe_display_init_wl_tbm_client(display, ev_queue);
+	if (!wl_tbm_client) {
+		TPL_ERR("Failed to create wl_tbm_client.");
+		wl_event_queue_destroy(ev_queue);
+		return NULL;
+	}
+
+	source = (twe_wl_disp_source *)g_source_new(&_twe_wl_disp_funcs,
+												sizeof(twe_wl_disp_source));
+	if (!source) {
+		TPL_ERR("Failed to create twe_wl_disp_source.");
+		/* fix: release the tbm client and event queue created above
+		 * instead of leaking them on this error path. */
+		_twe_display_fini_wl_tbm_client(wl_tbm_client);
+		wl_event_queue_destroy(ev_queue);
+		return NULL;
+	}
+
+	source->disp = display;
+	source->last_error = 0;
+	source->ev_queue = ev_queue;
+	source->wl_tbm_client = wl_tbm_client;
+	source->prepared = TPL_FALSE;
+	source->gfd.fd = wl_display_get_fd(display);
+	source->gfd.events = G_IO_IN | G_IO_ERR;
+	source->gfd.revents = 0;
+	g_mutex_init(&source->wl_event_mutex);
+
+	if (backend == TPL_BACKEND_WAYLAND_VULKAN_WSI ||
+		backend == TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD) {
+		source->is_vulkan_dpy = TPL_TRUE;
+	} else { /* wayland_egl backend */
+		/* These are not used. It just be initialized. */
+		source->is_vulkan_dpy = TPL_FALSE;
+	}
+
+	/* It will be changed to TPL_TRUE when zwp_linux_explicit_synchronization_v1
+	 * succeeds to bind. */
+	source->use_explicit_sync = TPL_FALSE;
+
+	source->surface_capabilities.min_buffer = 2;
+	source->surface_capabilities.max_buffer = VK_CLIENT_QUEUE_SIZE;
+	source->surface_capabilities.present_modes =
+		TPL_DISPLAY_PRESENT_MODE_FIFO;
+	_twe_display_wayland_init(source);
+
+	/* fix: _twe_del_source_init() may return NULL; guard the dereference
+	 * instead of crashing. A missing del_source only means the display
+	 * cannot be destroyed from the thread side later. */
+	source->disp_del_source = _twe_del_source_init(ctx, source);
+	if (source->disp_del_source)
+		source->disp_del_source->destroy_target_source_func
+			= _twe_thread_wl_disp_source_destroy;
+	else
+		TPL_WARN("Failed to create disp_del_source for twe_display(%p)",
+				 source);
+
+	g_source_set_callback(&source->gsource, NULL, display, NULL);
+	g_source_add_poll(&source->gsource, &source->gfd);
+	g_source_attach(&source->gsource, g_main_loop_get_context(ctx->twe_loop));
+
+	TPL_INFO("[DISPLAY_ADD]", "gsource(%p) ev_queue(%p) wl_display(%p)",
+			 source, source->ev_queue, display);
+
+	return (twe_display_h)source;
+}
+
+/* Unregister a display: tears down the bound protocol globals and the tbm
+ * client, then asks the twe_thread (via the del_source eventfd) to destroy
+ * the display GSource and blocks until the thread signals completion.
+ * Returns TPL_ERROR_INVALID_PARAMETER for a NULL/already-destroyed handle. */
+tpl_result_t
+twe_display_del(twe_display_h twe_display)
+{
+	gboolean is_destroyed = FALSE;
+	twe_wl_disp_source *source = (twe_wl_disp_source *)twe_display;
+	twe_del_source *disp_del_source = NULL;
+
+	if (!source ||
+		(is_destroyed = g_source_is_destroyed(&source->gsource))) {
+		TPL_ERR("twe_display(%p) is invalid. | is_destroyed(%s)",
+				twe_display, (is_destroyed ? "TRUE" : "FALSE"));
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	disp_del_source = source->disp_del_source;
+
+	_twe_display_wayland_fini(source);
+
+	_twe_display_fini_wl_tbm_client(source->wl_tbm_client);
+	source->wl_tbm_client = NULL;
+
+	/* fix: disp_del_source may be NULL when its creation failed in
+	 * twe_display_add(); triggering/waiting on it would crash. */
+	if (disp_del_source) {
+		g_mutex_lock(&_twe_ctx->thread_mutex);
+
+		TPL_INFO("[DISPLAY_DEL]", "twe_display(%p) will be destroyed in thread",
+				 twe_display);
+		_twe_thread_del_source_trigger(disp_del_source);
+		/* Wait until the thread-side destroy has finished. */
+		g_cond_wait(&_twe_ctx->thread_cond, &_twe_ctx->thread_mutex);
+		g_mutex_unlock(&_twe_ctx->thread_mutex);
+
+		_twe_del_source_fini(disp_del_source);
+	}
+
+	return TPL_ERROR_NONE;
+}
+
+/* Take the display's wayland event mutex on behalf of the caller.
+ * Must be paired with twe_display_unlock(). */
+tpl_result_t
+twe_display_lock(twe_display_h display)
+{
+	twe_wl_disp_source *source = (twe_wl_disp_source *)display;
+
+	if (!source || g_source_is_destroyed(&source->gsource)) {
+		TPL_ERR("Invalid parameter. display(%p)", display);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	g_mutex_lock(&source->wl_event_mutex);
+	return TPL_ERROR_NONE;
+}
+
+/* Release the display's wayland event mutex taken by twe_display_lock(). */
+void
+twe_display_unlock(twe_display_h display)
+{
+	twe_wl_disp_source *source = (twe_wl_disp_source *)display;
+
+	if (!source || g_source_is_destroyed(&source->gsource)) {
+		TPL_ERR("Invalid parameter. display(%p)", display);
+		return;
+	}
+
+	g_mutex_unlock(&source->wl_event_mutex);
+}
+
+/* Report the min/max buffer counts of the display's surface capabilities.
+ * Either out-parameter may be NULL if the caller is not interested. */
+tpl_result_t
+twe_display_get_buffer_count(twe_display_h display,
+							 int *min, int *max)
+{
+	twe_wl_disp_source *source = (twe_wl_disp_source *)display;
+
+	if (!source || g_source_is_destroyed(&source->gsource)) {
+		TPL_ERR("Invalid parameter. display(%p)", display);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (min)
+		*min = source->surface_capabilities.min_buffer;
+	if (max)
+		*max = source->surface_capabilities.max_buffer;
+
+	return TPL_ERROR_NONE;
+}
+
+/* Report the supported present modes as a TPL_DISPLAY_PRESENT_MODE_* mask.
+ * NOTE(review): all four base modes are OR'ed in unconditionally on top of
+ * the surface_capabilities.present_modes collected from the compositor,
+ * which makes the collected value redundant -- confirm this is intended. */
+tpl_result_t
+twe_display_get_present_mode(twe_display_h display,
+ int *present_modes)
+{
+ twe_wl_disp_source *disp_source = (twe_wl_disp_source *)display;
+ if (!disp_source || g_source_is_destroyed(&disp_source->gsource)) {
+ TPL_ERR("Invalid parameter. display(%p)", display);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ if (present_modes) {
+ *present_modes = TPL_DISPLAY_PRESENT_MODE_MAILBOX |
+ TPL_DISPLAY_PRESENT_MODE_IMMEDIATE |
+ TPL_DISPLAY_PRESENT_MODE_FIFO |
+ TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED |
+ disp_source->surface_capabilities.present_modes;
+ }
+
+ return TPL_ERROR_NONE;
+}
+
+/* Fetch the tizen_private attached to a wl_egl_window via driver_private,
+ * or NULL when the window (or its private data) is absent. */
+static struct tizen_private *
+_get_tizen_private(struct wl_egl_window * wl_egl_window)
+{
+	if (!wl_egl_window)
+		return NULL;
+
+	return (struct tizen_private *)wl_egl_window->driver_private;
+}
+
+/* wl_egl_window destroy callback: severs all links between the window,
+ * its tizen_private and the surf_source, then frees the tizen_private.
+ * The surf_mutex is held while unlinking so the twe_thread cannot touch
+ * the window concurrently. */
+static void
+__cb_destroy_callback(void *private)
+{
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ twe_wl_surf_source *surf_source = NULL;
+
+ if (!tizen_private) {
+ TPL_LOG_T(BACKEND, "[DESTROY_CB] Already destroyed surface");
+ return;
+ }
+
+ surf_source = (twe_wl_surf_source *)tizen_private->data;
+ if (surf_source) {
+ TPL_LOG_T(BACKEND, "[DESTROY_CB] wl_egl_window(%p) surf_source(%p)",
+ surf_source->wl_egl_window, surf_source);
+ g_mutex_lock(&surf_source->surf_mutex);
+ surf_source->wl_egl_window->destroy_window_callback = NULL;
+ surf_source->wl_egl_window->resize_callback = NULL;
+ surf_source->wl_egl_window->driver_private = NULL;
+ surf_source->wl_egl_window = NULL;
+ surf_source->surf = NULL;
+ surf_source->is_destroying = TPL_TRUE;
+
+ /* tizen_private is freed here and the local set to NULL so the
+ * fallback branch below does not double-free it. */
+ tizen_private->set_window_serial_callback = NULL;
+ tizen_private->rotate_callback = NULL;
+ tizen_private->get_rotation_capability = NULL;
+ tizen_private->data = NULL;
+ free(tizen_private);
+ tizen_private = NULL;
+ g_mutex_unlock(&surf_source->surf_mutex);
+ }
+
+ /* Fallback: no surf_source was attached, free tizen_private directly. */
+ if (tizen_private) {
+ tizen_private->set_window_serial_callback = NULL;
+ tizen_private->rotate_callback = NULL;
+ tizen_private->get_rotation_capability = NULL;
+ tizen_private->data = NULL;
+ free(tizen_private);
+ tizen_private = NULL;
+ }
+}
+
+/* wl_egl_window resize callback: resets the surface's tbm_surface_queue to
+ * the window's newly requested size, keeping the current format. */
+static void
+__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
+{
+	TPL_ASSERT(private);
+	TPL_ASSERT(wl_egl_window);
+
+	struct tizen_private *tizen_private = (struct tizen_private *)private;
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)tizen_private->data;
+	int cur_w, cur_h, req_w, req_h, format;
+
+	if (!surf_source) {
+		TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+				wl_egl_window);
+		return;
+	}
+
+	req_w = wl_egl_window->width;
+	req_h = wl_egl_window->height;
+	cur_w = tbm_surface_queue_get_width(surf_source->tbm_queue);
+	cur_h = tbm_surface_queue_get_height(surf_source->tbm_queue);
+	format = tbm_surface_queue_get_format(surf_source->tbm_queue);
+
+	TPL_LOG_T(BACKEND, "[RESIZE_CB] wl_egl_window(%p) (%dx%d) -> (%dx%d)",
+			  wl_egl_window, cur_w, cur_h, req_w, req_h);
+
+	if (tbm_surface_queue_reset(surf_source->tbm_queue, req_w, req_h, format)
+		!= TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to reset tbm_surface_queue(%p)", surf_source->tbm_queue);
+		return;
+	}
+}
+
+/* wl_egl_window rotate callback: records the new rotation on the
+ * surf_source and forwards the event to the registered rotate_cb. */
+static void
+__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
+{
+	TPL_ASSERT(private);
+	TPL_ASSERT(wl_egl_window);
+
+	struct tizen_private *tizen_private = (struct tizen_private *)private;
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)tizen_private->data;
+	int new_rotation = tizen_private->rotation;
+
+	if (!surf_source) {
+		TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+				wl_egl_window);
+		return;
+	}
+
+	TPL_LOG_T(BACKEND, "[ROTATE_CB] wl_egl_window(%p) (%d) -> (%d)",
+			  wl_egl_window, surf_source->rotation, new_rotation);
+
+	surf_source->rotation = new_rotation;
+
+	if (surf_source->rotate_cb)
+		surf_source->rotate_cb(surf_source->cb_data);
+}
+
+/* wl_egl_window query callback: reports whether this surface supports
+ * rotation, as a WL_EGL_WINDOW_TIZEN_CAPABILITY_* value. */
+static int
+__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
+							 void *private)
+{
+	TPL_ASSERT(private);
+	TPL_ASSERT(wl_egl_window);
+
+	struct tizen_private *tizen_private = (struct tizen_private *)private;
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)tizen_private->data;
+
+	if (!surf_source) {
+		TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+				wl_egl_window);
+		return WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
+	}
+
+	return (surf_source->rotation_capability == TPL_TRUE)
+		   ? WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED
+		   : WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
+}
+
+/* wl_egl_window callback: stores a caller-provided serial on the
+ * surf_source and flags that an explicit serial is in use. */
+static void
+__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
+								void *private, unsigned int serial)
+{
+	TPL_ASSERT(private);
+	TPL_ASSERT(wl_egl_window);
+
+	struct tizen_private *tizen_private = (struct tizen_private *)private;
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)tizen_private->data;
+
+	if (!surf_source) {
+		TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+				wl_egl_window);
+		return;
+	}
+
+	surf_source->set_serial_is_used = TPL_TRUE;
+	surf_source->serial = serial;
+}
+
+/* wl_egl_window callback: returns a dup of the surface's commit-sync
+ * eventfd, lazily creating the underlying fd on first call. The caller
+ * owns the returned fd and must close it. Returns -1 on failure. */
+static int
+__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
+
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ twe_wl_surf_source *surf_source = NULL;
+
+ int commit_sync_fd = -1;
+
+ surf_source = (twe_wl_surf_source *)tizen_private->data;
+ if (!surf_source) {
+ TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
+ return -1;
+ }
+
+ g_mutex_lock(&surf_source->commit_sync.mutex);
+
+ /* Already created: hand out another dup of the same eventfd. */
+ if (surf_source->commit_sync.fd != -1) {
+ commit_sync_fd = dup(surf_source->commit_sync.fd);
+ TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
+ surf_source->commit_sync.fd, commit_sync_fd);
+ TPL_DEBUG("[DUP_COMMIT_SYNC] surf_source(%p) commit_sync_fd(%d) dup(%d)",
+ surf_source, surf_source->commit_sync.fd, commit_sync_fd);
+ g_mutex_unlock(&surf_source->commit_sync.mutex);
+ return commit_sync_fd;
+ }
+
+ surf_source->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
+ if (surf_source->commit_sync.fd == -1) {
+ TPL_ERR("Failed to create commit_sync_fd. twe_surface(%p)", surf_source);
+ g_mutex_unlock(&surf_source->commit_sync.mutex);
+ return -1;
+ }
+
+ commit_sync_fd = dup(surf_source->commit_sync.fd);
+
+ TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
+ surf_source->commit_sync.fd, commit_sync_fd);
+ TPL_DEBUG("[CREATE_COMMIT_SYNC] surf_source(%p) commit_sync_fd(%d)",
+ surf_source, commit_sync_fd);
+
+ g_mutex_unlock(&surf_source->commit_sync.mutex);
+
+ return commit_sync_fd;
+}
+
+/* wl_egl_window callback: returns a dup of the surface's presentation-sync
+ * eventfd, lazily creating the underlying fd on first call. The caller
+ * owns the returned fd and must close it. Returns -1 on failure. */
+static int
+__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
+
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ twe_wl_surf_source *surf_source = NULL;
+
+ int presentation_sync_fd = -1;
+
+ surf_source = (twe_wl_surf_source *)tizen_private->data;
+ if (!surf_source) {
+ TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
+ return -1;
+ }
+
+ g_mutex_lock(&surf_source->presentation_sync.mutex);
+ /* Already created: hand out another dup of the same eventfd. */
+ if (surf_source->presentation_sync.fd != -1) {
+ presentation_sync_fd = dup(surf_source->presentation_sync.fd);
+ TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
+ surf_source->presentation_sync.fd, presentation_sync_fd);
+ TPL_DEBUG("[DUP_PRESENTATION_SYNC] surf_source(%p) presentation_sync_fd(%d) dup(%d)",
+ surf_source, surf_source->presentation_sync.fd, presentation_sync_fd);
+ g_mutex_unlock(&surf_source->presentation_sync.mutex);
+ return presentation_sync_fd;
+ }
+
+ surf_source->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
+ if (surf_source->presentation_sync.fd == -1) {
+ TPL_ERR("Failed to create presentation_sync_fd. twe_surface(%p)", surf_source);
+ g_mutex_unlock(&surf_source->presentation_sync.mutex);
+ return -1;
+ }
+
+ presentation_sync_fd = dup(surf_source->presentation_sync.fd);
+ TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
+ surf_source->presentation_sync.fd, presentation_sync_fd);
+ TPL_DEBUG("[CREATE_PRESENTATION_SYNC] surf_source(%p) presentation_sync_fd(%d) dup(%d)",
+ surf_source, surf_source->presentation_sync.fd, presentation_sync_fd);
+
+ g_mutex_unlock(&surf_source->presentation_sync.mutex);
+
+ return presentation_sync_fd;
+}
+
+/* tizen_surface_shm_flusher "flush" event: flush the surface's tbm_queue.
+ * The Vulkan path does not support buffer flush and only warns. */
+static void __cb_tss_flusher_flush_callback(void *data,
+											struct tizen_surface_shm_flusher *tss_flusher)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
+	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+	TPL_LOG_T(BACKEND, "[FLUSH_CB] surf_source(%p)", surf_source);
+
+	if (surf_source->disp_source->is_vulkan_dpy) {
+		TPL_WARN("Vulkan do not support buffer flush");
+		return;
+	}
+
+	tsq_err = tbm_surface_queue_flush(surf_source->tbm_queue);
+	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		/* This path performs a full flush; the old message said
+		 * "free flush" (copy-paste from the free_flush callback). */
+		TPL_ERR("Failed to flush tbm_queue(%p)", surf_source->tbm_queue);
+		return;
+	}
+}
+
+/* tizen_surface_shm_flusher "free_flush" event: release only the free
+ * (unused) buffers of the surface's tbm_queue. The Vulkan path does not
+ * support buffer flush and only warns. */
+static void __cb_tss_flusher_free_flush_callback(void *data,
+												 struct tizen_surface_shm_flusher *tss_flusher)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
+	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+	TPL_LOG_T(BACKEND, "[FREE_FLUSH_CB] surf_source(%p)", surf_source);
+
+	if (surf_source->disp_source->is_vulkan_dpy) {
+		TPL_WARN("Vulkan do not support buffer flush");
+		return;
+	}
+
+	tsq_err = tbm_surface_queue_free_flush(surf_source->tbm_queue);
+	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to free flush tbm_queue(%p)", surf_source->tbm_queue);
+		return;
+	}
+}
+
+
+/* Listener for tizen_surface_shm_flusher events (flush / free_flush). */
+static const struct tizen_surface_shm_flusher_listener
+tss_flusher_listener = {
+	__cb_tss_flusher_flush_callback,
+	__cb_tss_flusher_free_flush_callback
+};
+
+/* tbm user-data free callback (registered with KEY_BUFFER_INFO): tears
+ * down a twe_wl_buffer_info when its tbm_surface is destroyed.
+ *
+ * Destroys the wl_buffer proxy, signals and closes any still-pending
+ * commit/presentation sync fds (so waiters are not left blocked), closes
+ * the sync timeline fd, frees the damage rects and finally the struct
+ * itself.
+ */
+void
+__cb_twe_buffer_free_callback(twe_wl_buffer_info *buf_info)
+{
+	twe_wl_surf_source *surf_source = buf_info->surf_source;
+	twe_wl_disp_source *disp_source = surf_source->disp_source;
+
+	TPL_INFO("[BUFFER_FREE]", "buf_info(%p) wl_buffer(%p) tbm_surface(%p)",
+			 buf_info, buf_info->wl_buffer, buf_info->tbm_surface);
+
+	/* Flush before destroying the proxy so pending requests reach the
+	 * compositor first. */
+	wl_display_flush(disp_source->disp);
+
+	if (buf_info->wl_buffer)
+		wayland_tbm_client_destroy_buffer(disp_source->wl_tbm_client,
+										  (void *)buf_info->wl_buffer);
+
+	/* Signal before closing: a reader of the dup'ed fd must not block
+	 * forever on a buffer that will never be committed/presented. */
+	if (buf_info->commit_sync_fd != -1) {
+		int ret = _write_to_eventfd(buf_info->commit_sync_fd);
+		if (ret == -1)
+			TPL_ERR("Failed to send commit_sync signal to fd(%d)",
+					buf_info->commit_sync_fd);
+		close(buf_info->commit_sync_fd);
+		buf_info->commit_sync_fd = -1;
+	}
+
+	if (buf_info->presentation_sync_fd != -1) {
+		int ret = _write_to_eventfd(buf_info->presentation_sync_fd);
+		if (ret == -1)
+			TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+					buf_info->presentation_sync_fd);
+		close(buf_info->presentation_sync_fd);
+		buf_info->presentation_sync_fd = -1;
+
+		/* NOTE(review): the feedback is only destroyed inside this branch,
+		 * i.e. when a presentation_sync_fd existed — appears intentional
+		 * since feedback is only requested when the fd is set; confirm. */
+		if (buf_info->presentation_feedback)
+			wp_presentation_feedback_destroy(buf_info->presentation_feedback);
+		buf_info->presentation_feedback = NULL;
+	}
+
+	if (buf_info->sync_timeline != -1) {
+		close(buf_info->sync_timeline);
+		buf_info->sync_timeline = -1;
+	}
+
+	if (buf_info->rects) {
+		free(buf_info->rects);
+		buf_info->rects = NULL;
+		buf_info->num_rects = 0;
+	}
+
+	buf_info->tbm_surface = NULL;
+
+	free(buf_info);
+}
+
+/* wl_buffer "release" event: the compositor no longer uses the buffer.
+ * Releases it back to the tbm_queue (unless explicit sync via sync_fd is
+ * in use), removes it from the committed list and drops the reference
+ * taken at commit time. */
+static void
+__cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	tbm_surface_h tbm_surface = (tbm_surface_h)data;
+
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+										   (void **)&buf_info);
+
+		if (buf_info && buf_info->need_to_release) {
+			twe_wl_surf_source *surf_source = buf_info->surf_source;
+			tbm_surface_queue_error_e tsq_err;
+
+			/* With a sync_fd the queue release is handled elsewhere
+			 * (fence/timeline based); only release here without one. */
+			if (buf_info->sync_fd == -1) {
+				tsq_err = tbm_surface_queue_release(surf_source->tbm_queue,
+													tbm_surface);
+				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+					TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+			}
+
+			if (surf_source->committed_buffers) {
+				g_mutex_lock(&surf_source->surf_mutex);
+				__tpl_list_remove_data(surf_source->committed_buffers,
+									   (void *)tbm_surface,
+									   TPL_FIRST, NULL);
+				g_mutex_unlock(&surf_source->surf_mutex);
+			}
+
+			buf_info->need_to_release = TPL_FALSE;
+
+			TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
+
+			TPL_LOG_T(BACKEND, "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+					  buf_info->wl_buffer, tbm_surface,
+					  _get_tbm_surface_bo_name(tbm_surface));
+			/* Drops the ref taken when the buffer was committed. */
+			tbm_surface_internal_unref(tbm_surface);
+
+		}
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+	}
+}
+
+/* wl_buffer listener; the cast adapts the wl_proxy-typed callback to the
+ * listener's wl_buffer-typed slot. */
+static const struct wl_buffer_listener wl_buffer_release_listener = {
+	(void *)__cb_buffer_release_callback,
+};
+
+/* zwp_linux_buffer_release_v1 "fenced_release" event: the compositor is
+ * done with the buffer once the given fence fd signals. Stores the fence
+ * as release_fence_fd, returns the buffer to the tbm_queue and drops the
+ * commit-time reference. */
+static void
+__cb_buffer_fenced_release(void *data,
+		struct zwp_linux_buffer_release_v1 *release, int32_t fence)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	tbm_surface_h tbm_surface = (tbm_surface_h)data;
+
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+										   (void **)&buf_info);
+
+		if (buf_info && buf_info->need_to_release) {
+			twe_wl_surf_source *surf_source = buf_info->surf_source;
+			tbm_surface_queue_error_e tsq_err;
+
+			if (surf_source->committed_buffers) {
+				g_mutex_lock(&surf_source->surf_mutex);
+				__tpl_list_remove_data(surf_source->committed_buffers,
+									   (void *)tbm_surface,
+									   TPL_FIRST, NULL);
+				g_mutex_unlock(&surf_source->surf_mutex);
+			}
+
+			buf_info->need_to_release = TPL_FALSE;
+
+			/* The release object is one-shot: destroy it and keep the
+			 * fence for the consumer of this buffer. */
+			zwp_linux_buffer_release_v1_destroy(buf_info->buffer_release);
+			buf_info->buffer_release = NULL;
+			buf_info->release_fence_fd = fence;
+
+			TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
+					   _get_tbm_surface_bo_name(tbm_surface),
+					   fence);
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
+
+			TPL_LOG_T(BACKEND,
+					  "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+					  buf_info->wl_buffer, tbm_surface,
+					  _get_tbm_surface_bo_name(tbm_surface),
+					  fence);
+
+			tsq_err = tbm_surface_queue_release(surf_source->tbm_queue,
+												tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+			/* Drops the ref taken when the buffer was committed. */
+			tbm_surface_internal_unref(tbm_surface);
+		}
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+	}
+}
+
+/* zwp_linux_buffer_release_v1 "immediate_release" event: the compositor
+ * is done with the buffer right away (no fence). Same bookkeeping as the
+ * fenced variant, but release_fence_fd is reset to -1. */
+static void
+__cb_buffer_immediate_release(void *data,
+		struct zwp_linux_buffer_release_v1 *release)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	tbm_surface_h tbm_surface = (tbm_surface_h)data;
+
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+										   (void **)&buf_info);
+
+		if (buf_info && buf_info->need_to_release) {
+			twe_wl_surf_source *surf_source = buf_info->surf_source;
+			tbm_surface_queue_error_e tsq_err;
+
+			if (surf_source->committed_buffers) {
+				g_mutex_lock(&surf_source->surf_mutex);
+				__tpl_list_remove_data(surf_source->committed_buffers,
+									   (void *)tbm_surface,
+									   TPL_FIRST, NULL);
+				g_mutex_unlock(&surf_source->surf_mutex);
+			}
+
+			buf_info->need_to_release = TPL_FALSE;
+
+			/* The release object is one-shot: destroy it; no fence here. */
+			zwp_linux_buffer_release_v1_destroy(buf_info->buffer_release);
+			buf_info->buffer_release = NULL;
+			buf_info->release_fence_fd = -1;
+
+			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
+					   _get_tbm_surface_bo_name(tbm_surface));
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
+
+			TPL_LOG_T(BACKEND,
+					  "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+					  buf_info->wl_buffer, tbm_surface,
+					  _get_tbm_surface_bo_name(tbm_surface));
+
+			tsq_err = tbm_surface_queue_release(surf_source->tbm_queue,
+												tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+			/* Drops the ref taken when the buffer was committed. */
+			tbm_surface_internal_unref(tbm_surface);
+		}
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+	}
+}
+
+/* Listener for explicit-sync buffer release events.
+ * NOTE(review): "listner" is a typo but the name is referenced elsewhere;
+ * renaming must be done file-wide in a separate change. */
+static const struct zwp_linux_buffer_release_v1_listener explicit_sync_release_listner = {
+	__cb_buffer_fenced_release,
+	__cb_buffer_immediate_release,
+};
+
+/* Attach (or refresh) a twe_wl_buffer_info on a freshly dequeued
+ * tbm_surface.
+ *
+ * Called from the tbm_queue trace callback on DEQUEUE. If the surface
+ * already carries buffer info it is reused and only the per-frame fields
+ * (transform, offsets, serial, sync fds, damage rects) are refreshed;
+ * otherwise a new info struct and its wl_buffer proxy are created and
+ * registered as tbm user data under KEY_BUFFER_INFO.
+ */
+static void
+_twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source,
+								tbm_surface_h tbm_surface)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	struct wl_egl_window *wl_egl_window = NULL;
+	struct tizen_private *tizen_private = NULL;
+
+	if (!surf_source || g_source_is_destroyed(&surf_source->gsource)) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
+		return;
+	}
+
+	wl_egl_window = surf_source->wl_egl_window;
+	tizen_private = _get_tizen_private(wl_egl_window);
+
+	if (!tbm_surface || !tbm_surface_internal_is_valid(tbm_surface)) {
+		TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
+		return;
+	}
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	/* If buf_info is already existed, reuse it. */
+	if (buf_info) {
+		if (tizen_private) {
+			/* Window rotation changed since last use: re-send transform
+			 * at the next commit. */
+			if (buf_info->w_transform != tizen_private->window_transform) {
+				buf_info->w_transform = tizen_private->window_transform;
+				buf_info->w_rotated = TPL_TRUE;
+			}
+
+			buf_info->transform = tizen_private->transform;
+			buf_info->dx = wl_egl_window->dx;
+			buf_info->dy = wl_egl_window->dy;
+			/* Either the app-provided serial or a monotonically
+			 * increasing one from tizen_private. */
+			if (surf_source->set_serial_is_used) {
+				buf_info->serial = surf_source->serial;
+			} else {
+				++tizen_private->serial;
+				buf_info->serial = tizen_private->serial;
+			}
+		}
+
+		if (buf_info->rects) {
+			free(buf_info->rects);
+			buf_info->rects = NULL;
+			buf_info->num_rects = 0;
+		}
+
+		/* Reset all per-frame state for the new dequeue cycle. */
+		buf_info->draw_done = TPL_FALSE;
+		buf_info->need_to_commit = TPL_TRUE;
+		buf_info->sync_fd = -1;
+		buf_info->acquire_fence_fd = -1;
+		buf_info->commit_sync_fd = -1;
+
+		buf_info->presentation_sync_fd = -1;
+		buf_info->presentation_feedback = NULL;
+
+		if (surf_source->in_use_buffers) {
+			g_mutex_lock(&surf_source->surf_mutex);
+			__tpl_list_push_back(surf_source->in_use_buffers,
+								 (void *)tbm_surface);
+			g_mutex_unlock(&surf_source->surf_mutex);
+		}
+
+		TRACE_MARK("[SET_BUFFER_INFO] BO(%d)",
+				   _get_tbm_surface_bo_name(tbm_surface));
+
+		TPL_LOG_T(BACKEND,
+				  "[REUSE_BUF] buf_info(%p) tbm_surface(%p) bo(%d) (%dx%d) "
+				  "transform(%d) w_transform(%d)",
+				  buf_info, tbm_surface,
+				  _get_tbm_surface_bo_name(tbm_surface),
+				  buf_info->width, buf_info->height,
+				  buf_info->transform, buf_info->w_transform);
+		return;
+	}
+
+	/* First use of this tbm_surface: build new buffer info. */
+	buf_info = (twe_wl_buffer_info *)calloc(1, sizeof(twe_wl_buffer_info));
+	if (!buf_info) {
+		TPL_ERR("Failed to allocate memory for twe_wl_buffer_info.");
+		return;
+	}
+
+	buf_info->wl_buffer =
+		(struct wl_proxy *)wayland_tbm_client_create_buffer(
+				surf_source->disp_source->wl_tbm_client, tbm_surface);
+
+	if (!buf_info->wl_buffer) {
+		TPL_ERR("Failed to create wl_buffer from tbm_surface(%p)",
+				tbm_surface);
+		free(buf_info);
+		return;
+	}
+
+	if (wl_egl_window && tizen_private) {
+		/* EGL window path: geometry comes from the wl_egl_window. */
+		buf_info->dx = wl_egl_window->dx;
+		buf_info->dy = wl_egl_window->dy;
+		buf_info->width = wl_egl_window->width;
+		buf_info->height = wl_egl_window->height;
+
+		if (buf_info->w_transform != tizen_private->window_transform) {
+			buf_info->w_transform = tizen_private->window_transform;
+			buf_info->w_rotated = TPL_TRUE;
+		}
+
+		buf_info->transform = tizen_private->transform;
+
+		if (surf_source->set_serial_is_used) {
+			buf_info->serial = surf_source->serial;
+		} else {
+			++tizen_private->serial;
+			buf_info->serial = tizen_private->serial;
+		}
+
+		if (surf_source->in_use_buffers) {
+			g_mutex_lock(&surf_source->surf_mutex);
+			__tpl_list_push_back(surf_source->in_use_buffers,
+								 (void *)tbm_surface);
+			g_mutex_unlock(&surf_source->surf_mutex);
+		}
+	} else {
+		/* Vulkan swapchain path: geometry comes from swapchain props. */
+		buf_info->dx = 0;
+		buf_info->dy = 0;
+		buf_info->width = surf_source->swapchain_properties.width;
+		buf_info->height = surf_source->swapchain_properties.height;
+		buf_info->w_transform = 0;
+		buf_info->w_rotated = TPL_FALSE;
+		buf_info->transform = 0;
+		buf_info->serial = 0;
+	}
+
+	buf_info->sync_timestamp = 0;
+	buf_info->surf_source = surf_source;
+	buf_info->num_rects = 0;
+	buf_info->rects = NULL;
+	buf_info->need_to_commit = TPL_TRUE;
+	buf_info->draw_done = TPL_FALSE;
+	buf_info->tbm_surface = tbm_surface;
+	buf_info->sync_fd = -1;
+	buf_info->sync_timeline = -1;
+	buf_info->is_vk_image = surf_source->disp_source->is_vulkan_dpy;
+	buf_info->release_fence_fd = -1;
+	buf_info->acquire_fence_fd = -1;
+	buf_info->commit_sync_fd = -1;
+	buf_info->presentation_sync_fd = -1;
+	buf_info->presentation_feedback = NULL;
+
+
+	wl_buffer_add_listener((void *)buf_info->wl_buffer,
+						   &wl_buffer_release_listener, tbm_surface);
+
+	if (buf_info->is_vk_image) {
+		/* Vulkan buffers get a tbm sync timeline used for fence-based
+		 * release; a failed create is tolerated (warn only). */
+		buf_info->sync_timeline = tbm_sync_timeline_create();
+		if (buf_info->sync_timeline == -1) {
+			char buf[1024];
+			strerror_r(errno, buf, sizeof(buf));
+			TPL_WARN("Failed to create TBM sync timeline: %d(%s)", errno, buf);
+		}
+
+		wayland_tbm_client_set_sync_timeline(surf_source->disp_source->wl_tbm_client,
+											 (void *)buf_info->wl_buffer,
+											 buf_info->sync_timeline);
+	}
+
+	/* Register buf_info as user data so it is freed together with the
+	 * tbm_surface via __cb_twe_buffer_free_callback. */
+	tbm_surface_internal_add_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (tbm_data_free)__cb_twe_buffer_free_callback);
+	tbm_surface_internal_set_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   buf_info);
+
+	TRACE_MARK("[SET_BUFFER_INFO] BO(%d)",
+			   _get_tbm_surface_bo_name(tbm_surface));
+	TPL_INFO("[NEW_BUFFER_CREATED]",
+			 "buf_info(%p) tbm_surface(%p) bo(%d) (%dx%d) transform(%d) w_transform(%d)",
+			 buf_info, tbm_surface,
+			 _get_tbm_surface_bo_name(tbm_surface),
+			 buf_info->width, buf_info->height,
+			 buf_info->transform, buf_info->w_transform);
+}
+
+/* Drop tracking of a tbm_surface whose dequeue was canceled.
+ *
+ * Invoked from the tbm_queue trace callback on CANCEL_DEQUEUE; the buffer
+ * is simply removed from the in_use list under surf_mutex.
+ */
+static void
+_twe_surface_cancel_dequeued_buffer(twe_wl_surf_source *surf_source,
+									tbm_surface_h tbm_surface)
+{
+	if (surf_source == NULL) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
+		return;
+	}
+
+	TPL_LOG_T(BACKEND,
+			  "[CANCEL_BUFFER] Stop tracking of canceled tbm_surface(%p)",
+			  tbm_surface);
+
+	if (surf_source->in_use_buffers == NULL)
+		return;
+
+	g_mutex_lock(&surf_source->surf_mutex);
+	/* This buffer no longer belongs to the client; untrack it. */
+	__tpl_list_remove_data(surf_source->in_use_buffers,
+						   (void *)tbm_surface, TPL_FIRST, NULL);
+	g_mutex_unlock(&surf_source->surf_mutex);
+}
+
+/* Bookkeeping for a tbm_surface that was just enqueued (rendering done).
+ *
+ * Moves ownership of the surface-level commit/presentation sync fds into
+ * this buffer's info (resetting the surface-level fds to -1 so the next
+ * frame creates fresh ones) and removes the buffer from the in_use list.
+ */
+static void
+_twe_surface_trace_enqueue_buffer(twe_wl_surf_source *surf_source,
+								  tbm_surface_h tbm_surface)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+
+	if (!surf_source) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", surf_source);
+		return;
+	}
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	if (buf_info) {
+		/* Hand the pending commit sync fd over to this buffer. */
+		g_mutex_lock(&surf_source->commit_sync.mutex);
+		buf_info->commit_sync_fd = surf_source->commit_sync.fd;
+		surf_source->commit_sync.fd = -1;
+		TRACE_ASYNC_BEGIN(buf_info->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
+						  _get_tbm_surface_bo_name(tbm_surface));
+		g_mutex_unlock(&surf_source->commit_sync.mutex);
+
+		/* Same handoff for the presentation sync fd. */
+		g_mutex_lock(&surf_source->presentation_sync.mutex);
+		buf_info->presentation_sync_fd = surf_source->presentation_sync.fd;
+		surf_source->presentation_sync.fd = -1;
+		g_mutex_unlock(&surf_source->presentation_sync.mutex);
+	}
+
+	if (surf_source->in_use_buffers) {
+		g_mutex_lock(&surf_source->surf_mutex);
+		/* Stop tracking of this canceled tbm_surface */
+		__tpl_list_remove_data(surf_source->in_use_buffers,
+							   (void *)tbm_surface, TPL_FIRST, NULL);
+		g_mutex_unlock(&surf_source->surf_mutex);
+	}
+}
+
+/* tbm_queue reset callback: refresh the cached swapchain properties from
+ * the (possibly resized) queue and wake any thread blocked waiting for a
+ * free queue slot. */
+static void
+__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
+							  void *data)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
+
+	if (!surf_source || g_source_is_destroyed(&surf_source->gsource)) {
+		TPL_ERR("Invalid parameter. surf_source(%p)", surf_source);
+		return;
+	}
+
+	/* Re-read geometry/format: a reset may follow a window resize. */
+	surf_source->swapchain_properties.width =
+		tbm_surface_queue_get_width(tbm_queue);
+	surf_source->swapchain_properties.height =
+		tbm_surface_queue_get_height(tbm_queue);
+	surf_source->swapchain_properties.buffer_count =
+		tbm_surface_queue_get_size(tbm_queue);
+	surf_source->format = tbm_surface_queue_get_format(tbm_queue);
+
+	g_mutex_lock(&surf_source->free_queue_mutex);
+	g_cond_signal(&surf_source->free_queue_cond);
+	g_mutex_unlock(&surf_source->free_queue_mutex);
+
+	TPL_LOG_T(BACKEND, "tbm_queue(%p) has been reset!", tbm_queue);
+}
+
+/* tbm_queue acquirable callback: a rendered buffer is ready to acquire.
+ * Writes to the surface's eventfd to wake the worker thread's event
+ * source (presumably dispatched on the twe thread main loop — confirm). */
+static void
+__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h surface_queue,
+								   void *data)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
+	uint64_t value = 1;
+	int ret;
+
+	g_mutex_lock(&surf_source->surf_mutex);
+
+	/* eventfd write: 8-byte counter increment signals the poller. */
+	ret = write(surf_source->event_fd, &value, sizeof(uint64_t));
+	if (ret == -1) {
+		TPL_ERR("failed to send acquirable event. twe_wl_surf_source(%p)",
+				surf_source);
+		g_mutex_unlock(&surf_source->surf_mutex);
+		return;
+	}
+
+	g_mutex_unlock(&surf_source->surf_mutex);
+}
+
+/* tbm_queue trace callback: routes buffer life-cycle events to the
+ * matching bookkeeping helper. */
+static void __cb_tbm_queue_trace_callback(tbm_surface_queue_h tbm_queue,
+										  tbm_surface_h tbm_surface,
+										  tbm_surface_queue_trace trace,
+										  void *data)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
+
+	if (trace == TBM_SURFACE_QUEUE_TRACE_DEQUEUE) {
+		/* Buffer handed to the client: attach/refresh wl_buffer info. */
+		_twe_surface_set_wl_buffer_info(surf_source, tbm_surface);
+	} else if (trace == TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE) {
+		_twe_surface_cancel_dequeued_buffer(surf_source, tbm_surface);
+	} else if (trace == TBM_SURFACE_QUEUE_TRACE_ENQUEUE) {
+		_twe_surface_trace_enqueue_buffer(surf_source, tbm_surface);
+	}
+	/* All other trace events are intentionally ignored. */
+}
+
+/* tbm_queue dequeueable callback: a free buffer became available; wake
+ * any thread blocked on free_queue_cond waiting to dequeue. */
+static void __cb_tbm_queue_dequeueable_callback(tbm_surface_queue_h tbm_queue,
+												void *data)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)data;
+
+	if (!surf_source || g_source_is_destroyed(&surf_source->gsource)) {
+		TPL_ERR("Invalid parameter. surf_source(%p)", surf_source);
+		return;
+	}
+
+	g_mutex_lock(&surf_source->free_queue_mutex);
+
+	TPL_LOG_T(BACKEND, "[DEQUEUEABLE_CB] surf_source(%p) tbm_queue(%p)",
+			  surf_source, surf_source->tbm_queue);
+
+	g_cond_signal(&surf_source->free_queue_cond);
+	g_mutex_unlock(&surf_source->free_queue_mutex);
+}
+
+static void
+_twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source,
+ tbm_surface_h tbm_surface);
+/* tdm vblank callback: a vblank the surface was waiting on has occurred.
+ *
+ * Marks the surface vblank-done and, under surf_mutex, commits the next
+ * pending buffer: for EGL the head of vblank_waiting_buffers; for Vulkan
+ * either the latest draw_done buffer (MAILBOX) or the queue head (FIFO /
+ * FIFO_RELAXED). A TDM timeout is tolerated with a warning.
+ */
+static void
+__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
+					   unsigned int sequence, unsigned int tv_sec,
+					   unsigned int tv_usec, void *user_data)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)user_data;
+	twe_wl_disp_source *disp_source = NULL;
+
+	if (!surf_source) {
+		TPL_ERR("Invalid parameter. user_data(%p)", user_data);
+		return;
+	}
+
+	if (g_source_is_destroyed(&surf_source->gsource)) {
+		TPL_WARN("surf_source already destroyed.");
+		return;
+	}
+
+	TRACE_ASYNC_END((int)surf_source, "WAIT_VBLANK");
+
+	if (error == TDM_ERROR_TIMEOUT)
+		TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. surf_source(%p)",
+				 surf_source);
+
+	disp_source = surf_source->disp_source;
+
+	surf_source->vblank_done = TPL_TRUE;
+
+	g_mutex_lock(&surf_source->surf_mutex);
+	if (!disp_source->is_vulkan_dpy) {
+		/* EGL path: commit the oldest buffer waiting for vblank. */
+		if (surf_source->vblank_waiting_buffers) {
+			tbm_surface_h tbm_surface = NULL;
+			tbm_surface = (tbm_surface_h)__tpl_list_pop_front(
+					surf_source->vblank_waiting_buffers,
+					NULL);
+			if (tbm_surface)
+				_twe_thread_wl_surface_commit(surf_source, tbm_surface);
+		}
+	} else {
+		/* Vulkan path: behavior depends on the swapchain present mode. */
+		switch (surf_source->swapchain_properties.present_mode) {
+		case TPL_DISPLAY_PRESENT_MODE_MAILBOX:
+			/* MAILBOX: only the most recent finished frame is shown. */
+			if (surf_source->draw_done_buffer) {
+				_twe_thread_wl_vk_surface_commit(surf_source,
+												 surf_source->draw_done_buffer);
+				surf_source->draw_done_buffer = NULL;
+			}
+			break;
+
+		case TPL_DISPLAY_PRESENT_MODE_FIFO:
+		case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED:
+			/* FIFO: frames are presented strictly in submission order. */
+			if (surf_source->vblank_waiting_buffers) {
+				tbm_surface_h tbm_surface = NULL;
+				tbm_surface = (tbm_surface_h)__tpl_list_pop_front(
+						surf_source->vblank_waiting_buffers,
+						NULL);
+				if (tbm_surface)
+					_twe_thread_wl_vk_surface_commit(surf_source, tbm_surface);
+			}
+
+			break;
+		}
+	}
+	g_mutex_unlock(&surf_source->surf_mutex);
+}
+
+static tdm_client_vblank*
+_twe_surface_create_vblank(tdm_client *tdm_client);
+
+/* Arm a one-shot tdm vblank wait for this surface.
+ *
+ * Creates the surface's tdm_client_vblank lazily on first use. When the
+ * global tdm source is unavailable the feature is disabled: any existing
+ * vblank object is destroyed and vblank_done is forced TRUE so commits
+ * are not blocked. Returns TPL_ERROR_NONE on success.
+ */
+static tpl_result_t
+_twe_surface_wait_vblank(twe_wl_surf_source *surf_source)
+{
+	tdm_error tdm_err = TDM_ERROR_NONE;
+
+	if (!_twe_ctx->tdm_source) {
+		TPL_WARN("tdm_vblank feature is disabled.");
+
+		if (surf_source->vblank) {
+			tdm_client_vblank_destroy(surf_source->vblank);
+			surf_source->vblank = NULL;
+			/* Never leave a surface waiting on a vblank that cannot fire. */
+			surf_source->vblank_done = TPL_TRUE;
+		}
+
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	if (!surf_source->vblank) {
+		surf_source->vblank =
+			_twe_surface_create_vblank(_twe_ctx->tdm_source->tdm_client);
+		if (!surf_source->vblank) {
+			TPL_WARN("Failed to create vblank. surf_source(%p)",
+					 surf_source);
+			return TPL_ERROR_OUT_OF_MEMORY;
+		}
+	}
+
+	tdm_err = tdm_client_vblank_wait(surf_source->vblank,
+			surf_source->post_interval, /* TODO: interval */
+			__cb_tdm_client_vblank,
+			(void *)surf_source);
+
+	if (tdm_err == TDM_ERROR_NONE) {
+		surf_source->vblank_done = TPL_FALSE;
+		TRACE_ASYNC_BEGIN((int)surf_source, "WAIT_VBLANK");
+	} else {
+		TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	return TPL_ERROR_NONE;
+}
+
+/* Commit a Vulkan swapchain buffer to the compositor.
+ *
+ * Attaches the wl_buffer, posts damage (full swapchain extent or the
+ * buffer's damage rects), commits and flushes. For FIFO present modes a
+ * vblank wait is armed for pacing. When the buffer carries a sync_fd the
+ * tbm_queue slot is released immediately (fence-based release).
+ */
+static void
+_twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source,
+								 tbm_surface_h tbm_surface)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	struct wl_surface *wl_surface = surf_source->surf;
+	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+	uint32_t version;
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	if (!buf_info) {
+		TPL_ERR("Failed to get wl_buffer_info from tbm_surface(%p)",
+				tbm_surface);
+		return;
+	}
+
+	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
+	wl_surface_attach(wl_surface, (void *)buf_info->wl_buffer,
+					  0, 0);
+
+	if (buf_info->num_rects < 1 || buf_info->rects == NULL) {
+		/* No damage rects given: damage the whole swapchain extent.
+		 * wl_surface_damage_buffer requires wl_surface >= v4. */
+		if (version < 4) {
+			wl_surface_damage(wl_surface, 0, 0,
+							  surf_source->swapchain_properties.width,
+							  surf_source->swapchain_properties.height);
+		} else {
+			wl_surface_damage_buffer(wl_surface, 0, 0,
+									 surf_source->swapchain_properties.width,
+									 surf_source->swapchain_properties.height);
+		}
+	} else {
+		/* rects are packed as 4 ints per rect: x, y, w, h. */
+		int i;
+		for (i = 0; i < buf_info->num_rects; i++) {
+			if (version < 4) {
+				wl_surface_damage(wl_surface,
+								  buf_info->rects[i * 4 + 0],
+								  buf_info->rects[i * 4 + 1],
+								  buf_info->rects[i * 4 + 2],
+								  buf_info->rects[i * 4 + 3]);
+			} else {
+				wl_surface_damage_buffer(wl_surface,
+										 buf_info->rects[i * 4 + 0],
+										 buf_info->rects[i * 4 + 1],
+										 buf_info->rects[i * 4 + 2],
+										 buf_info->rects[i * 4 + 3]);
+			}
+		}
+	}
+
+	/* Dependent on wl_buffer release event. */
+	buf_info->need_to_release = TPL_TRUE;
+
+	wl_surface_commit(wl_surface);
+
+	wl_display_flush(surf_source->disp_source->disp);
+
+	TRACE_MARK("[COMMIT] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
+	TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+					  _get_tbm_surface_bo_name(tbm_surface));
+
+	buf_info->sync_timestamp++;
+
+	TPL_LOG_T(BACKEND, "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+			  buf_info->wl_buffer, tbm_surface,
+			  _get_tbm_surface_bo_name(tbm_surface));
+
+	/* FIFO modes pace the next commit on vblank. */
+	if (surf_source->swapchain_properties.present_mode
+			== TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED ||
+		surf_source->swapchain_properties.present_mode
+			== TPL_DISPLAY_PRESENT_MODE_FIFO) {
+		if ((_twe_ctx->tdm_source || surf_source->vblank) &&
+			_twe_surface_wait_vblank(surf_source) != TPL_ERROR_NONE)
+			TPL_ERR("Failed to set wait vblank");
+	}
+
+	if (surf_source->committed_buffers) {
+		__tpl_list_push_back(surf_source->committed_buffers, tbm_surface);
+	}
+
+	/* Presented buffer's sync operating dependent on tdm timeline fence. */
+	if (buf_info->sync_fd != -1) {
+		TPL_LOG_T(BACKEND, "[RELEASE_IMMEDIATELY] tbm_surface(%p) bo(%d) sync_fd(%d)",
+				  tbm_surface, _get_tbm_surface_bo_name(tbm_surface),
+				  buf_info->sync_fd);
+		TRACE_MARK("[RELEASE_IMMEDIATELY] BO(%d)",
+				   _get_tbm_surface_bo_name(tbm_surface));
+		tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+		if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+			TPL_ERR("Failed to release tbm_surface(%p) when vk_surface_commit.",
+					tbm_surface);
+	}
+}
+
+/* presentation feedback "sync_output" event: not used by this backend. */
+static void
+__cb_presentation_feedback_sync_output(void *data,
+		struct wp_presentation_feedback *presentation_feedback,
+		struct wl_output *output)
+{
+	/* Explicitly mark every parameter as unused. */
+	TPL_IGNORE(output);
+	TPL_IGNORE(presentation_feedback);
+	TPL_IGNORE(data);
+}
+
+/* wp_presentation_feedback "presented" event: the committed buffer was
+ * shown on screen. Signals and closes the buffer's presentation sync fd,
+ * destroys the feedback object and removes the buffer from the pending
+ * feedback list. Timing arguments are unused. */
+static void
+__cb_presentation_feedback_presented(void *data,
+		struct wp_presentation_feedback *presentation_feedback,
+		uint32_t tv_sec_hi,
+		uint32_t tv_sec_lo,
+		uint32_t tv_nsec,
+		uint32_t refresh_nsec,
+		uint32_t seq_hi,
+		uint32_t seq_lo,
+		uint32_t flags)
+{
+	TPL_IGNORE(tv_sec_hi);
+	TPL_IGNORE(tv_sec_lo);
+	TPL_IGNORE(tv_nsec);
+	TPL_IGNORE(refresh_nsec);
+	TPL_IGNORE(seq_hi);
+	TPL_IGNORE(seq_lo);
+	TPL_IGNORE(flags);
+
+	tbm_surface_h tbm_surface = (tbm_surface_h)data;
+	twe_wl_buffer_info *buf_info = NULL;
+	twe_wl_surf_source *surf_source = NULL;
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	if (!buf_info) {
+		TPL_ERR("Failed to get twe_wl_buffer_info from tbm_surface(%p)",
+				tbm_surface);
+		return;
+	}
+
+	surf_source = buf_info->surf_source;
+
+	g_mutex_lock(&surf_source->presentation_sync.mutex);
+
+	TPL_DEBUG("[FEEDBACK][PRESENTED] surf_source(%p) tbm_surface(%p) bo(%d)",
+			  surf_source, tbm_surface,
+			  _get_tbm_surface_bo_name(tbm_surface));
+
+	/* Signal waiters on the dup'ed fd, then close our end. */
+	if (buf_info->presentation_sync_fd != -1) {
+		int ret = _write_to_eventfd(buf_info->presentation_sync_fd);
+		if (ret == -1) {
+			TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+					buf_info->presentation_sync_fd);
+		}
+
+		TRACE_ASYNC_END(buf_info->presentation_sync_fd,
+						"[PRESENTATION_SYNC] bo(%d)",
+						_get_tbm_surface_bo_name(tbm_surface));
+
+		close(buf_info->presentation_sync_fd);
+		buf_info->presentation_sync_fd = -1;
+	}
+
+	/* Feedback objects are one-shot; destroy after the event. */
+	if (buf_info->presentation_feedback)
+		wp_presentation_feedback_destroy(buf_info->presentation_feedback);
+
+	buf_info->presentation_feedback = NULL;
+
+	__tpl_list_remove_data(surf_source->presentation_feedbacks, tbm_surface,
+						   TPL_FIRST, NULL);
+
+	g_mutex_unlock(&surf_source->presentation_sync.mutex);
+}
+
+/* wp_presentation_feedback "discarded" event: the committed buffer was
+ * never shown. Performs the same cleanup as the presented case: signal
+ * and close the sync fd, destroy the feedback, drop from pending list. */
+static void
+__cb_presentation_feedback_discarded(void *data,
+		struct wp_presentation_feedback *presentation_feedback)
+{
+	tbm_surface_h tbm_surface = (tbm_surface_h)data;
+	twe_wl_buffer_info *buf_info = NULL;
+	twe_wl_surf_source *surf_source = NULL;
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	if (!buf_info) {
+		TPL_ERR("Failed to get twe_wl_buffer_info from tbm_surface(%p)",
+				tbm_surface);
+		return;
+	}
+
+	surf_source = buf_info->surf_source;
+
+	g_mutex_lock(&surf_source->presentation_sync.mutex);
+
+	TPL_DEBUG("[FEEDBACK][DISCARDED] surf_source(%p) tbm_surface(%p) bo(%d)",
+			  surf_source, tbm_surface,
+			  _get_tbm_surface_bo_name(tbm_surface));
+
+	/* Signal waiters even on discard so they never block forever. */
+	if (buf_info->presentation_sync_fd != -1) {
+		int ret = _write_to_eventfd(buf_info->presentation_sync_fd);
+		if (ret == -1) {
+			TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+					buf_info->presentation_sync_fd);
+		}
+
+		TRACE_ASYNC_END(buf_info->presentation_sync_fd,
+						"[PRESENTATION_SYNC] bo(%d)",
+						_get_tbm_surface_bo_name(tbm_surface));
+
+		close(buf_info->presentation_sync_fd);
+		buf_info->presentation_sync_fd = -1;
+	}
+
+	if (buf_info->presentation_feedback)
+		wp_presentation_feedback_destroy(buf_info->presentation_feedback);
+
+	buf_info->presentation_feedback = NULL;
+
+	__tpl_list_remove_data(surf_source->presentation_feedbacks, tbm_surface,
+						   TPL_FIRST, NULL);
+
+	g_mutex_unlock(&surf_source->presentation_sync.mutex);
+}
+
+/* Listener for wp_presentation feedback events. */
+static const struct wp_presentation_feedback_listener feedback_listener = {
+	__cb_presentation_feedback_sync_output, /* sync_output feedback -*/
+	__cb_presentation_feedback_presented,
+	__cb_presentation_feedback_discarded
+};
+
+/* Commit an EGL window buffer to the compositor (runs on the twe thread).
+ *
+ * Sequence: request presentation feedback (if a sync fd is pending),
+ * apply buffer/window transforms, attach, post damage (full or per-rect,
+ * y-inverted for surface-coordinate damage), set the wayland-tbm serial,
+ * hook up explicit sync (acquire fence + release listener) when enabled,
+ * then commit, flush, arm a vblank wait, track the buffer as committed
+ * and finally signal/close the buffer's commit sync fd.
+ */
+static void
+_twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source,
+							  tbm_surface_h tbm_surface)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	twe_wl_disp_source *disp_source = surf_source->disp_source;
+	struct wl_surface *wl_surface = surf_source->surf;
+	struct wl_egl_window *wl_egl_window = surf_source->wl_egl_window;
+	uint32_t version;
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	if (!buf_info) {
+		TPL_ERR("Failed to get twe_wl_buffer_info from tbm_surface(%p)",
+				tbm_surface);
+		return;
+	}
+
+	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
+
+	/* Feedback is requested only when someone holds a presentation sync
+	 * fd for this buffer; the feedback events signal that fd. */
+	g_mutex_lock(&surf_source->presentation_sync.mutex);
+	if (disp_source->presentation && buf_info->presentation_sync_fd != -1) {
+		buf_info->presentation_feedback =
+			wp_presentation_feedback(disp_source->presentation,
+									 wl_surface);
+		wp_presentation_feedback_add_listener(buf_info->presentation_feedback,
+											  &feedback_listener, tbm_surface);
+		__tpl_list_push_back(surf_source->presentation_feedbacks, tbm_surface);
+		TRACE_ASYNC_BEGIN(buf_info->presentation_sync_fd,
+						  "[PRESENTATION_SYNC] bo(%d)",
+						  _get_tbm_surface_bo_name(tbm_surface));
+	}
+	g_mutex_unlock(&surf_source->presentation_sync.mutex);
+
+	/* Window rotation changed: tell wayland-tbm once per change. */
+	if (buf_info->w_rotated == TPL_TRUE) {
+		wayland_tbm_client_set_buffer_transform(
+				disp_source->wl_tbm_client,
+				(void *)buf_info->wl_buffer,
+				buf_info->w_transform);
+		buf_info->w_rotated = TPL_FALSE;
+	}
+
+	/* Only send set_buffer_transform when the value actually changed. */
+	if (surf_source->latest_transform != buf_info->transform) {
+		surf_source->latest_transform = buf_info->transform;
+		wl_surface_set_buffer_transform(wl_surface, buf_info->transform);
+	}
+
+	if (wl_egl_window) {
+		wl_egl_window->attached_width = buf_info->width;
+		wl_egl_window->attached_height = buf_info->height;
+	}
+
+	wl_surface_attach(wl_surface, (void *)buf_info->wl_buffer,
+					  buf_info->dx, buf_info->dy);
+
+	if (buf_info->num_rects < 1 || buf_info->rects == NULL) {
+		/* No damage rects: damage the full buffer.
+		 * wl_surface_damage_buffer requires wl_surface >= v4. */
+		if (version < 4) {
+			wl_surface_damage(wl_surface,
+							  buf_info->dx, buf_info->dy,
+							  buf_info->width, buf_info->height);
+		} else {
+			wl_surface_damage_buffer(wl_surface,
+									 0, 0,
+									 buf_info->width, buf_info->height);
+		}
+	} else {
+		/* rects are packed as 4 ints per rect: x, y, w, h, with y given
+		 * bottom-up (GL convention) — hence the inversion here. */
+		int i;
+		for (i = 0; i < buf_info->num_rects; i++) {
+			int inverted_y =
+				buf_info->height - (buf_info->rects[i * 4 + 1] +
+						buf_info->rects[i * 4 + 3]);
+			if (version < 4) {
+				wl_surface_damage(wl_surface,
+								  buf_info->rects[i * 4 + 0],
+								  inverted_y,
+								  buf_info->rects[i * 4 + 2],
+								  buf_info->rects[i * 4 + 3]);
+			} else {
+				wl_surface_damage_buffer(wl_surface,
+										 buf_info->rects[i * 4 + 0],
+										 inverted_y,
+										 buf_info->rects[i * 4 + 2],
+										 buf_info->rects[i * 4 + 3]);
+			}
+		}
+	}
+	wayland_tbm_client_set_buffer_serial(disp_source->wl_tbm_client,
+										 (void *)buf_info->wl_buffer,
+										 buf_info->serial);
+
+	buf_info->need_to_release = TPL_TRUE;
+
+	if (surf_source->disp_source->use_explicit_sync &&
+		surf_source->use_surface_sync) {
+
+		/* Explicit sync: pass the acquire fence to the compositor and
+		 * register for the (fenced/immediate) release event. */
+		zwp_linux_surface_synchronization_v1_set_acquire_fence(surf_source->surface_sync,
+				buf_info->acquire_fence_fd);
+		TPL_DEBUG("[SET_ACQUIRE_FENCE] surf_source(%p) tbm_surface(%p) acquire_fence(%d)",
+				  surf_source, tbm_surface, buf_info->acquire_fence_fd);
+		close(buf_info->acquire_fence_fd);
+		buf_info->acquire_fence_fd = -1;
+
+		buf_info->buffer_release =
+			zwp_linux_surface_synchronization_v1_get_release(surf_source->surface_sync);
+		if (!buf_info->buffer_release) {
+			TPL_ERR("Failed to get buffer_release. twe_surface(%p)", surf_source);
+		} else {
+			zwp_linux_buffer_release_v1_add_listener(
+				buf_info->buffer_release, &explicit_sync_release_listner, tbm_surface);
+			TPL_DEBUG("add explicit_sync_release_listener.");
+		}
+	}
+
+	wl_surface_commit(wl_surface);
+
+	wl_display_flush(surf_source->disp_source->disp);
+
+	TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+					  _get_tbm_surface_bo_name(tbm_surface));
+
+	buf_info->need_to_commit = TPL_FALSE;
+
+	TPL_LOG_T(BACKEND, "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+			  buf_info->wl_buffer, tbm_surface,
+			  _get_tbm_surface_bo_name(tbm_surface));
+
+	/* Pace the next commit on vblank when the feature is available. */
+	if ((_twe_ctx->tdm_source || surf_source->vblank) &&
+		_twe_surface_wait_vblank(surf_source) != TPL_ERROR_NONE)
+		TPL_ERR("Failed to set wait vblank.");
+
+
+	if (surf_source->committed_buffers) {
+		__tpl_list_push_back(surf_source->committed_buffers, tbm_surface);
+	}
+
+	/* The commit has happened: signal whoever holds the dup'ed commit
+	 * sync fd, then close our end. */
+	g_mutex_lock(&surf_source->commit_sync.mutex);
+
+	if (buf_info->commit_sync_fd != -1) {
+		int ret = _write_to_eventfd(buf_info->commit_sync_fd);
+		if (ret == -1) {
+			TPL_ERR("Failed to send commit_sync signal to fd(%d)", buf_info->commit_sync_fd);
+		}
+
+		TRACE_ASYNC_END(buf_info->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
+						_get_tbm_surface_bo_name(tbm_surface));
+		TPL_DEBUG("[COMMIT_SYNC][SEND] surf_source(%p) commit_sync_fd(%d)",
+				  surf_source, buf_info->commit_sync_fd);
+
+		close(buf_info->commit_sync_fd);
+		buf_info->commit_sync_fd = -1;
+	}
+
+	g_mutex_unlock(&surf_source->commit_sync.mutex);
+}
+
+/* The following function, _twe_thread_wl_surface_acquire_and_commit, can be
+ * called in two situations:
+ * one is when an acquirable event is received from the main thread,
+ * and the other is when the __cb_tdm_client_vblank callback is called.
+ * It is called from both places so that only one commit is made per vblank.
+ */
+/* Acquire every ready (render-done) buffer from the surface's tbm_queue and
+ * either commit it right away or park it until the next vblank.
+ * Called on the twe thread, either from the eventfd dispatch (acquirable
+ * event) or from the tdm vblank callback (see comment above).
+ * For EGL surfaces the decision depends only on vblank_done; for Vulkan
+ * surfaces it additionally depends on the swapchain present mode.
+ */
+static void
+_twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source)
+{
+	twe_wl_disp_source *disp_source = surf_source->disp_source;
+	tbm_surface_h tbm_surface = NULL;
+	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+	twe_wl_buffer_info *buf_info = NULL;
+
+	if (surf_source->is_destroying) {
+		TPL_WARN("surf_source(%p) native window is already destroyed.",
+				surf_source);
+		return;
+	}
+
+	/* If there are multiple buffers in the dirty_queue of tbm_queue
+	 * as render done state, this function should decide whether
+	 * to commit or pending, depending on whether vblank_done
+	 * after acquire as much as possible. */
+	while (tbm_surface_queue_can_acquire(surf_source->tbm_queue, 0)) {
+
+		/* queue_acquire should be performed only when render_done_cnt
+		 * is greater than 0 when using sync_fence even in the case of
+		 * queue_can_acquire. */
+		if (surf_source->use_sync_fence && !(surf_source->render_done_cnt > 0)) {
+			return;
+		}
+
+		tsq_err = tbm_surface_queue_acquire(surf_source->tbm_queue, &tbm_surface);
+		if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+			TPL_ERR("Failed to acquire from tbm_queue(%p)",
+					surf_source->tbm_queue);
+			return;
+		}
+
+		surf_source->render_done_cnt--;
+
+		/* Hold a ref while the buffer is owned by this thread; released
+		 * when the buffer is removed from the list it ends up in. */
+		tbm_surface_internal_ref(tbm_surface);
+
+		tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+										   (void **)&buf_info);
+
+		if (!disp_source->is_vulkan_dpy) { /* wayland_egl */
+			if (surf_source->vblank_done) {
+				TPL_LOG_T(BACKEND, "[ACQ] tbm_surface(%p) bo(%d)",
+						  tbm_surface,
+						  _get_tbm_surface_bo_name(tbm_surface));
+
+				_twe_thread_wl_surface_commit(surf_source, tbm_surface);
+			} else {
+				/* If the current surface needs to wait for vblank
+				 * to commit after acquire, keep the acquired buffer
+				 * in the vblank_waiting_buffers list. */
+				if (surf_source->vblank_waiting_buffers) {
+					__tpl_list_push_back(surf_source->vblank_waiting_buffers,
+										 (void *)tbm_surface);
+					TPL_LOG_T(BACKEND,
+							  "[ACQ][COMMIT_PENDING] tbm_surface(%p) bo(%d)",
+							  tbm_surface,
+							  _get_tbm_surface_bo_name(tbm_surface));
+				} else {
+					_twe_thread_wl_surface_commit(surf_source, tbm_surface);
+				}
+			}
+
+		} else { /* wayland_vulkan */
+			TPL_LOG_T(BACKEND, "[ACQ] tbm_surface(%p) bo(%d)",
+					  tbm_surface,
+					  _get_tbm_surface_bo_name(tbm_surface));
+
+			switch (surf_source->swapchain_properties.present_mode) {
+			case TPL_DISPLAY_PRESENT_MODE_IMMEDIATE:
+				/* IMMEDIATE: commit without waiting for vblank. */
+				_twe_thread_wl_vk_surface_commit(surf_source, tbm_surface);
+				break;
+
+			case TPL_DISPLAY_PRESENT_MODE_MAILBOX:
+				/* MAILBOX: a newer buffer replaces any not-yet-committed
+				 * one; the superseded buffer is released back to the
+				 * queue immediately. */
+				if (surf_source->draw_done_buffer) {
+					TPL_LOG_T(BACKEND, "[SKIP] tbm_surface(%p) bo(%d)",
+							  tbm_surface,
+							  _get_tbm_surface_bo_name(tbm_surface));
+					tbm_surface_internal_unref(surf_source->draw_done_buffer);
+					tbm_surface_queue_release(surf_source->tbm_queue,
+											  surf_source->draw_done_buffer);
+				}
+
+				surf_source->draw_done_buffer = tbm_surface;
+
+				if (surf_source->vblank_done) {
+					if ((_twe_ctx->tdm_source || surf_source->vblank) &&
+						_twe_surface_wait_vblank(surf_source) != TPL_ERROR_NONE)
+						TPL_ERR("Failed to set wait vblank");
+				}
+				break;
+
+			case TPL_DISPLAY_PRESENT_MODE_FIFO:
+			case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED:
+				/* FIFO(_RELAXED): commit one buffer per vblank; extra
+				 * buffers wait in vblank_waiting_buffers. */
+				if (surf_source->vblank_done) {
+					_twe_thread_wl_vk_surface_commit(surf_source, tbm_surface);
+				} else {
+					if (surf_source->vblank_waiting_buffers) {
+						__tpl_list_push_back(surf_source->vblank_waiting_buffers,
+											 (void *)tbm_surface);
+					} else {
+						TPL_ERR("Invalid list. vblank_waiting_buffers is NULL.");
+					}
+				}
+				break;
+			}
+		}
+	}
+}
+
+/* GSource dispatch callback for a surface's eventfd (runs on the twe thread).
+ * Drains the eventfd counter, then either hands pending render-done fences
+ * to fence-wait sources (sync_fence path) or acquires and commits ready
+ * buffers directly. If the fd wakes with a condition other than G_IO_IN,
+ * or the read fails, the eventfd is closed and recreated so the source
+ * keeps functioning.
+ */
+static gboolean
+_twe_thread_wl_surface_dispatch(GSource *source, GSourceFunc cb, gpointer data)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)source;
+	tpl_result_t res = TPL_ERROR_NONE;
+	GIOCondition cond;
+
+	g_mutex_lock(&surf_source->surf_mutex);
+
+	cond = g_source_query_unix_fd(source, surf_source->tag);
+
+	if (cond & G_IO_IN) {
+		ssize_t s;
+		uint64_t u;
+
+		/* eventfd reads are always an 8-byte counter. */
+		s = read(surf_source->event_fd, &u, sizeof(uint64_t));
+		if (s != sizeof(uint64_t)) {
+			TPL_ERR("Failed to read from event_fd(%d)",
+					surf_source->event_fd);
+			res = TPL_ERROR_INVALID_CONNECTION;
+		}
+
+		if (surf_source->use_sync_fence &&
+			surf_source->render_done_fences) {
+
+			/* Move every queued fence into its own fence-wait source;
+			 * commit happens later when the fence signals. */
+			while (__tpl_list_get_count(surf_source->render_done_fences)) {
+				struct sync_info *sync = __tpl_list_pop_front(surf_source->render_done_fences,
+															  NULL);
+				if (sync) {
+					res = _twe_thread_fence_wait_source_attach(surf_source,
+															   sync->tbm_surface,
+															   sync->sync_fd);
+					if (res != TPL_ERROR_NONE) {
+						TPL_ERR("Failed to attach source with fence_fd(%d) result(%d)",
+								sync->sync_fd, res);
+						surf_source->use_sync_fence = TPL_FALSE;
+					}
+
+					sync->sync_fd = -1;
+					sync->tbm_surface = NULL;
+					free(sync);
+				}
+			}
+		} else {
+			_twe_thread_wl_surface_acquire_and_commit(surf_source);
+		}
+	}
+
+	if (cond && !(cond & G_IO_IN)) {
+		TPL_ERR("eventfd(%d) cannot wake up with other condition. cond(%d)",
+				surf_source->event_fd, cond);
+		res = TPL_ERROR_INVALID_CONNECTION;
+	}
+
+	if (res != TPL_ERROR_NONE) {
+		/* Recover by replacing the broken eventfd with a fresh one. */
+		g_source_remove_unix_fd(source, surf_source->tag);
+		close(surf_source->event_fd);
+
+		TPL_WARN("event_fd of surf_source(%p) has been closed. it will be recreated.",
+				 surf_source);
+		surf_source->event_fd = eventfd(0, EFD_CLOEXEC);
+		if (surf_source->event_fd < 0) {
+			TPL_ERR("Failed to create eventfd. errno(%d)", errno);
+		} else {
+			surf_source->tag = g_source_add_unix_fd(&surf_source->gsource,
+													surf_source->event_fd,
+													G_IO_IN);
+		}
+		TPL_DEBUG("[RECREATED] eventfd(%d) tag(%p)", surf_source->event_fd, surf_source->tag);
+	}
+
+	g_mutex_unlock(&surf_source->surf_mutex);
+
+	return G_SOURCE_CONTINUE;
+}
+
+/* GSource finalize callback: closes the surface's eventfd when the
+ * GSource is destroyed. */
+static void
+_twe_thread_wl_surface_finalize(GSource *source)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)source;
+
+	TPL_LOG_T(BACKEND, "gsource(%p) event_fd(%d)",
+			  source, surf_source->event_fd);
+
+	close(surf_source->event_fd);
+	surf_source->event_fd = -1;
+
+	return;
+}
+
+/* GSource vtable for twe_wl_surf_source; no prepare/check needed because
+ * the source is driven purely by its eventfd. */
+static GSourceFuncs _twe_wl_surface_funcs = {
+	.prepare = NULL,
+	.check = NULL,
+	.dispatch = _twe_thread_wl_surface_dispatch,
+	.finalize = _twe_thread_wl_surface_finalize,
+};
+
+/* Create the tizen_surface_shm flusher for this surface and register its
+ * listener. No-op when the display did not bind the tizen_surface_shm
+ * global (disp_source->tss == NULL). */
+static void
+_twe_surface_buffer_flusher_init(twe_wl_surf_source *surf_source)
+{
+	twe_wl_disp_source *disp_source = surf_source->disp_source;
+
+	if (!disp_source->tss)
+		return;
+
+	surf_source->tss_flusher =
+		tizen_surface_shm_get_flusher(disp_source->tss, surf_source->surf);
+
+	tizen_surface_shm_flusher_add_listener(surf_source->tss_flusher,
+										   &tss_flusher_listener,
+										   surf_source);
+	TPL_LOG_T(BACKEND,
+			  "tss_flusher init. surf_source(%p) tss_flusher(%p)",
+			  surf_source, surf_source->tss_flusher);
+}
+
+/* Destroy the surface's tizen_surface_shm flusher if one was created. */
+static void
+_twe_surface_buffer_flusher_fini(twe_wl_surf_source *surf_source)
+{
+	if (surf_source->tss_flusher) {
+		TPL_LOG_T(BACKEND,
+				  "tss_flusher fini. surf_source(%p) tss_flusher(%p)",
+				  surf_source, surf_source->tss_flusher);
+		tizen_surface_shm_flusher_destroy(surf_source->tss_flusher);
+		surf_source->tss_flusher = NULL;
+	}
+}
+
+/* Create a tdm_client_vblank object on the "primary" output.
+ * Fake vblank is enabled so waits still complete when the output is off;
+ * sync mode is disabled so vblank events are delivered asynchronously.
+ * Returns NULL on any tdm error. Caller owns the returned vblank and
+ * must destroy it with tdm_client_vblank_destroy(). */
+static tdm_client_vblank*
+_twe_surface_create_vblank(tdm_client *tdm_client)
+{
+	tdm_client_vblank *vblank = NULL;
+	tdm_client_output *tdm_output = NULL;
+	tdm_error tdm_err = TDM_ERROR_NONE;
+
+	if (!tdm_client) {
+		TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
+		return NULL;
+	}
+
+	tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
+	if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
+		TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
+		return NULL;
+	}
+
+	vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
+	if (!vblank || tdm_err != TDM_ERROR_NONE) {
+		TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
+		return NULL;
+	}
+
+	tdm_client_vblank_set_enable_fake(vblank, 1);
+	tdm_client_vblank_set_sync(vblank, 0);
+
+	TPL_LOG_T(BACKEND, "[VBLANK INIT] vblank(%p)", vblank);
+
+	return vblank;
+}
+
+/* Create the tbm_surface_queue for an EGL wl_egl_window surface.
+ * Chooses the tiled queue variant when the buffer manager reports
+ * TBM_BUFMGR_CAPABILITY_TILED_MEMORY, enables GUARANTEE_CYCLE mode and
+ * registers the reset/trace/acquirable callbacks with `source` as their
+ * user data. Returns NULL on any failure (the partially created queue is
+ * destroyed). */
+static tbm_surface_queue_h
+_twe_surface_create_tbm_queue(twe_wl_surf_source *source,
+							  struct wayland_tbm_client *wl_tbm_client,
+							  tpl_handle_t native_handle,
+							  int format, int num_buffers)
+{
+	tbm_surface_queue_h tbm_queue = NULL;
+	struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)native_handle;
+	tbm_bufmgr bufmgr = NULL;
+	unsigned int capability;
+
+	if (!wl_tbm_client || !wl_egl_window) {
+		TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_egl_window(%p)",
+				wl_tbm_client, wl_egl_window);
+		return NULL;
+	}
+
+	/* Only need the capability flags; the bufmgr handle itself is
+	 * released immediately. */
+	bufmgr = tbm_bufmgr_init(-1);
+	capability = tbm_bufmgr_get_capability(bufmgr);
+	tbm_bufmgr_deinit(bufmgr);
+
+	if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
+		tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
+						wl_tbm_client,
+						wl_egl_window->surface,
+						num_buffers,
+						wl_egl_window->width,
+						wl_egl_window->height,
+						format);
+	} else {
+		tbm_queue = wayland_tbm_client_create_surface_queue(
+						wl_tbm_client,
+						wl_egl_window->surface,
+						num_buffers,
+						wl_egl_window->width,
+						wl_egl_window->height,
+						format);
+	}
+
+	if (!tbm_queue) {
+		TPL_ERR("Failed to create tbm_surface_queue.");
+		return NULL;
+	}
+
+	if (tbm_surface_queue_set_modes(
+			tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
+			TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
+				tbm_queue);
+		tbm_surface_queue_destroy(tbm_queue);
+		return NULL;
+	}
+
+	if (tbm_surface_queue_add_reset_cb(tbm_queue,
+									   __cb_tbm_queue_reset_callback,
+									   (void *)source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
+				tbm_queue);
+		tbm_surface_queue_destroy(tbm_queue);
+		return NULL;
+	}
+
+	if (tbm_surface_queue_add_trace_cb(tbm_queue,
+									   __cb_tbm_queue_trace_callback,
+									   (void *)source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to register trace callback to tbm_surface_queue(%p)",
+				tbm_queue);
+		tbm_surface_queue_destroy(tbm_queue);
+		return NULL;
+	}
+
+	if (tbm_surface_queue_add_acquirable_cb(tbm_queue,
+											__cb_tbm_queue_acquirable_callback,
+											(void *)source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
+				tbm_queue);
+		tbm_surface_queue_destroy(tbm_queue);
+		return NULL;
+	}
+
+	return tbm_queue;
+}
+
+
+/* Return the tbm_surface_queue owned by the given twe_surface, or NULL if
+ * the surface or its queue is not valid. Ownership stays with the surface. */
+tbm_surface_queue_h
+twe_surface_get_tbm_queue(twe_surface_h twe_surface)
+{
+	twe_wl_surf_source *source = (twe_wl_surf_source *)twe_surface;
+	if (!source) {
+		TPL_ERR("Invalid parameters. twe_surface(%p)", source);
+		return NULL;
+	}
+
+	if (!source->tbm_queue) {
+		TPL_ERR("Invalid parameters. twe_surface(%p) tbm_queue(%p)",
+				source, source->tbm_queue);
+		return NULL;
+	}
+
+	return source->tbm_queue;
+}
+
+/* tpl_free_func_t used when emptying buffer lists: drops the internal ref
+ * taken when the tbm_surface was stored in the list. */
+static void
+__cb_buffer_remove_from_list(void *data)
+{
+	tbm_surface_h tbm_surface = (tbm_surface_h)data;
+
+	if (tbm_surface && tbm_surface_internal_is_valid(tbm_surface))
+		tbm_surface_internal_unref(tbm_surface);
+}
+
+/* Tear down a twe_wl_surf_source (invoked on the twe thread via the
+ * surface's del_source; see twe_surface_del).
+ * Flushes any pending presentation feedbacks / sync fds, releases all
+ * buffers still tracked in the in_use/committed/vblank-waiting lists back
+ * to the tbm_queue, cancels pending fence-wait sources, destroys the
+ * queue, vblank object, surface_sync and buffer flusher, detaches the
+ * wl_egl_window callbacks and finally destroys the GSource itself. */
+static void
+_twe_thread_wl_surf_source_destroy(void *source)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)source;
+	twe_wl_disp_source *disp_source = NULL;
+	gboolean is_destroyed = FALSE;
+
+	if (!surf_source || g_source_is_destroyed(&surf_source->gsource)) {
+		TPL_ERR("twe_surface(%p) is already destroyed.", surf_source);
+		return;
+	}
+
+	disp_source = surf_source->disp_source;
+	if (!disp_source ||
+		(is_destroyed = g_source_is_destroyed(&disp_source->gsource))) {
+		TPL_ERR("twe_display(%p) is invalid. | is_destroyed(%s)",
+				disp_source, (is_destroyed ? "TRUE" : "FALSE"));
+		return;
+	}
+
+	/* Lock ordering: display wl_event_mutex, then surf_mutex, then the
+	 * per-feature sync mutexes. */
+	g_mutex_lock(&disp_source->wl_event_mutex);
+
+	g_mutex_lock(&surf_source->surf_mutex);
+
+	g_mutex_lock(&surf_source->presentation_sync.mutex);
+
+	TPL_INFO("[TWE_SURFACE_DESTROY]",
+			 "surf_source(%p) wl_egl_window(%p) wl_surface(%p)",
+			 surf_source, surf_source->wl_egl_window, surf_source->surf);
+
+	/* Signal and close every presentation sync fd that is still waiting
+	 * for feedback, so no client stays blocked on it. */
+	if (disp_source->presentation && surf_source->presentation_feedbacks) {
+		while (!__tpl_list_is_empty(surf_source->presentation_feedbacks)) {
+			tbm_surface_h tbm_surface =
+				__tpl_list_pop_front(surf_source->presentation_feedbacks, NULL);
+			if (tbm_surface_internal_is_valid(tbm_surface)) {
+				twe_wl_buffer_info *buf_info = NULL;
+				tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+												   (void **)&buf_info);
+				if (buf_info && buf_info->presentation_sync_fd != -1 &&
+					buf_info->presentation_feedback) {
+
+					_write_to_eventfd(buf_info->presentation_sync_fd);
+					close(buf_info->presentation_sync_fd);
+					buf_info->presentation_sync_fd = -1;
+
+					wp_presentation_feedback_destroy(buf_info->presentation_feedback);
+					buf_info->presentation_feedback = NULL;
+				}
+			}
+		}
+	}
+
+	if (surf_source->presentation_sync.fd != -1) {
+		_write_to_eventfd(surf_source->presentation_sync.fd);
+		close(surf_source->presentation_sync.fd);
+		surf_source->presentation_sync.fd = -1;
+	}
+	g_mutex_unlock(&surf_source->presentation_sync.mutex);
+	g_mutex_clear(&surf_source->presentation_sync.mutex);
+
+	if (surf_source->in_use_buffers) {
+		__tpl_list_free(surf_source->in_use_buffers,
+						(tpl_free_func_t)__cb_buffer_remove_from_list);
+		surf_source->in_use_buffers = NULL;
+	}
+
+	if (surf_source->surface_sync) {
+		TPL_INFO("[SURFACE_SYNC FINI]", "twe_wl_surf_source(%p) surface_sync(%p)",
+				 surf_source, surf_source->surface_sync);
+		zwp_linux_surface_synchronization_v1_destroy(surf_source->surface_sync);
+		surf_source->surface_sync = NULL;
+	}
+
+	/* Release buffers that were committed but not yet released by the
+	 * compositor. */
+	if (surf_source->committed_buffers) {
+		while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
+			tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+			tbm_surface_h tbm_surface =
+				__tpl_list_pop_front(surf_source->committed_buffers,
+									 (tpl_free_func_t)__cb_buffer_remove_from_list);
+
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
+			tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+						tbm_surface, tsq_err);
+		}
+		__tpl_list_free(surf_source->committed_buffers, NULL);
+		surf_source->committed_buffers = NULL;
+	}
+
+	/* Release buffers that were acquired but still waiting for vblank. */
+	if (surf_source->vblank_waiting_buffers) {
+		while (!__tpl_list_is_empty(surf_source->vblank_waiting_buffers)) {
+			tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+			tbm_surface_h tbm_surface =
+				__tpl_list_pop_front(surf_source->vblank_waiting_buffers,
+									 (tpl_free_func_t)__cb_buffer_remove_from_list);
+
+			tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+						tbm_surface, tsq_err);
+		}
+		__tpl_list_free(surf_source->vblank_waiting_buffers, NULL);
+		surf_source->vblank_waiting_buffers = NULL;
+	}
+
+	/* Cancel fence-wait sources that never fired. */
+	if (surf_source->use_sync_fence && surf_source->fence_waiting_sources) {
+		while (!__tpl_list_is_empty(surf_source->fence_waiting_sources)) {
+			twe_fence_wait_source *wait_source =
+				__tpl_list_pop_front(surf_source->fence_waiting_sources,
+									 NULL);
+			if (wait_source && !g_source_is_destroyed(&wait_source->gsource)) {
+				tbm_surface_internal_unref(wait_source->tbm_surface);
+				wait_source->tbm_surface = NULL;
+
+				close(wait_source->fence_fd);
+				wait_source->fence_fd = -1;
+
+				g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag);
+				g_source_destroy(&wait_source->gsource);
+				g_source_unref(&wait_source->gsource);
+			}
+		}
+	}
+
+	_twe_surface_buffer_flusher_fini(surf_source);
+
+	if (surf_source->tbm_queue) {
+		tbm_surface_queue_destroy(surf_source->tbm_queue);
+		surf_source->tbm_queue = NULL;
+	}
+
+	if (surf_source->vblank) {
+		TPL_INFO("[VBLANK FINI]",
+				 "twe_wl_surf_source(%p) vblank(%p)",
+				 surf_source, surf_source->vblank);
+		tdm_client_vblank_destroy(surf_source->vblank);
+		surf_source->vblank = NULL;
+	}
+
+	surf_source->cb_data = NULL;
+	surf_source->rotate_cb = NULL;
+
+	/* Detach the wl_egl_window callbacks installed by twe_surface_add and
+	 * free the tizen_private block so the window cannot call back into a
+	 * dead surface. */
+	if (surf_source->wl_egl_window) {
+		struct tizen_private *tizen_private = NULL;
+		TPL_INFO("[WINDOW_FINI]", "twe_surface(%p) wl_egl_window(%p) wl_surface(%p)",
+				 surf_source, surf_source->wl_egl_window, surf_source->surf);
+		tizen_private = _get_tizen_private(surf_source->wl_egl_window);
+		if (tizen_private) {
+			tizen_private->set_window_serial_callback = NULL;
+			tizen_private->rotate_callback = NULL;
+			tizen_private->get_rotation_capability = NULL;
+			tizen_private->data = NULL;
+			free(tizen_private);
+		}
+
+		surf_source->wl_egl_window->destroy_window_callback = NULL;
+		surf_source->wl_egl_window->resize_callback = NULL;
+		surf_source->wl_egl_window->driver_private = NULL;
+		surf_source->wl_egl_window = NULL;
+		surf_source->surf = NULL;
+	}
+
+	/* Lock/unlock pairs ensure no other thread still holds the mutex
+	 * before it is cleared. */
+	g_mutex_lock(&surf_source->commit_sync.mutex);
+	g_mutex_unlock(&surf_source->commit_sync.mutex);
+	g_mutex_clear(&surf_source->commit_sync.mutex);
+
+	g_mutex_unlock(&surf_source->surf_mutex);
+	g_mutex_clear(&surf_source->surf_mutex);
+
+	g_mutex_unlock(&disp_source->wl_event_mutex);
+
+	g_cond_clear(&surf_source->free_queue_cond);
+	g_mutex_clear(&surf_source->free_queue_mutex);
+
+	g_source_remove_unix_fd(&surf_source->gsource, surf_source->tag);
+	g_source_destroy(&surf_source->gsource);
+	g_source_unref(&surf_source->gsource);
+}
+
+/* Create a twe_wl_surf_source for the given native handle and attach it to
+ * the twe thread's main loop.
+ * For EGL windows (is_vulkan_dpy == FALSE) this also creates the
+ * tbm_surface_queue up front and installs the rotate/resize/destroy/serial
+ * callbacks on the wl_egl_window's tizen_private; for Vulkan surfaces the
+ * queue is created later by twe_surface_create_swapchain.
+ * Returns the new surface handle, or NULL on failure. */
+twe_surface_h
+twe_surface_add(twe_thread* thread,
+				twe_display_h twe_display,
+				tpl_handle_t native_handle,
+				int format, int num_buffers)
+{
+	twe_thread_context *ctx = thread->ctx;
+	twe_wl_surf_source *source = NULL;
+	twe_wl_disp_source *disp_source = (twe_wl_disp_source *)twe_display;
+	gboolean is_destroyed = FALSE;
+	tbm_surface_queue_h tbm_queue = NULL;
+
+	if (!twe_display ||
+		(is_destroyed = g_source_is_destroyed(&disp_source->gsource))) {
+		TPL_ERR("twe_display(%p) is invalid. | is_destroyed(%s)",
+				twe_display, (is_destroyed ? "TRUE" : "FALSE"));
+		return NULL;
+	}
+
+	source = (twe_wl_surf_source *)g_source_new(&_twe_wl_surface_funcs,
+												sizeof(twe_wl_surf_source));
+	if (!source) {
+		TPL_ERR("[THREAD] Failed to create twe_wl_surf_source");
+		return NULL;
+	}
+
+	/* eventfd used by the main thread to wake the twe thread's dispatch. */
+	source->event_fd = eventfd(0, EFD_CLOEXEC);
+	if (source->event_fd < 0) {
+		TPL_ERR("[THREAD] Failed to create eventfd. errno(%d)", errno);
+		g_source_unref(&source->gsource);
+		return NULL;
+	}
+
+	if (!disp_source->is_vulkan_dpy &&
+		!(tbm_queue = _twe_surface_create_tbm_queue(source,
+													disp_source->wl_tbm_client,
+													native_handle,
+													format, num_buffers))) {
+		TPL_ERR("Failed to create tbm_surface_queue.");
+		g_source_unref(&source->gsource);
+		return NULL;
+	}
+
+	source->tag = g_source_add_unix_fd(&source->gsource,
+									   source->event_fd,
+									   G_IO_IN);
+	source->tbm_queue = tbm_queue;
+	source->disp_source = (twe_wl_disp_source *)twe_display;
+	source->latest_transform = 0;
+	source->rotation = 0;
+	source->rotation_capability = TPL_FALSE;
+	source->vblank = NULL;
+	source->vblank_done = TPL_TRUE;
+	source->is_destroying = TPL_FALSE;
+	source->committed_buffers = __tpl_list_alloc();
+	source->in_use_buffers = __tpl_list_alloc();
+	source->fence_waiting_sources = __tpl_list_alloc();
+	source->render_done_fences = __tpl_list_alloc();
+	source->render_done_cnt = 0;
+
+	source->cb_data = NULL;
+	source->rotate_cb = NULL;
+	source->format = format;
+	source->use_sync_fence = TPL_FALSE;
+	source->use_surface_sync = TPL_FALSE;
+
+	/* for vulkan swapchain */
+	source->vblank_waiting_buffers = NULL;
+	source->draw_done_buffer = NULL;
+
+	source->set_serial_is_used = TPL_FALSE;
+	source->serial = 0;
+
+	source->post_interval = 1;
+
+	source->commit_sync.fd = -1;
+	g_mutex_init(&source->commit_sync.mutex);
+
+	source->presentation_sync.fd = -1;
+	g_mutex_init(&source->presentation_sync.mutex);
+	if (disp_source->presentation)
+		source->presentation_feedbacks = __tpl_list_alloc();
+	else
+		source->presentation_feedbacks = NULL;
+
+	if (!disp_source->is_vulkan_dpy) {
+		struct wl_egl_window *wl_egl_window =
+			(struct wl_egl_window *)native_handle;
+		struct tizen_private *private = NULL;
+
+		/* Reuse an existing tizen_private if the window already has one
+		 * (e.g. surface recreated on the same window). */
+		if (wl_egl_window->driver_private)
+			private = (struct tizen_private *)wl_egl_window->driver_private;
+		else {
+			private = tizen_private_create();
+			wl_egl_window->driver_private = (void *)private;
+		}
+
+		if (private) {
+			private->data = (void *)source;
+			private->rotate_callback = (void *)__cb_rotate_callback;
+			private->get_rotation_capability = (void *)
+				__cb_get_rotation_capability;
+			private->set_window_serial_callback = (void *)
+				__cb_set_window_serial_callback;
+			private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
+			private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
+
+			wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
+			wl_egl_window->resize_callback = (void *)__cb_resize_callback;
+		}
+
+		source->wl_egl_window = wl_egl_window;
+		source->surf = wl_egl_window->surface;
+		source->vblank_waiting_buffers = __tpl_list_alloc();
+
+	} else {
+		struct wl_surface *wl_surf = (struct wl_surface *)native_handle;
+
+		source->wl_egl_window = NULL;
+		source->surf = wl_surf;
+	}
+
+	_twe_surface_buffer_flusher_init(source);
+
+	/* Explicit sync is only usable when both the global was bound and the
+	 * display opted in. */
+	if (disp_source->explicit_sync && disp_source->use_explicit_sync) {
+		source->surface_sync =
+			zwp_linux_explicit_synchronization_v1_get_synchronization(
+					disp_source->explicit_sync, source->surf);
+		if (!source->surface_sync) {
+			TPL_WARN("Failed to create surf_sync. | surf_source(%p)", source);
+		} else {
+			source->use_surface_sync = TPL_TRUE;
+		}
+	}
+
+	/* del_source lets twe_surface_del trigger destruction on the twe
+	 * thread (see _twe_thread_wl_surf_source_destroy). */
+	source->surf_del_source = _twe_del_source_init(ctx, source);
+	if (source->surf_del_source) {
+		source->surf_del_source->destroy_target_source_func
+			= _twe_thread_wl_surf_source_destroy;
+	}
+
+	g_source_attach(&source->gsource, g_main_loop_get_context(ctx->twe_loop));
+
+	g_mutex_init(&source->surf_mutex);
+
+	g_mutex_init(&source->free_queue_mutex);
+	g_cond_init(&source->free_queue_cond);
+
+	TPL_INFO("[SURFACE_ADD]", "gsource(%p) wl_surface(%p) event_fd(%d)",
+			 source, source->surf, source->event_fd);
+
+	return (twe_surface_h)source;
+}
+
+/* Destroy a twe_surface. Called from the client thread: it first busy-waits
+ * (yielding) until all pending fence-wait sources are drained, then
+ * triggers _twe_thread_wl_surf_source_destroy on the twe thread via the
+ * del_source and blocks on thread_cond until the twe thread signals that
+ * destruction is complete.
+ * Returns TPL_ERROR_NONE, or TPL_ERROR_INVALID_PARAMETER if the surface or
+ * its display is already destroyed. */
+tpl_result_t
+twe_surface_del(twe_surface_h twe_surface)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+	twe_wl_disp_source *disp_source = NULL;
+	twe_del_source *surf_del_source = NULL;
+	gboolean is_destroyed = FALSE;
+
+	if (!surf_source ||
+		(is_destroyed = g_source_is_destroyed(&surf_source->gsource))) {
+		TPL_ERR("twe_surface(%p) is invalid. | is_destroyed(%s)",
+				twe_surface, (is_destroyed ? "TRUE" : "FALSE"));
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	disp_source = surf_source->disp_source;
+	if (!disp_source ||
+		(is_destroyed = g_source_is_destroyed(&disp_source->gsource))) {
+		TPL_ERR("twe_display(%p) is invalid. | is_destroyed(%s)",
+				disp_source, (is_destroyed ? "TRUE" : "FALSE"));
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	/* The twe thread empties fence_waiting_sources; spin-yield until it
+	 * has, so no fence callback fires after destruction. */
+	if (surf_source->use_sync_fence && surf_source->fence_waiting_sources) {
+		TPL_DEBUG("twe_surface(%p) is waiting for all fences to be signaled.",
+				  surf_source);
+		while (!__tpl_list_is_empty(surf_source->fence_waiting_sources)) {
+			__tpl_util_sys_yield();
+		}
+	}
+
+	TPL_INFO("[SURFACE_DEL]", "twe_surface(%p) will be destroyed in thread",
+			 twe_surface);
+	surf_del_source = surf_source->surf_del_source;
+
+	g_mutex_lock(&_twe_ctx->thread_mutex);
+
+	_twe_thread_del_source_trigger(surf_del_source);
+	g_cond_wait(&_twe_ctx->thread_cond,
+				&_twe_ctx->thread_mutex);
+	g_mutex_unlock(&_twe_ctx->thread_mutex);
+
+	_twe_del_source_fini(surf_del_source);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Create the tbm_surface_queue backing a Vulkan swapchain for this surface.
+ * Validates buffer_count against the display capabilities, picks the tiled
+ * queue variant when the bufmgr supports tiled memory, registers the queue
+ * callbacks, allocates vblank_waiting_buffers for FIFO modes and records
+ * the swapchain properties. Returns TPL_ERROR_NONE on success or when the
+ * existing queue is reused.
+ * NOTE(review): surf_source is dereferenced before any NULL check — the
+ * caller must pass a valid twe_surface. */
+tpl_result_t
+twe_surface_create_swapchain(twe_surface_h twe_surface,
+							 int width, int height, int format,
+							 int buffer_count, int present_mode)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+	twe_wl_disp_source *disp_source = NULL;
+	tbm_bufmgr bufmgr = NULL;
+	unsigned int capability;
+
+	/* A queue may already exist (swapchain recreated); reuse it. */
+	if (surf_source->tbm_queue) {
+		TPL_LOG_B(BACKEND, "[REUSE SWAPCHAIN] surf_source(%p) tbm_queue(%p)",
+				  surf_source, surf_source->tbm_queue);
+		return TPL_ERROR_NONE;
+	}
+
+	disp_source = surf_source->disp_source;
+
+	TPL_ASSERT(disp_source);
+
+	if ((buffer_count < disp_source->surface_capabilities.min_buffer)
+		|| (buffer_count > disp_source->surface_capabilities.max_buffer)) {
+		TPL_ERR("Invalid buffer_count(%d)! min_buffer(%d) max_buffer(%d)",
+				buffer_count,
+				disp_source->surface_capabilities.min_buffer,
+				disp_source->surface_capabilities.max_buffer);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if ((present_mode & disp_source->surface_capabilities.present_modes) == 0) {
+		/* server not supported current mode check client mode */
+		switch (present_mode) {
+		case TPL_DISPLAY_PRESENT_MODE_FIFO:
+		case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED:
+		case TPL_DISPLAY_PRESENT_MODE_MAILBOX:
+		case TPL_DISPLAY_PRESENT_MODE_IMMEDIATE:
+			break;
+		default:
+			TPL_ERR("Unsupported present mode: %d", present_mode);
+			return TPL_ERROR_INVALID_PARAMETER;
+		}
+	}
+
+	/* Only need the capability flags; release the bufmgr immediately. */
+	bufmgr = tbm_bufmgr_init(-1);
+	capability = tbm_bufmgr_get_capability(bufmgr);
+	tbm_bufmgr_deinit(bufmgr);
+
+	/* NOTE(review): the queue is always created with TBM_FORMAT_ARGB8888;
+	 * the requested 'format' is only stored in surf_source->format below —
+	 * confirm this is intentional. */
+	if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
+		surf_source->tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
+									disp_source->wl_tbm_client,
+									surf_source->surf,
+									buffer_count,
+									width, height,
+									TBM_FORMAT_ARGB8888);
+	} else {
+		surf_source->tbm_queue = wayland_tbm_client_create_surface_queue(
+									disp_source->wl_tbm_client,
+									surf_source->surf,
+									buffer_count,
+									width, height,
+									TBM_FORMAT_ARGB8888);
+	}
+
+	if (!surf_source->tbm_queue) {
+		TPL_ERR("TBM surface queue creation failed!");
+		return TPL_ERROR_OUT_OF_MEMORY;
+	}
+
+	if (tbm_surface_queue_add_reset_cb(surf_source->tbm_queue,
+									   __cb_tbm_queue_reset_callback,
+									   surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
+				surf_source->tbm_queue);
+		tbm_surface_queue_destroy(surf_source->tbm_queue);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	if (tbm_surface_queue_set_modes(surf_source->tbm_queue,
+									TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
+		TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
+				surf_source->tbm_queue);
+		tbm_surface_queue_destroy(surf_source->tbm_queue);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	if (tbm_surface_queue_add_trace_cb(surf_source->tbm_queue,
+									   __cb_tbm_queue_trace_callback,
+									   (void *)surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to register trace callback to tbm_surface_queue(%p)",
+				surf_source->tbm_queue);
+		tbm_surface_queue_destroy(surf_source->tbm_queue);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	if (tbm_surface_queue_add_acquirable_cb(surf_source->tbm_queue,
+											__cb_tbm_queue_acquirable_callback,
+											(void *)surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
+				surf_source->tbm_queue);
+		tbm_surface_queue_destroy(surf_source->tbm_queue);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	if (tbm_surface_queue_add_dequeuable_cb(surf_source->tbm_queue,
+											__cb_tbm_queue_dequeueable_callback,
+											(void *)surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to register dequeueable callback to tbm_surface_queue(%p)",
+				surf_source->tbm_queue);
+		tbm_surface_queue_destroy(surf_source->tbm_queue);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	/* FIFO modes commit one buffer per vblank, so they need the waiting
+	 * list (see _twe_thread_wl_surface_acquire_and_commit). */
+	if (present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO
+		|| present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED) {
+		surf_source->vblank_waiting_buffers = __tpl_list_alloc();
+	}
+
+	surf_source->format = format;
+	surf_source->swapchain_properties.width = width;
+	surf_source->swapchain_properties.height = height;
+	surf_source->swapchain_properties.present_mode = present_mode;
+	surf_source->swapchain_properties.buffer_count = buffer_count;
+
+	TPL_LOG_T(BACKEND, "[SWAPCHAIN_CREATE][1/2] twe_surface(%p) tbm_queue(%p)",
+			  twe_surface, surf_source->tbm_queue);
+	TPL_LOG_T(BACKEND,
+			  "[SWAPCHAIN_CREATE][2/2] w(%d) h(%d) f(%d) p(%d) b_cnt(%d)",
+			  width, height, format, present_mode, buffer_count);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Destroy a Vulkan swapchain's tbm_surface_queue.
+ * Spin-yields until all vblank-waiting buffers have been committed, then
+ * releases any still-committed buffers back to the queue (under surf_mutex)
+ * and destroys the queue.
+ * NOTE(review): surf_source is dereferenced without a NULL check — the
+ * caller must pass a valid twe_surface. */
+tpl_result_t
+twe_surface_destroy_swapchain(twe_surface_h twe_surface)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+
+	TPL_LOG_T(BACKEND, "[SWAPCHAIN_DESTROY] twe_surface(%p) tbm_queue(%p)",
+			  twe_surface, surf_source->tbm_queue);
+
+	/* Waiting for vblank to commit all draw done buffers.*/
+	while (surf_source->vblank_waiting_buffers &&
+		   !__tpl_list_is_empty(surf_source->vblank_waiting_buffers)) {
+		__tpl_util_sys_yield();
+	}
+
+	if (surf_source->committed_buffers) {
+		g_mutex_lock(&surf_source->surf_mutex);
+		while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
+			tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+			tbm_surface_h tbm_surface =
+				__tpl_list_pop_front(surf_source->committed_buffers,
+									 (tpl_free_func_t)__cb_buffer_remove_from_list);
+
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
+			tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+						tbm_surface, tsq_err);
+		}
+		g_mutex_unlock(&surf_source->surf_mutex);
+	}
+
+	if (surf_source->tbm_queue) {
+		tbm_surface_queue_destroy(surf_source->tbm_queue);
+		surf_source->tbm_queue = NULL;
+	}
+
+	return TPL_ERROR_NONE;
+}
+
+/* Query the swapchain's buffers.
+ * When 'surfaces' is NULL only the buffer count is returned in
+ * *buffer_count; otherwise the tbm_surfaces are fetched from the
+ * wl_tbm_client queue into the caller-provided array.
+ * NOTE(review): surf_source is dereferenced without a NULL check — the
+ * caller must pass a valid twe_surface. */
+tpl_result_t
+twe_surface_get_swapchain_buffers(twe_surface_h twe_surface,
+								  tbm_surface_h *surfaces,
+								  int *buffer_count)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+	twe_wl_disp_source *disp_source = NULL;
+	int ret = 1;
+
+	if (!buffer_count) {
+		TPL_ERR("Invalid parameter. buffer_count is NULL.");
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (!surfaces) {
+		/* Count-only query. */
+		*buffer_count = tbm_surface_queue_get_size(surf_source->tbm_queue);
+		return TPL_ERROR_NONE;
+	}
+
+	disp_source = surf_source->disp_source;
+
+	ret = wayland_tbm_client_queue_get_surfaces(
+				disp_source->wl_tbm_client,
+				surf_source->tbm_queue,
+				surfaces, buffer_count);
+	if (!ret) {
+		TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
+				disp_source->wl_tbm_client, surf_source->tbm_queue);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	return TPL_ERROR_NONE;
+}
+
+/* Registers @rotate_cb to be invoked with @data when the window rotation
+ * changes. Both @data and @rotate_cb must be non-NULL. */
+tpl_result_t
+twe_surface_set_rotate_callback(twe_surface_h twe_surface,
+								void *data, tpl_surface_cb_func_t rotate_cb)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+
+	if (surf_source == NULL) {
+		TPL_ERR("Invalid parameter. twe_surface is NULL.");
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (data == NULL || rotate_cb == NULL) {
+		TPL_ERR("Invalid parameter. data(%p) rotate_cb(%p)",
+				data, rotate_cb);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	surf_source->cb_data = data;
+	surf_source->rotate_cb = rotate_cb;
+
+	return TPL_ERROR_NONE;
+}
+
+/* Returns the surface's current rotation value, or -1 on a NULL handle. */
+int
+twe_surface_get_rotation(twe_surface_h twe_surface)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+
+	if (surf_source == NULL) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
+		return -1;
+	}
+
+	return surf_source->rotation;
+}
+
+/* Enables or disables the surface's rotation-capability flag. */
+void
+twe_surface_set_rotation_capablity(twe_surface_h twe_surface, tpl_bool_t set)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+
+	if (surf_source == NULL) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
+		return;
+	}
+
+	TPL_LOG_T(BACKEND, "twe_surface(%p) rotation capability set to [%s]",
+			  surf_source, (set ? "TRUE" : "FALSE"));
+
+	surf_source->rotation_capability = set;
+}
+
+/* Stores the frame's damage region (num_rects rectangles of four ints
+ * each) in the buffer's twe_wl_buffer_info, replacing any previous region.
+ * A zero-length region is accepted and treated as "no damage info". */
+tpl_result_t
+twe_surface_set_damage_region(tbm_surface_h tbm_surface,
+							  int num_rects,
+							  const int *rects)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+
+	if (!tbm_surface || !tbm_surface_internal_is_valid(tbm_surface)) {
+		TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (num_rects == 0 || rects == NULL) {
+		return TPL_ERROR_NONE;
+	}
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	if (!buf_info) {
+		TPL_ERR("Failed to get twe_wl_buffer_info from tbm_surface(%p)",
+				tbm_surface);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	/* Destroy old region if there are old region info. */
+	if (buf_info->rects != NULL) {
+		free(buf_info->rects);
+		buf_info->rects = NULL;
+		buf_info->num_rects = 0;
+	}
+
+	buf_info->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
+	if (!buf_info->rects) {
+		TPL_ERR("Failed to allocate memory for damage rects info.");
+		/* Keep buf_info consistent: previously num_rects was set before
+		 * this check, leaving num_rects > 0 with rects == NULL. */
+		buf_info->num_rects = 0;
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	/* Set the count only once the backing storage exists. */
+	buf_info->num_rects = num_rects;
+
+	memcpy((char *)buf_info->rects, (char *)rects, sizeof(int) * 4 * num_rects);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Queries wayland-tbm whether this surface's queue is currently in the
+ * activated state. Returns TPL_FALSE on a NULL handle. */
+tpl_bool_t
+twe_surface_check_activated(twe_surface_h twe_surface)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+	twe_wl_disp_source *disp_source = NULL;
+
+	/* Guard against a NULL handle, matching the validation done by the
+	 * other twe_surface_* entry points. */
+	if (!surf_source) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
+		return TPL_FALSE;
+	}
+
+	disp_source = surf_source->disp_source;
+
+	return wayland_tbm_client_queue_check_activate(disp_source->wl_tbm_client,
+												   surf_source->tbm_queue);
+}
+
+/* Returns whether @tbm_surface still requires a wl_surface commit,
+ * according to its attached buffer info. */
+tpl_bool_t
+twe_surface_check_commit_needed(twe_surface_h twe_surface,
+								tbm_surface_h tbm_surface)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+
+	if (surf_source == NULL) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
+		return TPL_FALSE;
+	}
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	if (buf_info == NULL) {
+		TPL_ERR("Failed to get buf_info from tbm_surface(%p).", tbm_surface);
+		return TPL_FALSE;
+	}
+
+	return buf_info->need_to_commit;
+}
+
+/* Dispatched on the twe thread when the render-done fence fd becomes
+ * readable. Acquires and commits the finished buffer, then tears down this
+ * one-shot source (it is never reused; finalize closes the fd). */
+static gboolean
+_twe_thread_fence_wait_source_dispatch(GSource *source, GSourceFunc cb, gpointer data)
+{
+	twe_fence_wait_source *wait_source = (twe_fence_wait_source *)source;
+	twe_wl_surf_source *surf_source = wait_source->surf_source;
+	tbm_surface_h tbm_surface = wait_source->tbm_surface;
+	GIOCondition cond = g_source_query_unix_fd(source, wait_source->tag);
+
+	if (cond & G_IO_IN) {
+		TPL_LOG_T(BACKEND, "[RENDER DONE] wait_source(%p) tbm_surface(%p) fence_fd(%d)",
+				  wait_source, tbm_surface, wait_source->fence_fd);
+	} else {
+		/* When some io errors occur, it is not considered as a critical error.
+		 * There may be problems with the screen, but it does not affect the operation. */
+		TPL_WARN("Invalid GIOCondition occured. fd(%d) cond(%d)",
+				 wait_source->fence_fd, cond);
+	}
+
+	surf_source->render_done_cnt++;
+
+	TRACE_ASYNC_END((int)wait_source, "FENCE WAIT fd(%d)", wait_source->fence_fd);
+
+	g_mutex_lock(&surf_source->surf_mutex);
+	/* Since this source is going to be removed, acquire_and_commit must be
+	 * executed even in a situation other than G_IO_IN.
+	 * Nevertheless, there may be room for improvement. */
+	_twe_thread_wl_surface_acquire_and_commit(surf_source);
+	/* Drops the reference taken in _twe_thread_fence_wait_source_attach(). */
+	tbm_surface_internal_unref(tbm_surface);
+
+	__tpl_list_remove_data(surf_source->fence_waiting_sources,
+						   (void *)wait_source, TPL_FIRST, NULL);
+	g_mutex_unlock(&surf_source->surf_mutex);
+
+	/* This source is used only once and does not allow reuse.
+	 * So finalize will be executed immediately. */
+	g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag);
+	g_source_destroy(&wait_source->gsource);
+	g_source_unref(&wait_source->gsource);
+
+	return G_SOURCE_REMOVE;
+}
+
+/* GSource finalize: closes the fence fd and clears the source's fields. */
+static void
+_twe_thread_fence_wait_source_finalize(GSource *source)
+{
+	twe_fence_wait_source *wait_source = (twe_fence_wait_source *)source;
+
+	TPL_DEBUG("[FINALIZE] wait_source(%p) fence_fd(%d)",
+			  wait_source, wait_source->fence_fd);
+
+	/* The fd was handed over in _twe_thread_fence_wait_source_attach(). */
+	close(wait_source->fence_fd);
+	wait_source->fence_fd = -1;
+
+	wait_source->tag = NULL;
+	wait_source->tbm_surface = NULL;
+	wait_source->surf_source = NULL;
+}
+
+/* GSourceFuncs for the one-shot fence wait source. Only dispatch/finalize
+ * are needed; readiness is reported via g_source_add_unix_fd(). */
+static GSourceFuncs _twe_fence_wait_source_funcs = {
+	.prepare = NULL,
+	.check = NULL,
+	.dispatch = _twe_thread_fence_wait_source_dispatch,
+	.finalize = _twe_thread_fence_wait_source_finalize,
+};
+
+/* Creates a one-shot GSource that waits for @sync_fd to signal render
+ * completion for @tbm_surface and attaches it to the twe thread's loop.
+ *
+ * Ownership: @sync_fd is taken over by the source (closed in finalize);
+ * @tbm_surface is ref'd here and unref'd in dispatch.
+ * Returns TPL_ERROR_NONE on success, TPL_ERROR_OUT_OF_MEMORY otherwise.
+ */
+tpl_result_t
+_twe_thread_fence_wait_source_attach(twe_wl_surf_source *surf_source,
+									 tbm_surface_h tbm_surface, tbm_fd sync_fd)
+{
+	twe_fence_wait_source *wait_source = NULL;
+
+	wait_source = (twe_fence_wait_source *)g_source_new(&_twe_fence_wait_source_funcs,
+														sizeof(twe_fence_wait_source));
+	if (!wait_source) {
+		TPL_ERR("[WAIT_SOURCE] Failed to create GSource");
+		return TPL_ERROR_OUT_OF_MEMORY;
+	}
+
+	TRACE_ASYNC_BEGIN((int)wait_source, "FENCE WAIT fd(%d)", sync_fd);
+
+	/* Keep the buffer alive until dispatch has acquired and committed it. */
+	tbm_surface_internal_ref(tbm_surface);
+
+	wait_source->fence_fd = sync_fd;
+	wait_source->surf_source = surf_source;
+	wait_source->tbm_surface = tbm_surface;
+
+	wait_source->tag = g_source_add_unix_fd(&wait_source->gsource,
+											wait_source->fence_fd,
+											G_IO_IN);
+
+	/* When waiting is over, it will be removed from the list. */
+	__tpl_list_push_back(surf_source->fence_waiting_sources, (void *)wait_source);
+
+	g_source_attach(&wait_source->gsource, g_main_loop_get_context(_twe_ctx->twe_loop));
+
+	TPL_LOG_T(BACKEND, "fence_wait_source(%p) attached | tbm_surface(%p) fence_fd(%d)",
+			  wait_source, tbm_surface, sync_fd);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Associates a render-done sync fence fd with @tbm_surface.
+ *
+ * When use_surface_sync is set, @sync_fd replaces the buffer's acquire
+ * fence (the old one is closed) and buf_info takes ownership. Otherwise a
+ * (fd, surface) pair is queued on render_done_fences and consumed when the
+ * surface source is dispatched on the twe thread.
+ */
+tpl_result_t
+twe_surface_set_sync_fd(twe_surface_h twe_surface,
+						tbm_surface_h tbm_surface, tbm_fd sync_fd)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+	tpl_result_t ret = TPL_ERROR_NONE;
+	twe_wl_buffer_info *buf_info = NULL;
+
+	if (!surf_source) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (!tbm_surface || !tbm_surface_internal_is_valid(tbm_surface)) {
+		TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	if (!buf_info) {
+		TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (surf_source->use_surface_sync) {
+		/* Replace any previous acquire fence; buf_info owns the new fd. */
+		if (buf_info->acquire_fence_fd != -1)
+			close(buf_info->acquire_fence_fd);
+		buf_info->acquire_fence_fd = sync_fd;
+	} else {
+		/* The sync_info being pushed will be popped when surface_dispatch
+		 * is called and attached to the twe_thread. */
+		struct sync_info *sync = (struct sync_info *)calloc(1, sizeof(struct sync_info));
+		if (!sync) {
+			/* Previously an allocation failure was silently ignored and
+			 * TPL_ERROR_NONE returned, leaving the fence unattached.
+			 * Report it instead; @sync_fd stays owned by the caller. */
+			TPL_ERR("Failed to allocate sync_info. tbm_surface(%p) sync_fd(%d)",
+					tbm_surface, sync_fd);
+			return TPL_ERROR_OUT_OF_MEMORY;
+		}
+
+		sync->sync_fd = sync_fd;
+		sync->tbm_surface = tbm_surface;
+
+		if (surf_source->render_done_fences) {
+			g_mutex_lock(&surf_source->surf_mutex);
+			__tpl_list_push_back(surf_source->render_done_fences,
+								 (void *)sync);
+			surf_source->use_sync_fence = TPL_TRUE;
+			TPL_DEBUG("[SET_SYNC_FD] surf_source(%p) tbm_surface(%p) sync_fd(%d)",
+					  surf_source, tbm_surface, sync_fd);
+			g_mutex_unlock(&surf_source->surf_mutex);
+		} else {
+			/* No fence list: deliberate best-effort fallback. */
+			surf_source->use_sync_fence = TPL_FALSE;
+			free(sync);
+		}
+	}
+
+	return ret;
+}
+
+/* Creates a TBM sync fence fd for @tbm_surface from the buffer's sync
+ * timeline/timestamp and caches it in buf_info. Returns -1 on failure or
+ * when no buffer info is attached. */
+tbm_fd
+twe_surface_create_sync_fd(tbm_surface_h tbm_surface)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	tbm_fd sync_fd = -1;
+	char name[32];
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+	if (buf_info == NULL)
+		return -1;
+
+	snprintf(name, 32, "%d",
+			 _get_tbm_surface_bo_name(tbm_surface));
+	sync_fd = tbm_sync_fence_create(buf_info->sync_timeline,
+									name,
+									buf_info->sync_timestamp);
+	if (sync_fd == -1) {
+		char buf[1024];
+		strerror_r(errno, buf, sizeof(buf));
+		TPL_ERR("Failed to create TBM sync fence: %d(%s)", errno, buf);
+	}
+
+	buf_info->sync_fd = sync_fd;
+
+	return sync_fd;
+}
+
+/* Returns the explicit-sync release fence fd stored for @tbm_surface, or
+ * -1 when surface sync / explicit sync is not in use or no buffer info is
+ * attached. */
+tbm_fd
+twe_surface_get_buffer_release_fence_fd(twe_surface_h twe_surface,
+										tbm_surface_h tbm_surface)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+	twe_wl_buffer_info *buf_info = NULL;
+	tbm_fd release_fence_fd = -1;
+
+	tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+									   (void **)&buf_info);
+
+	if (buf_info && surf_source->use_surface_sync &&
+		surf_source->disp_source->use_explicit_sync) {
+		release_fence_fd = buf_info->release_fence_fd;
+		TPL_DEBUG("surf_source(%p) buf_info(%p) release_fence_fd(%d)",
+				  surf_source, buf_info, release_fence_fd);
+	}
+
+	return release_fence_fd;
+}
+
+/* Blocks until the surface's tbm_queue has a dequeueable buffer, or until
+ * @timeout_ns elapses (UINT64_MAX means wait forever).
+ *
+ * NOTE(review): the caller appears to hold disp_source->wl_event_mutex on
+ * entry — it is released while blocking on free_queue_cond and re-taken
+ * before each re-check/return. TODO confirm against callers.
+ */
+tpl_result_t
+twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+	twe_wl_disp_source *disp_source = NULL;
+	gint64 end_time;
+
+	disp_source = surf_source->disp_source;
+
+	/* end_time is only computed (and only read) on the finite-timeout path. */
+	if (timeout_ns != UINT64_MAX)
+		end_time = g_get_monotonic_time() + (timeout_ns / 1000);
+
+	while (!tbm_surface_queue_can_dequeue(surf_source->tbm_queue, 0)) {
+		gboolean ret = FALSE;
+
+		g_mutex_unlock(&disp_source->wl_event_mutex);
+
+		/* wait until dequeueable */
+		g_mutex_lock(&surf_source->free_queue_mutex);
+
+		if (timeout_ns != UINT64_MAX) {
+			ret = g_cond_wait_until(&surf_source->free_queue_cond,
+									&surf_source->free_queue_mutex,
+									end_time);
+			if (ret == FALSE) {
+				TPL_WARN("time out to wait dequeueable.");
+				g_mutex_lock(&disp_source->wl_event_mutex);
+				g_mutex_unlock(&surf_source->free_queue_mutex);
+				return TPL_ERROR_TIME_OUT;
+			}
+		} else {
+			g_cond_wait(&surf_source->free_queue_cond,
+						&surf_source->free_queue_mutex);
+		}
+		g_mutex_unlock(&surf_source->free_queue_mutex);
+		g_mutex_lock(&disp_source->wl_event_mutex);
+	}
+
+	return TPL_ERROR_NONE;
+}
+
+/* Forcibly flushes the surface's tbm_queue (e.g. after a dequeue timeout)
+ * and releases every buffer still on the committed_buffers list so the
+ * queue returns to a clean state. */
+tpl_result_t
+twe_surface_queue_force_flush(twe_surface_h twe_surface)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+	_twe_print_buffer_list(twe_surface);
+
+	if ((tsq_err = tbm_surface_queue_flush(surf_source->tbm_queue))
+		!= TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("[TIMEOUT_RESET] Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
+				surf_source->tbm_queue, tsq_err);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	/* Drain committed_buffers under surf_mutex; each buffer is removed
+	 * from the list and released back to the queue. */
+	g_mutex_lock(&surf_source->surf_mutex);
+	if (surf_source->committed_buffers) {
+		while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
+			tbm_surface_h tbm_surface =
+				__tpl_list_pop_front(surf_source->committed_buffers,
+									 (tpl_free_func_t)__cb_buffer_remove_from_list);
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
+			tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+						tbm_surface, tsq_err);
+		}
+	}
+	g_mutex_unlock(&surf_source->surf_mutex);
+
+	TPL_LOG_T(BACKEND,
+			  "[FORCE_FLUSH] surf_source(%p) tbm_queue(%p)",
+			  surf_source, surf_source->tbm_queue);
+
+	return TPL_ERROR_NONE;
+}
+
+
+/* MAGIC CHECK: a native display handle is a wl_display if its first
+ * pointer-sized value points at wl_display_interface, or at an interface
+ * whose name matches wl_display_interface.name. */
+tpl_bool_t
+twe_check_native_handle_is_wl_display(tpl_handle_t display)
+{
+	struct wl_interface *wl_egl_native_dpy = *(void **) display;
+
+	if (wl_egl_native_dpy == NULL) {
+		TPL_ERR("Invalid parameter. native_display(%p)", wl_egl_native_dpy);
+		return TPL_FALSE;
+	}
+
+	if (wl_egl_native_dpy == &wl_display_interface)
+		return TPL_TRUE;
+
+	if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
+				strlen(wl_display_interface.name)) == 0)
+		return TPL_TRUE;
+
+	return TPL_FALSE;
+}
+
+/* Fills @width/@height/@format from the native wl_egl_window. When no twe
+ * surface is attached yet, the format falls back to the requested alpha
+ * size (8 -> ARGB8888, otherwise XRGB8888). */
+tpl_result_t
+twe_get_native_window_info(tpl_handle_t window, int *width, int *height,
+						   tbm_format *format, int a_size)
+{
+	struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window;
+
+	if (wl_egl_window == NULL) {
+		TPL_ERR("Invalid parameter. tpl_handle_t(%p)", window);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (width)
+		*width = wl_egl_window->width;
+	if (height)
+		*height = wl_egl_window->height;
+
+	if (format) {
+		struct tizen_private *tizen_private = _get_tizen_private(wl_egl_window);
+
+		if (tizen_private && tizen_private->data) {
+			twe_wl_surf_source *surf_source =
+				(twe_wl_surf_source *)tizen_private->data;
+			*format = surf_source->format;
+		} else {
+			*format = (a_size == 8) ? TBM_FORMAT_ARGB8888
+									: TBM_FORMAT_XRGB8888;
+		}
+	}
+
+	return TPL_ERROR_NONE;
+}
+
+/* Resolves the tbm_surface backing a native pixmap (a wl_resource). */
+tbm_surface_h
+twe_get_native_buffer_from_pixmap(tpl_handle_t pixmap)
+{
+	tbm_surface_h tbm_surface;
+
+	if (pixmap == NULL) {
+		TPL_ERR("Invalid parameter. tpl_handle_t(%p)", pixmap);
+		return NULL;
+	}
+
+	tbm_surface = wayland_tbm_server_get_surface(NULL,
+												 (struct wl_resource *)pixmap);
+	if (tbm_surface == NULL) {
+		TPL_ERR("Failed to get tbm_surface from wayland_tbm.");
+		return NULL;
+	}
+
+	return tbm_surface;
+}
+
+/* Sets the surface's post interval.
+ * Returns TPL_ERROR_INVALID_PARAMETER on a NULL handle. */
+tpl_result_t
+twe_surface_set_post_interval(twe_surface_h twe_surface, int post_interval)
+{
+	twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+
+	/* Validate like the other twe_surface_* setters; previously a NULL
+	 * handle was dereferenced without a check. */
+	if (!surf_source) {
+		TPL_ERR("Invalid parameter. twe_surface(%p)", twe_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	surf_source->post_interval = post_interval;
+
+	TPL_LOG_T(BACKEND, "surf_source(%p) post_interval(%d)",
+			  surf_source, surf_source->post_interval);
+
+	return TPL_ERROR_NONE;
+}
--- /dev/null
+#include <tbm_surface.h>
+#include <tbm_surface_queue.h>
+#include <wayland-client.h>
+
+#include "tpl.h"
+
+typedef struct _twe_thread twe_thread;
+typedef struct _twe_thread_context twe_thread_context;
+
+typedef void* twe_display_h;
+typedef void* twe_surface_h;
+
+twe_thread*
+twe_thread_create(void);
+
+void
+twe_thread_destroy(twe_thread* thread);
+
+twe_display_h
+twe_display_add(twe_thread* thread,
+ struct wl_display *display,
+ tpl_backend_type_t backend);
+
+tpl_result_t
+twe_display_del(twe_display_h display);
+
+tpl_result_t
+twe_display_lock(twe_display_h display);
+
+void
+twe_display_unlock(twe_display_h display);
+
+tpl_result_t
+twe_display_get_buffer_count(twe_display_h display,
+ int *min,
+ int *max);
+
+tpl_result_t
+twe_display_get_present_mode(twe_display_h display,
+ int *present_modes);
+
+twe_surface_h
+twe_surface_add(twe_thread* thread,
+ twe_display_h twe_display,
+ tpl_handle_t native_handle,
+ int format, int num_buffers);
+
+tpl_result_t
+twe_surface_del(twe_surface_h twe_surface);
+
+tpl_result_t
+twe_surface_create_swapchain(twe_surface_h twe_surface,
+ int width, int height, int format,
+ int buffer_count, int present_mode);
+tpl_result_t
+twe_surface_destroy_swapchain(twe_surface_h twe_surface);
+
+tpl_result_t
+twe_surface_get_swapchain_buffers(twe_surface_h twe_surface,
+ tbm_surface_h *surfaces,
+ int *buffer_count);
+
+tbm_surface_queue_h
+twe_surface_get_tbm_queue(twe_surface_h twe_surface);
+
+tpl_result_t
+twe_surface_set_rotate_callback(twe_surface_h twe_surface,
+ void *data, tpl_surface_cb_func_t rotate_cb);
+
+int
+twe_surface_get_rotation(twe_surface_h twe_surface);
+
+void
+twe_surface_set_rotation_capablity(twe_surface_h twe_surface, tpl_bool_t set);
+
+tpl_bool_t
+twe_surface_check_activated(twe_surface_h twe_surface);
+
+tpl_bool_t
+twe_surface_check_commit_needed(twe_surface_h twe_surface,
+ tbm_surface_h tbm_surface);
+
+tpl_result_t
+twe_surface_set_damage_region(tbm_surface_h tbm_surface,
+ int num_rects, const int *rects);
+
+tpl_result_t
+twe_surface_set_sync_fd(twe_surface_h twe_surface,
+ tbm_surface_h tbm_surface, tbm_fd sync_fd);
+
+tbm_fd
+twe_surface_create_sync_fd(tbm_surface_h tbm_surface);
+
+tbm_fd
+twe_surface_get_buffer_release_fence_fd(twe_surface_h twe_surface,
+ tbm_surface_h tbm_surface);
+
+tpl_result_t
+twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns);
+
+tpl_result_t
+twe_surface_queue_force_flush(twe_surface_h twe_surface);
+
+tpl_bool_t
+twe_check_native_handle_is_wl_display(tpl_handle_t display);
+
+tpl_result_t
+twe_get_native_window_info(tpl_handle_t window, int *width, int *height, tbm_format *format, int a_size);
+
+tbm_surface_h
+twe_get_native_buffer_from_pixmap(tpl_handle_t pixmap);
+
+tpl_result_t
+twe_surface_set_post_interval(twe_surface_h twe_surface, int post_interval);
--- /dev/null
+#define inline __inline__
+
+#undef inline
+
+#include "tpl_internal.h"
+
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <tbm_bufmgr.h>
+#include <tbm_surface.h>
+#include <tbm_surface_internal.h>
+#include <tbm_surface_queue.h>
+
+#include "tpl_wayland_egl_thread.h"
+
+/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
+#define CLIENT_QUEUE_SIZE 3
+
+typedef struct _tpl_wayland_egl_display tpl_wayland_egl_display_t;
+typedef struct _tpl_wayland_egl_surface tpl_wayland_egl_surface_t;
+
+/* Per-display backend data: owns the twe worker thread and the display
+ * handle registered on it. */
+struct _tpl_wayland_egl_display {
+	twe_thread *wl_egl_thread;	/* worker thread driving wayland/tdm events */
+	twe_display_h twe_display;	/* display handle added to that thread */
+};
+
+/* Per-surface backend data stored in tpl_surface->backend.data. */
+struct _tpl_wayland_egl_surface {
+	tpl_object_t base;
+	twe_surface_h twe_surface;	/* surface handle owned by the twe thread */
+	tbm_surface_queue_h tbm_queue;	/* buffer queue backing this surface */
+	tpl_bool_t is_activated;	/* last activation state observed */
+	tpl_bool_t reset; /* TRUE if queue reseted by external */
+	tpl_bool_t need_to_enqueue;
+};
+
+/* Backend init for a tpl_display: creates the twe worker thread and
+ * registers the native wl_display on it. On any failure the partially
+ * created thread/display are torn down via the free_display path. */
+static tpl_result_t
+__tpl_wl_egl_display_init(tpl_display_t *display)
+{
+	tpl_wayland_egl_display_t *wayland_egl_display = NULL;
+
+	TPL_ASSERT(display);
+
+	/* Do not allow default display in wayland. */
+	if (!display->native_handle) {
+		TPL_ERR("Invalid native handle for display.");
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	wayland_egl_display = (tpl_wayland_egl_display_t *) calloc(1,
+						  sizeof(tpl_wayland_egl_display_t));
+	if (!wayland_egl_display) {
+		TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_display_t.");
+		return TPL_ERROR_OUT_OF_MEMORY;
+	}
+
+	display->backend.data = wayland_egl_display;
+	display->bufmgr_fd = -1;
+
+	if (twe_check_native_handle_is_wl_display(display->native_handle)) {
+		wayland_egl_display->wl_egl_thread = twe_thread_create();
+		if (!wayland_egl_display->wl_egl_thread) {
+			TPL_ERR("Failed to create twe_thread.");
+			goto free_display;
+		}
+
+		wayland_egl_display->twe_display =
+			twe_display_add(wayland_egl_display->wl_egl_thread,
+							display->native_handle,
+							display->backend.type);
+		if (!wayland_egl_display->twe_display) {
+			TPL_ERR("Failed to add native_display(%p) to thread(%p)",
+					display->native_handle,
+					wayland_egl_display->wl_egl_thread);
+			goto free_display;
+		}
+
+	} else {
+		TPL_ERR("Invalid native handle for display.");
+		goto free_display;
+	}
+
+	TPL_LOG_T("WL_EGL",
+			  "[INIT DISPLAY] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)",
+			  wayland_egl_display,
+			  wayland_egl_display->wl_egl_thread,
+			  wayland_egl_display->twe_display);
+
+	return TPL_ERROR_NONE;
+
+free_display:
+	/* Tear down whichever of thread/display were created before failing. */
+	if (wayland_egl_display->twe_display)
+		twe_display_del(wayland_egl_display->twe_display);
+	if (wayland_egl_display->wl_egl_thread)
+		twe_thread_destroy(wayland_egl_display->wl_egl_thread);
+	wayland_egl_display->wl_egl_thread = NULL;
+	wayland_egl_display->twe_display = NULL;
+
+	free(wayland_egl_display);
+	display->backend.data = NULL;
+	return TPL_ERROR_INVALID_OPERATION;
+}
+
+/* Backend fini for a tpl_display: removes the twe display from the worker
+ * thread, destroys the thread, and frees the backend data. */
+static void
+__tpl_wl_egl_display_fini(tpl_display_t *display)
+{
+	tpl_wayland_egl_display_t *wayland_egl_display;
+
+	TPL_ASSERT(display);
+
+	wayland_egl_display = (tpl_wayland_egl_display_t *)display->backend.data;
+	if (wayland_egl_display == NULL) {
+		display->backend.data = NULL;
+		return;
+	}
+
+	TPL_LOG_T("WL_EGL",
+			  "[FINI] wayland_egl_display(%p) twe_thread(%p) twe_display(%p)",
+			  wayland_egl_display,
+			  wayland_egl_display->wl_egl_thread,
+			  wayland_egl_display->twe_display);
+
+	if (wayland_egl_display->twe_display) {
+		tpl_result_t ret = twe_display_del(wayland_egl_display->twe_display);
+		if (ret != TPL_ERROR_NONE)
+			TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)",
+					wayland_egl_display->twe_display,
+					wayland_egl_display->wl_egl_thread);
+		wayland_egl_display->twe_display = NULL;
+	}
+
+	if (wayland_egl_display->wl_egl_thread) {
+		twe_thread_destroy(wayland_egl_display->wl_egl_thread);
+		wayland_egl_display->wl_egl_thread = NULL;
+	}
+
+	free(wayland_egl_display);
+
+	display->backend.data = NULL;
+}
+
+/* Accepts 8/8/8 window configs with 24- or 32-bit depth and alpha of 0 or
+ * 8, mapping them to TBM_FORMAT_XRGB8888 / TBM_FORMAT_ARGB8888. */
+static tpl_result_t
+__tpl_wl_egl_display_query_config(tpl_display_t *display,
+								  tpl_surface_type_t surface_type,
+								  int red_size, int green_size,
+								  int blue_size, int alpha_size,
+								  int color_depth, int *native_visual_id,
+								  tpl_bool_t *is_slow)
+{
+	TPL_ASSERT(display);
+
+	if (surface_type != TPL_SURFACE_TYPE_WINDOW || red_size != 8 ||
+		green_size != 8 || blue_size != 8 ||
+		(color_depth != 32 && color_depth != 24))
+		return TPL_ERROR_INVALID_PARAMETER;
+
+	if (alpha_size != 8 && alpha_size != 0)
+		return TPL_ERROR_INVALID_PARAMETER;
+
+	if (native_visual_id)
+		*native_visual_id = (alpha_size == 8) ? TBM_FORMAT_ARGB8888
+											  : TBM_FORMAT_XRGB8888;
+	if (is_slow)
+		*is_slow = TPL_FALSE;
+
+	return TPL_ERROR_NONE;
+}
+
+/* No config filtering is needed for this backend; accept everything. */
+static tpl_result_t
+__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
+								   int alpha_size)
+{
+	TPL_IGNORE(display);
+	TPL_IGNORE(visual_id);
+	TPL_IGNORE(alpha_size);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Thin wrapper: queries native window geometry/format via the twe layer. */
+static tpl_result_t
+__tpl_wl_egl_display_get_window_info(tpl_display_t *display,
+									 tpl_handle_t window, int *width,
+									 int *height, tbm_format *format,
+									 int depth, int a_size)
+{
+	tpl_result_t ret;
+
+	TPL_ASSERT(display);
+	TPL_ASSERT(window);
+
+	ret = twe_get_native_window_info(window, width, height, format, a_size);
+	if (ret != TPL_ERROR_NONE)
+		TPL_ERR("Failed to get size info of native_window(%p)", window);
+
+	return ret;
+}
+
+/* Fills width/height/format from the tbm_surface behind a native pixmap. */
+static tpl_result_t
+__tpl_wl_egl_display_get_pixmap_info(tpl_display_t *display,
+									 tpl_handle_t pixmap, int *width,
+									 int *height, tbm_format *format)
+{
+	tbm_surface_h tbm_surface = twe_get_native_buffer_from_pixmap(pixmap);
+
+	if (tbm_surface == NULL) {
+		TPL_ERR("Failed to get tbm_surface_h from native pixmap.");
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	if (width)
+		*width = tbm_surface_get_width(tbm_surface);
+	if (height)
+		*height = tbm_surface_get_height(tbm_surface);
+	if (format)
+		*format = tbm_surface_get_format(tbm_surface);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Returns the tbm_surface backing a native pixmap, or NULL on failure. */
+static tbm_surface_h
+__tpl_wl_egl_display_get_buffer_from_native_pixmap(tpl_handle_t pixmap)
+{
+	tbm_surface_h tbm_surface;
+
+	TPL_ASSERT(pixmap);
+
+	tbm_surface = twe_get_native_buffer_from_pixmap(pixmap);
+	if (tbm_surface == NULL) {
+		TPL_ERR("Failed to get tbm_surface_h from wayland_tbm.");
+		return NULL;
+	}
+
+	return tbm_surface;
+}
+
+/* tbm_surface_queue reset callback: marks the backend surface as reset so
+ * the next frame picks up a changed window size or activation state, then
+ * forwards the event to the tpl_surface's reset_cb if one is registered. */
+static void
+__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue,
+									  void *data)
+{
+	tpl_surface_t *surface = NULL;
+	tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+	tpl_bool_t is_activated = TPL_FALSE;
+	int width, height;
+
+	surface = (tpl_surface_t *)data;
+	TPL_CHECK_ON_NULL_RETURN(surface);
+
+	wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
+	TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface);
+
+	/* When the queue is resized, change the reset flag to TPL_TRUE to reflect
+	 * the changed window size at the next frame. */
+	width = tbm_surface_queue_get_width(surface_queue);
+	height = tbm_surface_queue_get_height(surface_queue);
+	if (surface->width != width || surface->height != height) {
+		TPL_LOG_T("WL_EGL",
+				  "[QUEUE_RESIZE_CB] wayland_egl_surface(%p) tbm_queue(%p) (%dx%d)",
+				  wayland_egl_surface, surface_queue, width, height);
+	}
+
+	/* When queue_reset_callback is called, if is_activated is different from
+	 * its previous state change the reset flag to TPL_TRUE to get a new buffer
+	 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
+	is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface);
+	if (wayland_egl_surface->is_activated != is_activated) {
+		if (is_activated) {
+			TPL_LOG_T("WL_EGL",
+					  "[ACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)",
+					  wayland_egl_surface, surface_queue);
+		} else {
+			TPL_LOG_T("WL_EGL",
+					  "[DEACTIVATED_CB] wayland_egl_surface(%p) tbm_queue(%p)",
+					  wayland_egl_surface, surface_queue);
+		}
+	}
+
+	/* Either cause above is reported to the frontend via the reset flag. */
+	wayland_egl_surface->reset = TPL_TRUE;
+
+	if (surface->reset_cb)
+		surface->reset_cb(surface->reset_data);
+}
+
+/* Rotate callback invoked by the twe layer when the window rotation
+ * changes; mirrors the new rotation into tpl_surface->rotation. */
+void __cb_window_rotate_callback(void *data)
+{
+	tpl_surface_t *surface = (tpl_surface_t *)data;
+	tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+	int rotation;
+
+	if (!surface) {
+		/* Fixed typo in the original message ("Inavlid"). */
+		TPL_ERR("Invalid parameter. surface is NULL.");
+		return;
+	}
+
+	wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
+	if (!wayland_egl_surface) {
+		TPL_ERR("Invalid parameter. surface->backend.data is NULL");
+		return;
+	}
+
+	rotation = twe_surface_get_rotation(wayland_egl_surface->twe_surface);
+
+	surface->rotation = rotation;
+}
+
+/* Backend init for a window tpl_surface: registers the native window with
+ * the twe thread, hooks the queue-reset and rotate callbacks, and caches
+ * size/rotation. Failures unwind through the labeled cleanup chain. */
+static tpl_result_t
+__tpl_wl_egl_surface_init(tpl_surface_t *surface)
+{
+	tpl_wayland_egl_display_t *wayland_egl_display = NULL;
+	tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+	tbm_surface_queue_h tbm_queue = NULL;
+	twe_surface_h twe_surface = NULL;
+	tpl_result_t ret = TPL_ERROR_NONE;
+
+	TPL_ASSERT(surface);
+	TPL_ASSERT(surface->display);
+	TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
+	TPL_ASSERT(surface->native_handle);
+
+	wayland_egl_display =
+		(tpl_wayland_egl_display_t *)surface->display->backend.data;
+	if (!wayland_egl_display) {
+		TPL_ERR("Invalid parameter. wayland_egl_display(%p)",
+				wayland_egl_display);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	wayland_egl_surface = (tpl_wayland_egl_surface_t *) calloc(1,
+						  sizeof(tpl_wayland_egl_surface_t));
+	if (!wayland_egl_surface) {
+		TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_surface_t.");
+		return TPL_ERROR_OUT_OF_MEMORY;
+	}
+
+	surface->backend.data = (void *)wayland_egl_surface;
+
+	if (__tpl_object_init(&wayland_egl_surface->base,
+						  TPL_OBJECT_SURFACE,
+						  NULL) != TPL_ERROR_NONE) {
+		TPL_ERR("Failed to initialize backend surface's base object!");
+		goto object_init_fail;
+	}
+
+	/* Register the native window on the twe worker thread. */
+	twe_surface = twe_surface_add(wayland_egl_display->wl_egl_thread,
+								  wayland_egl_display->twe_display,
+								  surface->native_handle,
+								  surface->format, surface->num_buffers);
+	if (!twe_surface) {
+		TPL_ERR("Failed to add native_window(%p) to thread(%p)",
+				surface->native_handle, wayland_egl_display->wl_egl_thread);
+		goto create_twe_surface_fail;
+	}
+
+	tbm_queue = twe_surface_get_tbm_queue(twe_surface);
+	if (!tbm_queue) {
+		TPL_ERR("Failed to get tbm_queue from twe_surface(%p)", twe_surface);
+		goto queue_create_fail;
+	}
+
+	/* Set reset_callback to tbm_queue */
+	if (tbm_surface_queue_add_reset_cb(tbm_queue,
+									   __cb_tbm_surface_queue_reset_callback,
+									   (void *)surface)) {
+		TPL_ERR("TBM surface queue add reset cb failed!");
+		goto add_reset_cb_fail;
+	}
+
+	wayland_egl_surface->reset = TPL_FALSE;
+	wayland_egl_surface->twe_surface = twe_surface;
+	wayland_egl_surface->tbm_queue = tbm_queue;
+	wayland_egl_surface->is_activated = TPL_FALSE;
+	wayland_egl_surface->need_to_enqueue = TPL_TRUE;
+
+	surface->width = tbm_surface_queue_get_width(tbm_queue);
+	surface->height = tbm_surface_queue_get_height(tbm_queue);
+	surface->rotation = twe_surface_get_rotation(twe_surface);
+
+	/* Rotation callback failure is non-fatal; rotation just stays stale. */
+	ret = twe_surface_set_rotate_callback(twe_surface, (void *)surface,
+										  (tpl_surface_cb_func_t)__cb_window_rotate_callback);
+	if (ret != TPL_ERROR_NONE) {
+		TPL_WARN("Failed to register rotate callback.");
+	}
+
+	TPL_LOG_T("WL_EGL",
+			  "[INIT1/2]tpl_surface(%p) tpl_wayland_egl_surface(%p) twe_surface(%p)",
+			  surface, wayland_egl_surface, twe_surface);
+	TPL_LOG_T("WL_EGL",
+			  "[INIT2/2]size(%dx%d)rot(%d)|tbm_queue(%p)|native_window(%p)",
+			  surface->width, surface->height, surface->rotation,
+			  tbm_queue, surface->native_handle);
+
+	return TPL_ERROR_NONE;
+
+add_reset_cb_fail:
+queue_create_fail:
+	twe_surface_del(twe_surface);
+create_twe_surface_fail:
+object_init_fail:
+	free(wayland_egl_surface);
+	surface->backend.data = NULL;
+	return TPL_ERROR_INVALID_OPERATION;
+}
+
+/* Backend fini for a tpl_surface: removes the twe surface from the worker
+ * thread (under the surface object lock) and frees the backend data. */
+static void
+__tpl_wl_egl_surface_fini(tpl_surface_t *surface)
+{
+	tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+	tpl_wayland_egl_display_t *wayland_egl_display = NULL;
+
+	TPL_ASSERT(surface);
+	TPL_ASSERT(surface->display);
+
+	wayland_egl_surface = (tpl_wayland_egl_surface_t *) surface->backend.data;
+	TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface);
+
+	TPL_OBJECT_LOCK(wayland_egl_surface);
+
+	wayland_egl_display = (tpl_wayland_egl_display_t *)
+						  surface->display->backend.data;
+
+	if (wayland_egl_display == NULL) {
+		TPL_ERR("check failed: wayland_egl_display == NULL");
+		TPL_OBJECT_UNLOCK(wayland_egl_surface);
+		return;
+	}
+
+	if (surface->type == TPL_SURFACE_TYPE_WINDOW) {
+		TPL_LOG_T("WL_EGL",
+				  "[FINI] wayland_egl_surface(%p) native_window(%p) twe_surface(%p)",
+				  wayland_egl_surface, surface->native_handle,
+				  wayland_egl_surface->twe_surface);
+
+		if (twe_surface_del(wayland_egl_surface->twe_surface)
+			!= TPL_ERROR_NONE) {
+			TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)",
+					wayland_egl_surface->twe_surface,
+					wayland_egl_display->wl_egl_thread);
+		}
+
+		wayland_egl_surface->twe_surface = NULL;
+		wayland_egl_surface->tbm_queue = NULL;
+	}
+
+	/* Unlock before tearing down the base object that owns the lock. */
+	TPL_OBJECT_UNLOCK(wayland_egl_surface);
+	__tpl_object_fini(&wayland_egl_surface->base);
+	free(wayland_egl_surface);
+	surface->backend.data = NULL;
+}
+
+/* tpl_surface backend op: forwards the rotation-capability flag to twe. */
+static tpl_result_t
+__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
+											 tpl_bool_t set)
+{
+	tpl_wayland_egl_surface_t *wl_egl_surface;
+
+	if (surface == NULL) {
+		TPL_ERR("Invalid parameter. tpl_surface(%p)", surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	wl_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
+	if (wl_egl_surface == NULL) {
+		TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)",
+				surface, wl_egl_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (wl_egl_surface->twe_surface == NULL) {
+		TPL_ERR("Invalid parameter. wayland_egl_surface(%p) twe_surface(%p)",
+				wl_egl_surface, wl_egl_surface->twe_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	twe_surface_set_rotation_capablity(wl_egl_surface->twe_surface, set);
+
+	return TPL_ERROR_NONE;
+}
+
+/* tpl_surface backend op: forwards the post interval to the twe surface. */
+static tpl_result_t
+__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
+									   int post_interval)
+{
+	tpl_wayland_egl_surface_t *wl_egl_surface;
+
+	if (surface == NULL) {
+		TPL_ERR("Invalid parameter. tpl_surface(%p)", surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	wl_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
+	if (wl_egl_surface == NULL) {
+		TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)",
+				surface, wl_egl_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (wl_egl_surface->twe_surface == NULL) {
+		TPL_ERR("Invalid parameter. wayland_egl_surface(%p) twe_surface(%p)",
+				wl_egl_surface, wl_egl_surface->twe_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	twe_surface_set_post_interval(wl_egl_surface->twe_surface,
+								  post_interval);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Enqueues a rendered tbm_surface so the wayland-egl thread can commit
+ * it to the compositor.
+ *
+ * num_rects/rects carry optional damage; sync_fence (-1 for none) is
+ * either attached to the buffer or closed in the frontbuffer fast
+ * path.  Drops the dequeue-time reference on success and on enqueue
+ * failure.  Returns TPL_ERROR_NONE, TPL_ERROR_INVALID_PARAMETER, or
+ * TPL_ERROR_INVALID_OPERATION. */
+static tpl_result_t
+__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
+									tbm_surface_h tbm_surface,
+									int num_rects, const int *rects, tbm_fd sync_fence)
+{
+	TPL_ASSERT(surface);
+	TPL_ASSERT(surface->display);
+	TPL_ASSERT(tbm_surface);
+	TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
+
+	tpl_wayland_egl_surface_t *wayland_egl_surface =
+		(tpl_wayland_egl_surface_t *) surface->backend.data;
+	tbm_surface_queue_error_e tsq_err;
+	tpl_result_t ret = TPL_ERROR_NONE;
+	int bo_name = 0;
+
+	/* FIX: validate parameters BEFORE locking the object or exporting
+	 * the bo.  The previous code called TPL_OBJECT_LOCK() and
+	 * tbm_bo_export(tbm_surface_internal_get_bo(...)) first, which
+	 * dereferences a possibly-NULL wayland_egl_surface and touches a
+	 * possibly-invalid tbm_surface. */
+	if (!wayland_egl_surface) {
+		TPL_ERR("Invalid parameter. surface(%p) wayland_egl_surface(%p)",
+				surface, wayland_egl_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (!tbm_surface_internal_is_valid(tbm_surface)) {
+		TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
+				tbm_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	TPL_OBJECT_LOCK(wayland_egl_surface);
+
+	bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+
+	TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
+
+	TPL_LOG_T("WL_EGL",
+			  "[ENQ] wayland_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)",
+			  wayland_egl_surface, tbm_surface, bo_name, sync_fence);
+
+	/* If there are received region information,
+	 * save it to buf_info in tbm_surface user_data using below API. */
+	if (num_rects && rects) {
+		ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects);
+		if (ret != TPL_ERROR_NONE) {
+			TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)",
+					 num_rects, rects);
+		}
+	}
+
+	/* Skip the real enqueue when the frontbuffer path already showed
+	 * this buffer, or the twe layer reports no commit is needed. */
+	if (!wayland_egl_surface->need_to_enqueue ||
+		!twe_surface_check_commit_needed(wayland_egl_surface->twe_surface,
+										 tbm_surface)) {
+		TPL_LOG_T("WL_EGL",
+				  "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
+				  ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
+		TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+		TPL_OBJECT_UNLOCK(wayland_egl_surface);
+		return TPL_ERROR_NONE;
+	}
+
+	/* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
+	 * commit if surface->frontbuffer that is already set and the tbm_surface
+	 * client want to enqueue are the same.
+	 */
+	if (surface->is_frontbuffer_mode) {
+		/* The first buffer to be activated in frontbuffer mode must be
+		 * committed. Subsequence frames do not need to be committed because
+		 * the buffer is already displayed.
+		 */
+		if (surface->frontbuffer == tbm_surface)
+			wayland_egl_surface->need_to_enqueue = TPL_FALSE;
+
+		/* The fence is irrelevant once the buffer is on-screen; close
+		 * it here so the fd does not leak. */
+		if (sync_fence != -1) {
+			close(sync_fence);
+			sync_fence = -1;
+		}
+	}
+
+	if (sync_fence != -1) {
+		ret = twe_surface_set_sync_fd(wayland_egl_surface->twe_surface,
+									  tbm_surface, sync_fence);
+		if (ret != TPL_ERROR_NONE) {
+			TPL_WARN("Failed to set sync fd (%d). But it will continue.",
+					 sync_fence);
+		}
+	}
+
+	tsq_err = tbm_surface_queue_enqueue(wayland_egl_surface->tbm_queue,
+										tbm_surface);
+	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		tbm_surface_internal_unref(tbm_surface);
+		TPL_ERR("Failed to enqueue tbm_surface(%p). tpl_surface(%p) tsq_err=%d",
+				tbm_surface, surface, tsq_err);
+		TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+		TPL_OBJECT_UNLOCK(wayland_egl_surface);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	/* Release the reference taken when the buffer was dequeued. */
+	tbm_surface_internal_unref(tbm_surface);
+
+	TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+	TPL_OBJECT_UNLOCK(wayland_egl_surface);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Reports whether the surface's buffers are still usable.  The surface
+ * is considered invalid once its `reset` flag has been raised
+ * (presumably by a queue reset; callers must re-dequeue in that case
+ * -- TODO confirm with the twe layer). */
+static tpl_bool_t
+__tpl_wl_egl_surface_validate(tpl_surface_t *surface)
+{
+	TPL_ASSERT(surface);
+	TPL_ASSERT(surface->backend.data);
+
+	tpl_wayland_egl_surface_t *surf_data =
+		(tpl_wayland_egl_surface_t *)surface->backend.data;
+
+	return surf_data->reset ? TPL_FALSE : TPL_TRUE;
+}
+
+/* Returns a dequeued-but-unused tbm_surface to the tbm_queue without
+ * enqueueing it (e.g. when the client aborts a frame).
+ * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_PARAMETER for a
+ * bad surface/buffer, TPL_ERROR_INVALID_OPERATION if the queue rejects
+ * the cancel. */
+static tpl_result_t
+__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+											tbm_surface_h tbm_surface)
+{
+	tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+	wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
+	if (!wayland_egl_surface) {
+		TPL_ERR("Invalid backend surface. surface(%p) wayland_egl_surface(%p)",
+				surface, wayland_egl_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (!tbm_surface_internal_is_valid(tbm_surface)) {
+		TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	/* Drop the reference taken at dequeue time.
+	 * NOTE(review): the unref happens BEFORE cancel_dequeue; this is
+	 * safe only if the queue keeps its own reference to the buffer --
+	 * confirm against libtbm ownership rules. */
+	tbm_surface_internal_unref(tbm_surface);
+
+	tsq_err = tbm_surface_queue_cancel_dequeue(wayland_egl_surface->tbm_queue,
+											   tbm_surface);
+	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
+				tbm_surface, surface);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
+			  surface, tbm_surface);
+
+	return TPL_ERROR_NONE;
+}
+
+#define CAN_DEQUEUE_TIMEOUT_MS 10000
+
+/* Dequeues a tbm_surface for client rendering.
+ *
+ * Releases the TPL object lock while blocking (up to
+ * CAN_DEQUEUE_TIMEOUT_MS) for a dequeueable buffer; on timeout the
+ * queue is force-flushed once before failing.  In frontbuffer mode the
+ * already-displayed frontbuffer is returned directly while the surface
+ * stays activated.  Returns NULL on failure.  When sync_fence is
+ * non-NULL it receives a release fence fd; -1 means the buffer may be
+ * used immediately.
+ * NOTE(review): the timeout_ns parameter is unused here; the wait is
+ * governed solely by CAN_DEQUEUE_TIMEOUT_MS. */
+static tbm_surface_h
+__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
+									tbm_fd *sync_fence)
+{
+	TPL_ASSERT(surface);
+	TPL_ASSERT(surface->backend.data);
+	TPL_ASSERT(surface->display);
+	TPL_ASSERT(surface->display->backend.data);
+	TPL_OBJECT_CHECK_RETURN(surface, NULL);
+
+	tbm_surface_h tbm_surface = NULL;
+	tpl_wayland_egl_surface_t *wayland_egl_surface =
+		(tpl_wayland_egl_surface_t *)surface->backend.data;
+	tpl_wayland_egl_display_t *wayland_egl_display =
+		(tpl_wayland_egl_display_t *)surface->display->backend.data;
+	tbm_surface_queue_error_e tsq_err = 0;
+	int is_activated = 0;
+	int bo_name = 0;
+	tpl_result_t lock_ret = TPL_FALSE;
+
+	/* Drop the object lock so other threads can progress while this
+	 * thread blocks waiting for the queue to become dequeueable. */
+	TPL_OBJECT_UNLOCK(surface);
+	tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
+				wayland_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
+	TPL_OBJECT_LOCK(surface);
+
+	/* After the can dequeue state, call twe_display_lock to prevent other
+	 * events from being processed in wayland_egl_thread
+	 * during below dequeue procedure. */
+	lock_ret = twe_display_lock(wayland_egl_display->twe_display);
+
+	if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
+		TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
+				wayland_egl_surface->tbm_queue, surface);
+		/* One recovery attempt: flush the stuck queue, then retry the
+		 * dequeue below as if the wait had succeeded. */
+		if (twe_surface_queue_force_flush(wayland_egl_surface->twe_surface)
+			!= TPL_ERROR_NONE) {
+			TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
+					wayland_egl_surface->tbm_queue, surface);
+			if (lock_ret == TPL_ERROR_NONE)
+				twe_display_unlock(wayland_egl_display->twe_display);
+			return NULL;
+		} else {
+			tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+		}
+	}
+
+	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
+				wayland_egl_surface->tbm_queue, surface);
+		if (lock_ret == TPL_ERROR_NONE)
+			twe_display_unlock(wayland_egl_display->twe_display);
+		return NULL;
+	}
+
+	/* wayland client can check their states (ACTIVATED or DEACTIVATED) with
+	 * below function [wayland_tbm_client_queue_check_activate()].
+	 * This function has to be called before tbm_surface_queue_dequeue()
+	 * in order to know what state the buffer will be dequeued next.
+	 *
+	 * ACTIVATED state means non-composite mode. Client can get buffers which
+	   can be displayed directly(without compositing).
+	 * DEACTIVATED state means composite mode. Client's buffer will be displayed
+	   by compositor(E20) with compositing.
+	 */
+	is_activated = twe_surface_check_activated(wayland_egl_surface->twe_surface);
+	wayland_egl_surface->is_activated = is_activated;
+
+	/* Refresh cached dimensions from the queue (it may have resized). */
+	surface->width = tbm_surface_queue_get_width(wayland_egl_surface->tbm_queue);
+	surface->height = tbm_surface_queue_get_height(wayland_egl_surface->tbm_queue);
+
+	if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
+		/* If surface->frontbuffer is already set in frontbuffer mode,
+		 * it will return that frontbuffer if it is still activated,
+		 * otherwise dequeue the new buffer after initializing
+		 * surface->frontbuffer to NULL. */
+		if (is_activated && !wayland_egl_surface->reset) {
+			TPL_LOG_T("WL_EGL",
+					  "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
+					  surface->frontbuffer,
+					  tbm_bo_export(tbm_surface_internal_get_bo(
+							surface->frontbuffer, 0)));
+			TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
+							  "[DEQ]~[ENQ] BO_NAME:%d",
+							  tbm_bo_export(tbm_surface_internal_get_bo(
+									surface->frontbuffer, 0)));
+			if (lock_ret == TPL_ERROR_NONE)
+				twe_display_unlock(wayland_egl_display->twe_display);
+			return surface->frontbuffer;
+		} else {
+			surface->frontbuffer = NULL;
+			wayland_egl_surface->need_to_enqueue = TPL_TRUE;
+		}
+	} else {
+		surface->frontbuffer = NULL;
+	}
+
+	tsq_err = tbm_surface_queue_dequeue(wayland_egl_surface->tbm_queue,
+										&tbm_surface);
+	if (!tbm_surface) {
+		TPL_ERR("Failed to dequeue from tbm_queue(%p) surface(%p)| tsq_err = %d",
+				wayland_egl_surface->tbm_queue, surface, tsq_err);
+		if (lock_ret == TPL_ERROR_NONE)
+			twe_display_unlock(wayland_egl_display->twe_display);
+		return NULL;
+	}
+
+	/* Hold a reference until the buffer is enqueued or cancelled. */
+	tbm_surface_internal_ref(tbm_surface);
+
+	/* If twe_surface_get_buffer_release_fence_fd return -1,
+	 * the tbm_surface can be used immediately.
+	 * If not, user(EGL) have to wait until signaled. */
+	if (sync_fence) {
+		*sync_fence = twe_surface_get_buffer_release_fence_fd(
+						wayland_egl_surface->twe_surface, tbm_surface);
+	}
+
+	bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+
+	/* Remember the new frontbuffer so subsequent frames can reuse it. */
+	if (surface->is_frontbuffer_mode && is_activated)
+		surface->frontbuffer = tbm_surface;
+
+	wayland_egl_surface->reset = TPL_FALSE;
+
+	TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name);
+	TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+	TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)",
+			  tbm_surface, bo_name, sync_fence ? *sync_fence : -1);
+
+	if (lock_ret == TPL_ERROR_NONE)
+		twe_display_unlock(wayland_egl_display->twe_display);
+
+	return tbm_surface;
+}
+
+/* Reports the surface's current size as held by its tbm_queue.
+ * Either out-parameter may be NULL when the caller does not need it. */
+void
+__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
+{
+	tpl_wayland_egl_surface_t *surf_data =
+		(tpl_wayland_egl_surface_t *)surface->backend.data;
+
+	if (width != NULL)
+		*width = tbm_surface_queue_get_width(surf_data->tbm_queue);
+
+	if (height != NULL)
+		*height = tbm_surface_queue_get_height(surf_data->tbm_queue);
+}
+
+
+/* Backend selector: this backend handles a native display only when
+ * the handle is a wl_display. */
+tpl_bool_t
+__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
+{
+	return (native_dpy &&
+			twe_check_native_handle_is_wl_display(native_dpy))
+			? TPL_TRUE : TPL_FALSE;
+}
+
+/* Populates the display backend vtable with the wayland-egl-thread
+ * (legacy) entry points. */
+void
+__tpl_display_init_backend_wl_egl_thread_legacy(tpl_display_backend_t *backend)
+{
+	TPL_ASSERT(backend);
+
+	backend->type = TPL_BACKEND_WAYLAND_THREAD;
+	backend->data = NULL;
+
+	/* Lifecycle */
+	backend->init = __tpl_wl_egl_display_init;
+	backend->fini = __tpl_wl_egl_display_fini;
+
+	/* Config queries */
+	backend->query_config = __tpl_wl_egl_display_query_config;
+	backend->filter_config = __tpl_wl_egl_display_filter_config;
+
+	/* Native-handle helpers */
+	backend->get_window_info = __tpl_wl_egl_display_get_window_info;
+	backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
+	backend->get_buffer_from_native_pixmap = __tpl_wl_egl_display_get_buffer_from_native_pixmap;
+}
+
+/* Populates the surface backend vtable with the wayland-egl-thread
+ * (legacy) entry points. */
+void
+__tpl_surface_init_backend_wl_egl_thread_legacy(tpl_surface_backend_t *backend)
+{
+	TPL_ASSERT(backend);
+
+	backend->type = TPL_BACKEND_WAYLAND_THREAD;
+	backend->data = NULL;
+
+	/* Lifecycle and validity */
+	backend->init = __tpl_wl_egl_surface_init;
+	backend->fini = __tpl_wl_egl_surface_fini;
+	backend->validate = __tpl_wl_egl_surface_validate;
+
+	/* Buffer flow */
+	backend->cancel_dequeued_buffer = __tpl_wl_egl_surface_cancel_dequeued_buffer;
+	backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
+	backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
+
+	/* Surface attributes */
+	backend->set_rotation_capability = __tpl_wl_egl_surface_set_rotation_capability;
+	backend->set_post_interval = __tpl_wl_egl_surface_set_post_interval;
+	backend->get_size = __tpl_wl_egl_surface_get_size;
+}
+