#include <string.h>
#include <fcntl.h>
#include <unistd.h>
+#include <sys/eventfd.h>
#include <tbm_bufmgr.h>
#include <tbm_surface.h>
tpl_bool_t prepared;
struct tizen_surface_shm *tss; /* used for surface buffer_flush */
- struct wp_presentation *presentation;
- struct zwp_linux_explicit_synchronization_v1 *explicit_sync;
+ struct wp_presentation *presentation; /* for presentation feedback */
+ struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
};
struct _tpl_wl_egl_surface {
tbm_surface_queue_h tbm_queue;
- struct wl_surface *surf;
struct wl_egl_window *wl_egl_window;
- struct zwp_linux_surface_synchronization_v1 *surface_sync;
- struct tizen_surface_shm_flusher *tss_flusher;
+ struct wl_surface *wl_surface;
+ struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
+ struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */
+
+ tdm_client_vblank *vblank;
/* surface information */
- int latest_transform;
- int rotation;
- int format;
int render_done_cnt;
unsigned int serial;
+ int width;
+ int height;
+ int format;
+ int latest_transform;
+ int rotation;
+ int post_interval;
tpl_wl_egl_display_t *wl_egl_display;
+ tpl_surface_t *tpl_surface;
/* the lists for buffer tracing */
tpl_list_t *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */
tpl_list_t *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */
- tpl_list_t *fence_waiting_sources; /* Trace fence_wait_source from ENQUEUE to fence signaled */
+ tpl_list_t *fence_waiting_buffers; /* Trace buffers from ENQUEUE to fence signaled */
tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
tpl_list_t *render_done_fences; /* for attaching to twe_thread with fences passed by enqueue */
+ tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
- tdm_client_vblank *vblank;
-
- tbm_fd commit_sync_timeline;
- int commit_sync_timestamp;
- unsigned int commit_sync_fence_number;
+ struct {
+ tpl_gmutex mutex;
+ int fd;
+ } commit_sync;
- tbm_fd presentation_sync_timeline;
- int presentation_sync_timestamp;
- int presentation_sync_ts_backup;
- int presentation_sync_req_cnt;
+ struct {
+ tpl_gmutex mutex;
+ int fd;
+ } presentation_sync;
- tpl_gmutex pst_mutex;
- tpl_gmutex surf_mutex;
tpl_gmutex free_queue_mutex;
tpl_gcond free_queue_cond;
- /* for waiting draw done */
- tpl_bool_t use_sync_fence;
-
- /* to use zwp_linux_surface_synchronization */
- tpl_bool_t use_surface_sync;
+ tpl_gmutex surf_mutex;
+ tpl_gcond surf_cond;
+ /* for waiting draw done */
+ tpl_bool_t use_render_done_fence;
tpl_bool_t is_activated;
tpl_bool_t reset; /* TRUE if queue reseted by external */
tpl_bool_t need_to_enqueue;
- tpl_bool_t rotation_capability;
+ tpl_bool_t prerotation_capability;
tpl_bool_t vblank_done;
- tpl_bool_t is_destroying;
- tpl_bool_t set_serial_is_used; /* Will be deprecated */
-
- int post_interval;
+ tpl_bool_t set_serial_is_used;
};
struct _tpl_wl_egl_bufer {
wl_egl_display->last_error = errno;
}
-static tpl_bool_t
-__thread_func_disp_prepare(tpl_gsource *gsource)
-{
- tpl_wl_egl_display_t *wl_egl_display =
- (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
-
- /* If this wl_egl_display is already prepared,
- * do nothing in this function. */
- if (wl_egl_display->prepared)
- return TPL_FALSE;
-
- /* If there is a last_error, there is no need to poll,
- * so skip directly to dispatch.
- * prepare -> dispatch */
- if (wl_egl_display->last_error)
- return TPL_TRUE;
-
- while (wl_display_prepare_read_queue(wl_egl_display->wl_display,
- wl_egl_display->ev_queue) != 0) {
- if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
- wl_egl_display->ev_queue) == -1) {
- _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
- }
- }
-
- wl_egl_display->prepared = TPL_TRUE;
-
- wl_display_flush(wl_egl_display->wl_display);
-
- return TPL_FALSE;
-}
-
-static tpl_bool_t
-__thread_func_disp_check(tpl_gsource *gsource)
-{
- tpl_wl_egl_display_t *wl_egl_display =
- (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
- tpl_bool_t ret = TPL_FALSE;
-
- if (!wl_egl_display->prepared)
- return ret;
-
- /* If prepared, but last_error is set,
- * cancel_read is executed and FALSE is returned.
- * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
- * and skipping disp_check from prepare to disp_dispatch.
- * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
- if (wl_egl_display->prepared && wl_egl_display->last_error) {
- wl_display_cancel_read(wl_egl_display->wl_display);
- return ret;
- }
-
- if (tpl_gsource_check_io_condition(gsource)) {
- if (wl_display_read_events(wl_egl_display->wl_display) == -1)
- _wl_display_print_err(wl_egl_display, "read_event");
- ret = TPL_TRUE;
- } else {
- wl_display_cancel_read(wl_egl_display->wl_display);
- ret = TPL_FALSE;
- }
-
- wl_egl_display->prepared = TPL_FALSE;
-
- return ret;
-}
-
-static tpl_bool_t
-__thread_func_disp_dispatch(tpl_gsource *gsource)
-{
- tpl_wl_egl_display_t *wl_egl_display =
- (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
-
- /* If there is last_error, SOURCE_REMOVE should be returned
- * to remove the gsource from the main loop.
- * This is because wl_egl_display is not valid since last_error was set.*/
- if (wl_egl_display->last_error) {
- return TPL_GSOURCE_REMOVE;
- }
-
- g_mutex_lock(&wl_egl_display->wl_event_mutex);
- if (tpl_gsource_check_io_condition(gsource)) {
- if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
- wl_egl_display->ev_queue) == -1) {
- _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
- }
- }
-
- wl_display_flush(wl_egl_display->wl_display);
- g_mutex_unlock(&wl_egl_display->wl_event_mutex);
-
- return TPL_GSOURCE_CONTINUE;
-}
-
-static void
-__thread_func_disp_finalize(tpl_gsource *source)
-{
- tpl_wl_egl_display_t *wl_egl_display =
- (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
-
- if (wl_egl_display->wl_initialized)
- _thread_wl_display_fini(wl_egl_display);
-
- TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)",
- wl_egl_display, source);
-
- return;
-}
-
-static tpl_gsource_functions disp_funcs = {
- .prepare = __thread_func_disp_prepare,
- .check = __thread_func_disp_check,
- .dispatch = __thread_func_disp_dispatch,
- .finalize = __thread_func_disp_finalize,
-};
-
static void*
_thread_init(void *data)
{
wl_egl_display->wl_display);
}
+static tpl_bool_t
+__thread_func_disp_prepare(tpl_gsource *gsource)
+{
+ tpl_wl_egl_display_t *wl_egl_display =
+ (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+
+ /* If this wl_egl_display is already prepared,
+ * do nothing in this function. */
+ if (wl_egl_display->prepared)
+ return TPL_FALSE;
+
+ /* If there is a last_error, there is no need to poll,
+ * so skip directly to dispatch.
+ * prepare -> dispatch */
+ if (wl_egl_display->last_error)
+ return TPL_TRUE;
+
+ while (wl_display_prepare_read_queue(wl_egl_display->wl_display,
+ wl_egl_display->ev_queue) != 0) {
+ if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
+ wl_egl_display->ev_queue) == -1) {
+ _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
+ }
+ }
+
+ wl_egl_display->prepared = TPL_TRUE;
+
+ wl_display_flush(wl_egl_display->wl_display);
+
+ return TPL_FALSE;
+}
+
+static tpl_bool_t
+__thread_func_disp_check(tpl_gsource *gsource)
+{
+ tpl_wl_egl_display_t *wl_egl_display =
+ (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+ tpl_bool_t ret = TPL_FALSE;
+
+ if (!wl_egl_display->prepared)
+ return ret;
+
+ /* If prepared, but last_error is set,
+ * cancel_read is executed and FALSE is returned.
+ * That can lead to G_SOURCE_REMOVE by calling disp_prepare again
+ * and skipping disp_check from prepare to disp_dispatch.
+ * check -> prepare -> dispatch -> G_SOURCE_REMOVE */
+ if (wl_egl_display->prepared && wl_egl_display->last_error) {
+ wl_display_cancel_read(wl_egl_display->wl_display);
+ return ret;
+ }
+
+ if (tpl_gsource_check_io_condition(gsource)) {
+ if (wl_display_read_events(wl_egl_display->wl_display) == -1)
+ _wl_display_print_err(wl_egl_display, "read_event");
+ ret = TPL_TRUE;
+ } else {
+ wl_display_cancel_read(wl_egl_display->wl_display);
+ ret = TPL_FALSE;
+ }
+
+ wl_egl_display->prepared = TPL_FALSE;
+
+ return ret;
+}
+
+static tpl_bool_t
+__thread_func_disp_dispatch(tpl_gsource *gsource, uint64_t message)
+{
+ tpl_wl_egl_display_t *wl_egl_display =
+ (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+
+ TPL_IGNORE(message);
+
+ /* If there is last_error, SOURCE_REMOVE should be returned
+ * to remove the gsource from the main loop.
+ * This is because wl_egl_display is not valid since last_error was set.*/
+ if (wl_egl_display->last_error) {
+ return TPL_GSOURCE_REMOVE;
+ }
+
+ g_mutex_lock(&wl_egl_display->wl_event_mutex);
+ if (tpl_gsource_check_io_condition(gsource)) {
+ if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
+ wl_egl_display->ev_queue) == -1) {
+ _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
+ }
+ }
+
+ wl_display_flush(wl_egl_display->wl_display);
+ g_mutex_unlock(&wl_egl_display->wl_event_mutex);
+
+ return TPL_GSOURCE_CONTINUE;
+}
+
+static void
+__thread_func_disp_finalize(tpl_gsource *gsource)
+{
+ tpl_wl_egl_display_t *wl_egl_display =
+ (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+
+ if (wl_egl_display->wl_initialized)
+ _thread_wl_display_fini(wl_egl_display);
+
+ TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)",
+ wl_egl_display, gsource);
+
+ return;
+}
+
+static tpl_gsource_functions disp_funcs = {
+ .prepare = __thread_func_disp_prepare,
+ .check = __thread_func_disp_check,
+ .dispatch = __thread_func_disp_dispatch,
+ .finalize = __thread_func_disp_finalize,
+};
+
static tpl_result_t
__tpl_wl_egl_display_query_config(tpl_display_t *display,
tpl_surface_type_t surface_type,
if (width) *width = wl_egl_window->width;
if (height) *height = wl_egl_window->height;
if (format) {
- struct tizen_private *tizen_private = _get_tizen_private(wl_egl_window);
+ struct tizen_private *tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
if (tizen_private && tizen_private->data) {
tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
*format = wl_egl_surface->format;
return tbm_surface;
}
-
-
-
-
-static void
-__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue,
- void *data)
+static tpl_result_t
+__tpl_wl_egl_surface_init(tpl_surface_t *surface)
{
- tpl_surface_t *surface = NULL;
+ tpl_wl_egl_display_t *wl_egl_display = NULL;
tpl_wl_egl_surface_t *wl_egl_surface = NULL;
- tpl_bool_t is_activated = TPL_FALSE;
- int width, height;
+ tbm_surface_queue_h tbm_queue = NULL;
+ tpl_gsource *surf_source = NULL;
+ tpl_result_t ret = TPL_ERROR_NONE;
- surface = (tpl_surface_t *)data;
- TPL_CHECK_ON_NULL_RETURN(surface);
+ struct wl_egl_window *wl_egl_window =
+ (struct wl_egl_window *)surface->native_handle;
- wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
- TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->display);
+ TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
+ TPL_ASSERT(surface->native_handle);
- /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
- * the changed window size at the next frame. */
- width = tbm_surface_queue_get_width(surface_queue);
- height = tbm_surface_queue_get_height(surface_queue);
- if (surface->width != width || surface->height != height) {
- TPL_LOG_T("WL_EGL",
- "[QUEUE_RESIZE_CB] wl_egl_surface(%p) tbm_queue(%p) (%dx%d)",
- wl_egl_surface, surface_queue, width, height);
+ wl_egl_display =
+ (tpl_wl_egl_display_t *)surface->display->backend.data;
+ if (!wl_egl_display) {
+ TPL_ERR("Invalid parameter. wl_egl_display(%p)",
+ wl_egl_display);
+ return TPL_ERROR_INVALID_PARAMETER;
}
- /* When queue_reset_callback is called, if is_activated is different from
- * its previous state change the reset flag to TPL_TRUE to get a new buffer
- * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
- is_activated = twe_surface_check_activated(wl_egl_surface->twe_surface);
- if (wl_egl_surface->is_activated != is_activated) {
- if (is_activated) {
- TPL_LOG_T("WL_EGL",
- "[ACTIVATED_CB] wl_egl_surface(%p) tbm_queue(%p)",
- wl_egl_surface, surface_queue);
- } else {
- TPL_LOG_T("WL_EGL",
- "[DEACTIVATED_CB] wl_egl_surface(%p) tbm_queue(%p)",
- wl_egl_surface, surface_queue);
+ wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
+ sizeof(tpl_wl_egl_surface_t));
+ if (!wl_egl_surface) {
+ TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
+ return TPL_ERROR_OUT_OF_MEMORY;
+ }
+
+ surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface,
+ -1, surf_funcs, SOURCE_TYPE_NORMAL);
+ if (!surf_source) {
+ TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)",
+ wl_egl_surface);
+ goto surf_source_create_fail;
+ }
+
+ surface->backend.data = (void *)wl_egl_surface;
+ surface->width = wl_egl_window->width;
+ surface->height = wl_egl_window->height;
+ surface->rotation = 0;
+
+ wl_egl_surface->tpl_surface = surface;
+ wl_egl_surface->width = wl_egl_window->width;
+ wl_egl_surface->height = wl_egl_window->height;
+ wl_egl_surface->format = surface->format;
+
+ wl_egl_surface->surf_source = surf_source;
+ wl_egl_surface->wl_egl_window = wl_egl_window;
+ wl_egl_surface->wl_surface = wl_egl_window->surface;
+
+ wl_egl_surface->wl_egl_display = wl_egl_display;
+
+ wl_egl_surface->reset = TPL_FALSE;
+ wl_egl_surface->is_activated = TPL_FALSE;
+ wl_egl_surface->need_to_enqueue = TPL_FALSE;
+ wl_egl_surface->prerotation_capability = TPL_FALSE;
+ wl_egl_surface->vblank_done = TPL_TRUE;
+ wl_egl_surface->use_render_done_fence = TPL_FALSE;
+ wl_egl_surface->set_serial_is_used = TPL_FALSE;
+
+ wl_egl_surface->latest_transform = 0;
+ wl_egl_surface->render_done_cnt = 0;
+ wl_egl_surface->serial = 0;
+
+ wl_egl_surface->vblank = NULL;
+ wl_egl_surface->tss_flusher = NULL;
+ wl_egl_surface->surface_sync = NULL;
+
+ wl_egl_surface->post_interval = surface->post_interval;
+
+ wl_egl_surface->commit_sync.fd = -1;
+ wl_egl_surface->presentation_sync.fd = -1;
+
+ {
+ struct tizen_private *tizen_private = NULL;
+
+ if (wl_egl_window->driver_private)
+ tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
+ else {
+ tizen_private = tizen_private_create();
+ wl_egl_window->driver_private = (void *)tizen_private;
+ }
+
+ if (tizen_private) {
+ tizen_private->data = (void *)wl_egl_surface;
+ tizen_private->rotate_callback = (void *)__cb_rotate_callback;
+ tizen_private->get_rotation_capability = (void *)
+ __cb_get_rotation_capability;
+ tizen_private->set_window_serial_callback = (void *)
+ __cb_set_window_serial_callback;
+ tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
+ tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
+
+ wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
+ wl_egl_window->resize_callback = (void *)__cb_resize_callback;
}
}
- wl_egl_surface->reset = TPL_TRUE;
+ tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
+ tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
- if (surface->reset_cb)
- surface->reset_cb(surface->reset_data);
-}
+ tpl_gmutex_init(&wl_egl_surface->free_queue_mutex);
+ tpl_gmutex_init(&wl_egl_surface->surf_mutex);
+ tpl_gcond_init(&wl_egl_surface->free_queue_cond);
+ tpl_gcond_init(&wl_egl_surface->surf_cond);
-void __cb_window_rotate_callback(void *data)
-{
- tpl_surface_t *surface = (tpl_surface_t *)data;
- tpl_wl_egl_surface_t *wl_egl_surface = NULL;
- int rotation;
+ /* Initialize in thread */
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ tpl_gsource_send_message(wl_egl_surface->surf_source, 1);
+ tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- if (!surface) {
- TPL_ERR("Inavlid parameter. surface is NULL.");
- return;
- }
+ TPL_ASSERT(wl_egl_surface->tbm_queue);
- wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
- if (!wl_egl_surface) {
- TPL_ERR("Invalid parameter. surface->backend.data is NULL");
- return;
- }
+ TPL_INFO("[SURFACE_INIT]",
+ "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)",
+ surface, wl_egl_surface, wl_egl_surface->surf_source);
- rotation = twe_surface_get_rotation(wl_egl_surface->twe_surface);
+ return TPL_ERROR_NONE;
- surface->rotation = rotation;
+surf_source_create_fail:
+ free(wl_egl_surface);
+ surface->backend.data = NULL;
+ return TPL_ERROR_INVALID_OPERATION;
}
-static tpl_result_t
-__tpl_wl_egl_surface_init(tpl_surface_t *surface)
+static tbm_surface_queue_h
+_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface,
+ struct wayland_tbm_client *wl_tbm_client,
+ int num_buffers)
{
- tpl_wl_egl_display_t *wl_egl_display = NULL;
- tpl_wl_egl_surface_t *wl_egl_surface = NULL;
tbm_surface_queue_h tbm_queue = NULL;
- twe_surface_h twe_surface = NULL;
- tpl_result_t ret = TPL_ERROR_NONE;
+ tbm_bufmgr bufmgr = NULL;
+ unsigned int capability;
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
- TPL_ASSERT(surface->native_handle);
+ struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
+ int width = wl_egl_surface->width;
+ int height = wl_egl_surface->height;
+ int format = wl_egl_surface->format;
- wl_egl_display =
- (tpl_wl_egl_display_t *)surface->display->backend.data;
- if (!wl_egl_display) {
- TPL_ERR("Invalid parameter. wl_egl_display(%p)",
- wl_egl_display);
- return TPL_ERROR_INVALID_PARAMETER;
+ if (!wl_tbm_client || !wl_surface) {
+ TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)",
+ wl_tbm_client, wl_surface);
+ return NULL;
}
- wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
- sizeof(tpl_wl_egl_surface_t));
- if (!wl_egl_surface) {
- TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
- return TPL_ERROR_OUT_OF_MEMORY;
+ bufmgr = tbm_bufmgr_init(-1);
+ capability = tbm_bufmgr_get_capability(bufmgr);
+ tbm_bufmgr_deinit(bufmgr);
+
+ if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
+ tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
+ wl_tbm_client,
+ wl_surface,
+ num_buffers,
+ width,
+ height,
+ format);
+ } else {
+ tbm_queue = wayland_tbm_client_create_surface_queue(
+ wl_tbm_client,
+ wl_surface,
+ num_buffers,
+ width,
+ height,
+ format);
+ }
+
+ /* NULL means creation failed; the previous condition was inverted
+  * and would have rejected every successfully created queue. */
+ if (!tbm_queue) {
+ TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)",
+ wl_tbm_client);
+ return NULL;
+ }
- surface->backend.data = (void *)wl_egl_surface;
+ if (tbm_surface_queue_set_modes(
+ tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
+ TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
+ tbm_queue);
+ tbm_surface_queue_destroy(tbm_queue);
+ return NULL;
+ }
- if (__tpl_object_init(&wl_egl_surface->base,
- TPL_OBJECT_SURFACE,
- NULL) != TPL_ERROR_NONE) {
- TPL_ERR("Failed to initialize backend surface's base object!");
- goto object_init_fail;
+ if (tbm_surface_queue_add_reset_cb(
+ tbm_queue,
+ __cb_tbm_queue_reset_callback,
+ (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
+ tbm_queue);
+ tbm_surface_queue_destroy(tbm_queue);
+ return NULL;
}
- twe_surface = twe_surface_add(wl_egl_display->wl_egl_thread,
- wl_egl_display->twe_display,
- surface->native_handle,
- surface->format, surface->num_buffers);
- if (!twe_surface) {
- TPL_ERR("Failed to add native_window(%p) to thread(%p)",
- surface->native_handle, wl_egl_display->wl_egl_thread);
- goto create_twe_surface_fail;
+ if (tbm_surface_queue_add_trace_cb(
+ tbm_queue,
+ __cb_tbm_queue_trace_callback,
+ (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to register trace callback to tbm_surface_queue(%p)",
+ tbm_queue);
+ tbm_surface_queue_destroy(tbm_queue);
+ return NULL;
}
- tbm_queue = twe_surface_get_tbm_queue(twe_surface);
- if (!tbm_queue) {
- TPL_ERR("Failed to get tbm_queue from twe_surface(%p)", twe_surface);
- goto queue_create_fail;
+ if (tbm_surface_queue_add_acquirable_cb(
+ tbm_queue,
+ __cb_tbm_queue_acquirable_callback,
+ (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
+ tbm_queue);
+ tbm_surface_queue_destroy(tbm_queue);
+ return NULL;
}
- /* Set reset_callback to tbm_queue */
- if (tbm_surface_queue_add_reset_cb(tbm_queue,
- __cb_tbm_surface_queue_reset_callback,
- (void *)surface)) {
- TPL_ERR("TBM surface queue add reset cb failed!");
- goto add_reset_cb_fail;
+ return tbm_queue;
+}
+
+static tdm_client_vblank*
+_thread_create_tdm_client_vblank(tdm_client *tdm_client)
+{
+ tdm_client_vblank *vblank = NULL;
+ tdm_client_output *tdm_output = NULL;
+ tdm_error tdm_err = TDM_ERROR_NONE;
+
+ if (!tdm_client) {
+ TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
+ return NULL;
}
- wl_egl_surface->reset = TPL_FALSE;
- wl_egl_surface->twe_surface = twe_surface;
- wl_egl_surface->tbm_queue = tbm_queue;
- wl_egl_surface->is_activated = TPL_FALSE;
- wl_egl_surface->need_to_enqueue = TPL_TRUE;
+ tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
+ if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
+ TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
+ return NULL;
+ }
+
+ vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
+ if (!vblank || tdm_err != TDM_ERROR_NONE) {
+ TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
+ return NULL;
+ }
- surface->width = tbm_surface_queue_get_width(tbm_queue);
- surface->height = tbm_surface_queue_get_height(tbm_queue);
- surface->rotation = twe_surface_get_rotation(twe_surface);
+ tdm_client_vblank_set_enable_fake(vblank, 1);
+ tdm_client_vblank_set_sync(vblank, 0);
- ret = twe_surface_set_rotate_callback(twe_surface, (void *)surface,
- (tpl_surface_cb_func_t)__cb_window_rotate_callback);
- if (ret != TPL_ERROR_NONE) {
- TPL_WARN("Failed to register rotate callback.");
+ return vblank;
+}
+
+static void
+_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+
+ /* _thread_create_tbm_queue() takes (wl_egl_surface, wl_tbm_client,
+  * num_buffers) and reads width/height/format from wl_egl_surface. */
+ wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
+ wl_egl_surface,
+ wl_egl_display->wl_tbm_client,
+ CLIENT_QUEUE_SIZE);
+ if (!wl_egl_surface->tbm_queue) {
+ TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
+ wl_egl_surface, wl_egl_display->wl_tbm_client);
+ return;
}
- TPL_LOG_T("WL_EGL",
- "[INIT1/2]tpl_surface(%p) tpl_wl_egl_surface(%p) twe_surface(%p)",
- surface, wl_egl_surface, twe_surface);
- TPL_LOG_T("WL_EGL",
- "[INIT2/2]size(%dx%d)rot(%d)|tbm_queue(%p)|native_window(%p)",
- surface->width, surface->height, surface->rotation,
- tbm_queue, surface->native_handle);
+ TPL_INFO("[QUEUE_CREATION]",
+ "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)",
+ wl_egl_surface, wl_egl_surface->wl_surface,
+ wl_egl_display->wl_tbm_client);
+ TPL_INFO("[QUEUE_CREATION]",
+ "tbm_queue(%p) size(%d x %d) X %d format(%d)",
+ wl_egl_surface->tbm_queue,
+ wl_egl_surface->width,
+ wl_egl_surface->height,
+ CLIENT_QUEUE_SIZE,
+ wl_egl_surface->format);
- return TPL_ERROR_NONE;
+ wl_egl_surface->vblank = _thread_create_tdm_client_vblank(
+ wl_egl_display->tdm_client);
+ if (wl_egl_surface->vblank) {
+ TPL_INFO("[VBLANK_INIT]",
+ "wl_egl_surface(%p) tdm_client(%p) vblank(%p)",
+ wl_egl_surface, wl_egl_display->tdm_client,
+ wl_egl_surface->vblank);
+ }
-add_reset_cb_fail:
-queue_create_fail:
- twe_surface_del(twe_surface);
-create_twe_surface_fail:
-object_init_fail:
- free(wl_egl_surface);
- surface->backend.data = NULL;
- return TPL_ERROR_INVALID_OPERATION;
+ if (wl_egl_display->tss) {
+ wl_egl_surface->tss_flusher =
+ tizen_surface_shm_get_flusher(wl_egl_display->tss,
+ wl_egl_surface->wl_surface);
+ }
+
+ if (wl_egl_surface->tss_flusher) {
+ /* was surf_source->tss_flusher: surf_source is not in scope here */
+ tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
+ &tss_flusher_listener,
+ wl_egl_surface);
+ TPL_INFO("[FLUSHER_INIT]",
+ "wl_egl_surface(%p) tss_flusher(%p)",
+ wl_egl_surface, wl_egl_surface->tss_flusher);
+ }
+
+ if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) {
+ wl_egl_surface->surface_sync =
+ zwp_linux_explicit_synchronization_v1_get_synchronization(
+ wl_egl_display->explicit_sync, wl_egl_surface->wl_surface);
+ if (wl_egl_surface->surface_sync) {
+ TPL_INFO("[EXPLICIT_SYNC_INIT]",
+ "wl_egl_surface(%p) surface_sync(%p)",
+ wl_egl_surface, wl_egl_surface->surface_sync);
+ } else {
+ TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)",
+ wl_egl_surface);
+ wl_egl_display->use_explicit_sync = TPL_FALSE;
+ }
+ }
+
+ wl_egl_surface->committed_buffers = __tpl_list_alloc();
+ wl_egl_surface->in_use_buffers = __tpl_list_alloc();
+ wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc();
+ wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
+ wl_egl_surface->render_done_fences = __tpl_list_alloc();
+ wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
}
static void
TPL_ASSERT(surface);
TPL_ASSERT(surface->display);
+ TPL_CHECK_ON_FALSE_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW);
+
wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data;
TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
- TPL_OBJECT_LOCK(wl_egl_surface);
+ wl_egl_display = wl_egl_surface->wl_egl_display;
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
+
+ TPL_INFO("[SURFACE_FINI][BEGIN]",
+ "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface,
+ wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
+
+ if (wl_egl_surface->surf_source)
+ tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
+ wl_egl_surface->surf_source = NULL;
+
+ if (wl_egl_surface->wl_egl_window) {
+ struct tizen_private *tizen_private = NULL;
+ struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
+ TPL_INFO("[WL_EGL_WINDOW_FINI]",
+ "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
+ wl_egl_surface, wl_egl_window,
+ wl_egl_surface->wl_surface);
+ tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
+ if (tizen_private) {
+ tizen_private->set_window_serial_callback = NULL;
+ tizen_private->rotate_callback = NULL;
+ tizen_private->get_rotation_capability = NULL;
+ tizen_private->create_presentation_sync_fd = NULL;
+ tizen_private->create_commit_sync_fd = NULL;
+ tizen_private->set_frontbuffer_callback = NULL;
+ tizen_private->merge_sync_fds = NULL;
+ tizen_private->data = NULL;
+ free(tizen_private);
+
+ wl_egl_window->driver_private = NULL;
+ }
- wl_egl_display = (tpl_wl_egl_display_t *)
- surface->display->backend.data;
+ wl_egl_window->destroy_window_callback = NULL;
+ wl_egl_window->resize_callback = NULL;
- if (wl_egl_display == NULL) {
- TPL_ERR("check failed: wl_egl_display == NULL");
- TPL_OBJECT_UNLOCK(wl_egl_surface);
- return;
+ wl_egl_surface->wl_egl_window = NULL;
}
- if (surface->type == TPL_SURFACE_TYPE_WINDOW) {
- TPL_LOG_T("WL_EGL",
- "[FINI] wl_egl_surface(%p) native_window(%p) twe_surface(%p)",
- wl_egl_surface, surface->native_handle,
- wl_egl_surface->twe_surface);
-
- if (twe_surface_del(wl_egl_surface->twe_surface)
- != TPL_ERROR_NONE) {
- TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)",
- wl_egl_surface->twe_surface,
- wl_egl_display->wl_egl_thread);
+ wl_egl_surface->wl_surface = NULL;
+ wl_egl_surface->wl_egl_display = NULL;
+ wl_egl_surface->tpl_surface = NULL;
+
+ tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+ tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
+
+ tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+ tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex);
+
+ tpl_gmutex_lock(&wl_egl_surface->free_queue_mutex);
+ tpl_gmutex_unlock(&wl_egl_surface->free_queue_mutex);
+ /* clear the mutex with the mutex API and the cond with the cond API;
+  * the previous code passed free_queue_cond to tpl_gmutex_clear and
+  * then cleared both again via g_cond_clear/g_mutex_clear (double clear). */
+ tpl_gmutex_clear(&wl_egl_surface->free_queue_mutex);
+ tpl_gcond_clear(&wl_egl_surface->free_queue_cond);
+
+ tpl_gmutex_clear(&wl_egl_surface->surf_mutex);
+ tpl_gcond_clear(&wl_egl_surface->surf_cond);
+
+ TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface);
+
+ free(wl_egl_surface);
+ surface->backend.data = NULL;
+}
+
+static void
+_thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+
+ TPL_INFO("[SURFACE_FINI]",
+ "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
+ wl_egl_surface, wl_egl_surface->wl_egl_window,
+ wl_egl_surface->wl_surface);
+
+ tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+
+ /* TODO
+ if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) {
+ while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) {
+ tbm_surface_h tbm_surface =
+ __tpl_list_pop_front(wl_egl_surface->presentation_feedbacks, NULL);
+ if (tbm_surface_internal_is_valid(tbm_surface)) {
+ twe_wl_buffer_info *buf_info = NULL;
+ tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+ (void **)&buf_info);
+ if (buf_info && buf_info->presentation_sync_fd != -1 &&
+ buf_info->presentation_feedback) {
+
+ _write_to_eventfd(buf_info->presentation_sync_fd);
+ close(buf_info->presentation_sync_fd);
+ buf_info->presentation_sync_fd = -1;
+
+ wp_presentation_feedback_destroy(buf_info->presentation_feedback);
+ buf_info->presentation_feedback = NULL;
+ }
+ }
+ }
+ }
+
+ if (wl_egl_surface->presentation_sync.fd != -1) {
+ _write_to_eventfd(surf_source->presentation_sync.fd);
+ close(surf_source->presentation_sync.fd);
+ surf_source->presentation_sync.fd = -1;
+ }
+ */
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+
+ /* TODO buffer
+ if (wl_egl_surface->in_use_buffers) {
+ __tpl_list_free(wl_egl_surface->in_use_buffers,
+ (tpl_free_func_t)__cb_buffer_remove_from_list);
+ wl_egl_surface->in_use_buffers = NULL;
+ }
+
+ if (surf_source->committed_buffers) {
+ while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tbm_surface_h tbm_surface =
+ __tpl_list_pop_front(surf_source->committed_buffers,
+ (tpl_free_func_t)__cb_buffer_remove_from_list);
+
+ TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+ tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+ tbm_surface, tsq_err);
+ }
+ __tpl_list_free(surf_source->committed_buffers, NULL);
+ surf_source->committed_buffers = NULL;
+ }
+
+ if (surf_source->vblank_waiting_buffers) {
+ while (!__tpl_list_is_empty(surf_source->vblank_waiting_buffers)) {
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tbm_surface_h tbm_surface =
+ __tpl_list_pop_front(surf_source->vblank_waiting_buffers,
+ (tpl_free_func_t)__cb_buffer_remove_from_list);
+
+ tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+ tbm_surface, tsq_err);
}
+ __tpl_list_free(surf_source->vblank_waiting_buffers, NULL);
+ surf_source->vblank_waiting_buffers = NULL;
+ }
+
+ if (surf_source->use_sync_fence && surf_source->fence_waiting_sources) {
+ while (!__tpl_list_is_empty(surf_source->fence_waiting_sources)) {
+ twe_fence_wait_source *wait_source =
+ __tpl_list_pop_front(surf_source->fence_waiting_sources,
+ NULL);
+ if (!g_source_is_destroyed(&wait_source->gsource)) {
+ tbm_surface_internal_unref(wait_source->tbm_surface);
+ wait_source->tbm_surface = NULL;
+
+ close(wait_source->fence_fd);
+ wait_source->fence_fd = -1;
+
+ g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag);
+ g_source_destroy(&wait_source->gsource);
+ g_source_unref(&wait_source->gsource);
+ }
+ }
+ }
+ */
+
+ if (wl_egl_surface->surface_sync) {
+ TPL_INFO("[SURFACE_SYNC_DESTROY]", "wl_egl_surface(%p) surface_sync(%p)",
+ wl_egl_surface, wl_egl_surface->surface_sync);
+ zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync);
+ wl_egl_surface->surface_sync = NULL;
+ }
+
+ if (wl_egl_surface->tss_flusher) {
+ TPL_INFO("[FLUSHER_DESTROY]",
+ "wl_egl_surface(%p) tss_flusher(%p)",
+ wl_egl_surface, wl_egl_surface->tss_flusher);
+ tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher);
+ wl_egl_surface->tss_flusher = NULL;
+ }
- wl_egl_surface->twe_surface = NULL;
+ if (wl_egl_surface->vblank) {
+ TPL_INFO("[VBLANK_DESTROY]",
+ "wl_egl_surface(%p) vblank(%p)",
+ wl_egl_surface, wl_egl_surface->vblank);
+ tdm_client_vblank_destroy(wl_egl_surface->vblank);
+ wl_egl_surface->vblank = NULL;
+ }
+
+ if (wl_egl_surface->tbm_queue) {
+ TPL_INFO("[TBM_QUEUE_DESTROY]",
+ "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
+ tbm_surface_queue_destroy(wl_egl_surface->tbm_queue);
wl_egl_surface->tbm_queue = NULL;
}
- TPL_OBJECT_UNLOCK(wl_egl_surface);
- __tpl_object_fini(&wl_egl_surface->base);
- free(wl_egl_surface);
- surface->backend.data = NULL;
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
+/* Thread-side message handler for the surface gsource.
+ * message 1: create tbm_queue/vblank/flusher in the thread and wake
+ *            the caller blocked on surf_cond in __tpl_wl_egl_surface_init().
+ * message 2: acquirable notification (handling TBD). */
+static tpl_bool_t
+__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
+{
+	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+
+	wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
+
+	/* Initialize surface */
+	if (message == 1) {
+		tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+		TPL_DEBUG("wl_egl_surface(%p) initialize message received!",
+				  wl_egl_surface);
+		_thread_wl_egl_surface_init(wl_egl_surface);
+		/* tpl_gcond_signal takes a pointer; waker for surf_init's gcond_wait */
+		tpl_gcond_signal(&wl_egl_surface->surf_cond);
+		tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+	} else if (message == 2) {
+		tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+		TPL_DEBUG("wl_egl_surface(%p) acquirable message received!",
+				  wl_egl_surface);
+		/* TODO acquirable */
+		tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+	}
+
+	return TPL_TRUE;
+}
+
+/* Finalizer for the surface gsource; tears down thread-owned resources. */
+static void
+__thread_func_surf_finalize(tpl_gsource *gsource)
+{
+	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+
+	wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
+	TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+	_thread_wl_egl_surface_fini(wl_egl_surface);
+
+	/* wl_egl_surface is a pointer: %p, not %d */
+	TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%p)",
+			  gsource, wl_egl_surface);
+}
+
+static tpl_gsource_functions surf_funcs = {
+ .prepare = NULL,
+ .check = NULL,
+ .dispatch = __thread_func_surf_dispatch,
+ .finalize = __thread_func_surf_finalize,
+};
+
static tpl_result_t
__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
 tpl_bool_t set)
{
 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
- if (!surface) {
- TPL_ERR("Invalid parameter. tpl_surface(%p)", surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
- if (!wl_egl_surface) {
- TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)",
- surface, wl_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
- if (!wl_egl_surface->twe_surface) {
- TPL_ERR("Invalid parameter. wl_egl_surface(%p) twe_surface(%p)",
- wl_egl_surface, wl_egl_surface->twe_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ /* FIX: was TPL_CHECK_ON_TRUE_RETURN_VAL, which would return an error
+ * for every VALID (non-NULL) wl_egl_surface; a NULL check is intended. */
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
- twe_surface_set_rotation_capablity(wl_egl_surface->twe_surface,
- set);
+ TPL_INFO("[SET_PREROTATION_CAPABILITY]",
+ "wl_egl_surface(%p) prerotation capability set to [%s]",
+ wl_egl_surface, (set ? "TRUE" : "FALSE"));
+ wl_egl_surface->prerotation_capability = set;
 return TPL_ERROR_NONE;
}
{
 tpl_wl_egl_surface_t *wl_egl_surface = NULL;
- if (!surface) {
- TPL_ERR("Invalid parameter. tpl_surface(%p)", surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
 wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
- if (!wl_egl_surface) {
- TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)",
- surface, wl_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
- if (!wl_egl_surface->twe_surface) {
- TPL_ERR("Invalid parameter. wl_egl_surface(%p) twe_surface(%p)",
- wl_egl_surface, wl_egl_surface->twe_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ /* FIX: was TPL_CHECK_ON_TRUE_RETURN_VAL, which would reject every
+ * valid (non-NULL) wl_egl_surface; a NULL check is intended here. */
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
- twe_surface_set_post_interval(wl_egl_surface->twe_surface,
- post_interval);
+ TPL_INFO("[SET_POST_INTERVAL]",
+ "wl_egl_surface(%p) post_interval(%d -> %d)",
+ wl_egl_surface, wl_egl_surface->post_interval, post_interval);
+
+ /* The stored interval is consumed when committing frames. */
+ wl_egl_surface->post_interval = post_interval;
 return TPL_ERROR_NONE;
}
tpl_bool_t
__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
{
- if (!native_dpy) return TPL_FALSE;
+ struct wl_interface *wl_egl_native_dpy = NULL;
+
+ /* FIX: the rewrite dropped the NULL check of native_dpy itself while
+ * still dereferencing it below; restore the guard before dereference. */
+ TPL_CHECK_ON_NULL_RETURN_VAL(native_dpy, TPL_FALSE);
+
+ wl_egl_native_dpy = *(void **) native_dpy;
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
- if (twe_check_native_handle_is_wl_display(native_dpy))
+ /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
+ is a memory address pointing the structure of wl_display_interface. */
+ if (wl_egl_native_dpy == &wl_display_interface)
 return TPL_TRUE;
+ /* Fallback: the interface struct may have been copied; compare by its
+ name string instead of its address. */
+ if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
+ strlen(wl_display_interface.name)) == 0) {
+ return TPL_TRUE;
+ }
+
 return TPL_FALSE;
}
__tpl_wl_egl_surface_get_size;
}
+/* -- BEGIN -- wl_egl_window callback functions */
+/* Called when the native wl_egl_window is destroyed. If the tpl surface
+ * is still alive this is abnormal (the window must outlive the EGL
+ * surface); detach all window callbacks and free tizen_private.
+ * FIX: unlocked an undeclared 'surf_source->surf_mutex' (compile error)
+ * instead of the mutex locked above, and used the pre-rename field
+ * 'surf' instead of 'wl_surface'. */
+static void
+__cb_destroy_callback(void *private)
+{
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+
+ if (!tizen_private) {
+ TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface");
+ return;
+ }
+
+ wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+ if (wl_egl_surface) {
+ TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.",
+ wl_egl_surface->wl_egl_window);
+ TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface.");
+
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ wl_egl_surface->wl_egl_window->destroy_window_callback = NULL;
+ wl_egl_surface->wl_egl_window->resize_callback = NULL;
+ wl_egl_surface->wl_egl_window->driver_private = NULL;
+ wl_egl_surface->wl_egl_window = NULL;
+ wl_egl_surface->wl_surface = NULL;
+ wl_egl_surface->is_destroying = TPL_TRUE;
+
+ tizen_private->set_window_serial_callback = NULL;
+ tizen_private->rotate_callback = NULL;
+ tizen_private->get_rotation_capability = NULL;
+ tizen_private->set_frontbuffer_callback = NULL;
+ tizen_private->create_commit_sync_fd = NULL;
+ tizen_private->create_presentation_sync_fd = NULL;
+ tizen_private->data = NULL;
+
+ free(tizen_private);
+ tizen_private = NULL;
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ }
+}
+
+/* Called by wayland-egl when the client resizes the native window.
+ * Resets the tbm_surface_queue to the requested size so the next
+ * dequeued buffer matches the new window dimensions. */
+static void
+__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
+
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+ int cur_w, cur_h, req_w, req_h, format;
+
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+ wl_egl_window);
+ return;
+ }
+
+ /* Current size/format come from the surface; requested size from the window. */
+ format = wl_egl_surface->format;
+ cur_w = wl_egl_surface->width;
+ cur_h = wl_egl_surface->height;
+ req_w = wl_egl_window->width;
+ req_h = wl_egl_window->height;
+
+ TPL_INFO("[WINDOW_RESIZE]",
+ "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
+ wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
+
+ /* The queue reset triggers __cb_tbm_queue_reset_callback. */
+ if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
+ != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
+ return;
+ }
+}
+/* -- END -- wl_egl_window callback functions */
+
+/* -- BEGIN -- wl_egl_window tizen private callback functions */
+
+/* There is no usecase for using prerotation callback below */
+/* Stores the window's new rotation on the surface; the value is read back
+ * when deciding buffer transforms. */
+static void
+__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
+
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+ int rotation = tizen_private->rotation;
+
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+ wl_egl_window);
+ return;
+ }
+
+ TPL_INFO("[WINDOW_ROTATE]",
+ "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)",
+ wl_egl_surface, wl_egl_window,
+ wl_egl_surface->rotation, rotation);
+
+ wl_egl_surface->rotation = rotation;
+}
+
+/* There is no usecase for using prerotation callback below */
+/* Reports whether this surface supports prerotation to wayland-egl.
+ * FIX: read 'rotation_capability' while the setter
+ * (__tpl_wl_egl_surface_set_rotation_capability) writes
+ * 'prerotation_capability' — unified on 'prerotation_capability'. */
+static int
+__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
+ void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
+
+ int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+ wl_egl_window);
+ return rotation_capability;
+ }
+
+ if (wl_egl_surface->prerotation_capability == TPL_TRUE)
+ rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
+ else
+ rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
+
+ return rotation_capability;
+}
+
+/* Records a client-provided frame serial on the surface and flags that
+ * serials are now driven by the client (set_serial_is_used). */
+static void
+__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
+ void *private, unsigned int serial)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
+
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+ wl_egl_window);
+ return;
+ }
+
+ wl_egl_surface->set_serial_is_used = TPL_TRUE;
+ wl_egl_surface->serial = serial;
+}
+
+/* Returns a dup of the surface's commit-sync eventfd, creating the
+ * eventfd lazily on first call. The caller owns the returned fd.
+ * Returns -1 on failure.
+ * FIX: the NULL-surface TPL_ERR passed an argument with no conversion
+ * specifier in the format string (undefined behavior). */
+static int
+__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
+
+ int commit_sync_fd = -1;
+
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface);
+ return -1;
+ }
+
+ tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+
+ /* Already created: hand out a duplicate so each caller owns its fd. */
+ if (wl_egl_surface->commit_sync.fd != -1) {
+ commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
+ TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
+ wl_egl_surface->commit_sync.fd, commit_sync_fd);
+ TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)",
+ wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd);
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+ return commit_sync_fd;
+ }
+
+ wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
+ if (wl_egl_surface->commit_sync.fd == -1) {
+ TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)", wl_egl_surface);
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+ return -1;
+ }
+
+ commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
+
+ TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
+ wl_egl_surface->commit_sync.fd, commit_sync_fd);
+ TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)",
+ wl_egl_surface, commit_sync_fd);
+
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+
+ return commit_sync_fd;
+}
+
+/* Returns a dup of the surface's presentation-sync eventfd, creating the
+ * eventfd lazily on first call. The caller owns the returned fd.
+ * Returns -1 on failure.
+ * FIX: the NULL-surface TPL_ERR passed an argument with no conversion
+ * specifier in the format string (undefined behavior). */
+static int
+__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
+
+ int presentation_sync_fd = -1;
+
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface);
+ return -1;
+ }
+
+ tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+ /* Already created: hand out a duplicate so each caller owns its fd. */
+ if (wl_egl_surface->presentation_sync.fd != -1) {
+ presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
+ TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
+ wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+ TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
+ wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+ return presentation_sync_fd;
+ }
+
+ wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
+ if (wl_egl_surface->presentation_sync.fd == -1) {
+ TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)", wl_egl_surface);
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+ return -1;
+ }
+
+ presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
+ TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
+ wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+ TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
+ wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+
+ return presentation_sync_fd;
+}
+/* -- END -- wl_egl_window tizen private callback functions */
+
+/* -- BEGIN -- tizen_surface_shm_flusher_listener */
+/* Compositor requested a buffer flush: flush the tbm_surface_queue.
+ * FIX: 'data' was dereferenced without a NULL check; guard it like the
+ * other listener callbacks in this file (TPL_CHECK_ON_NULL_RETURN). */
+static void __cb_tss_flusher_flush_callback(void *data,
+ struct tizen_surface_shm_flusher *tss_flusher)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+ TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
+
+ tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
+ return;
+ }
+}
+
+/* Compositor requested a free-buffer flush: release only the queue's free
+ * buffers.
+ * FIX: 'data' was dereferenced without a NULL check; guard it like the
+ * other listener callbacks in this file (TPL_CHECK_ON_NULL_RETURN). */
+static void __cb_tss_flusher_free_flush_callback(void *data,
+ struct tizen_surface_shm_flusher *tss_flusher)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+ TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
+
+ tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
+ return;
+ }
+}
+
+/* Listener vtable for tizen_surface_shm_flusher events (flush /
+ * free_flush), registered with 'wl_egl_surface' as user data. */
+static const struct tizen_surface_shm_flusher_listener
+tss_flusher_listener = {
+ __cb_tss_flusher_flush_callback,
+ __cb_tss_flusher_free_flush_callback
+};
+/* -- END -- tizen_surface_shm_flusher_listener */
+
+
+/* -- BEGIN -- tbm_surface_queue callback funstions */
+/* Called whenever the tbm_surface_queue is reset (resize or
+ * activate/deactivate). Marks the surface for reset and notifies the
+ * owner via surface->reset_cb. */
+static void
+__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
+ void *data)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+ tpl_wl_egl_display_t *wl_egl_display = NULL;
+ tpl_surface_t *surface = NULL;
+ tpl_bool_t is_activated = TPL_FALSE;
+ int width, height;
+
+ wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+ wl_egl_display = wl_egl_surface->wl_egl_display;
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
+
+ surface = wl_egl_surface->tpl_surface;
+ TPL_CHECK_ON_NULL_RETURN(surface);
+
+ /* Log when the queue size differs from the surface size; the reset
+ * flag itself is set unconditionally below so the next frame reflects
+ * the changed window size. */
+ width = tbm_surface_queue_get_width(tbm_queue);
+ height = tbm_surface_queue_get_height(tbm_queue);
+ if (surface->width != width || surface->height != height) {
+ TPL_INFO("[QUEUE_RESIZE]",
+ "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
+ wl_egl_surface, tbm_queue,
+ surface->width, surface->height, width, height);
+ }
+
+ /* When queue_reset_callback is called, if is_activated is different from
+ * its previous state change the reset flag to TPL_TRUE to get a new buffer
+ * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
+ is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client,
+ wl_egl_surface->tbm_queue);
+ if (wl_egl_surface->is_activated != is_activated) {
+ if (is_activated) {
+ TPL_INFO("[ACTIVATED]",
+ "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
+ } else {
+ TPL_LOG_T("[DEACTIVATED]",
+ " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
+ }
+ }
+
+ wl_egl_surface->reset = TPL_TRUE;
+
+ /* Let the surface owner react to the reset (e.g. EGL makes the next
+ * dequeue pick up the new configuration). */
+ if (surface->reset_cb)
+ surface->reset_cb(surface->reset_data);
+}
+
+/* Trace callback for tbm_surface_queue events — not implemented yet.
+ * FIX: silence unused-parameter/variable warnings with TPL_IGNORE, the
+ * same pattern used by __cb_tbm_queue_acquirable_callback. */
+static void __cb_tbm_queue_trace_callback(tbm_surface_queue_h tbm_queue,
+ tbm_surface_h tbm_surface,
+ tbm_surface_queue_trace trace,
+ void *data)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+
+ TPL_IGNORE(tbm_queue);
+ TPL_IGNORE(tbm_surface);
+ TPL_IGNORE(trace);
+ TPL_IGNORE(wl_egl_surface);
+
+ /* TODO */
+}
+
+/* Called when a buffer becomes acquirable on the tbm_queue. Forwards the
+ * event to the surface's gsource on the tpl thread; message value 2 is
+ * handled by the acquirable branch of __thread_func_surf_dispatch. */
+static void
+__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
+ void *data)
+{
+ TPL_IGNORE(tbm_queue);
+
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+
+ tpl_gsource_send_message(wl_egl_surface->surf_source, 2);
+
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+}
+
+/* -- END -- tbm_surface_queue callback funstions */