int fd;
} presentation_sync;
- tpl_gmutex free_queue_mutex;
- tpl_gcond free_queue_cond;
-
tpl_gmutex surf_mutex;
tpl_gcond surf_cond;
tpl_wl_egl_surface_t *wl_egl_surface;
};
-tpl_bool_t
+
+static void
+__cb_buffer_remove_from_list(void *data);
+static int
+_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
+static void
+_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
+static void
+__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
+static tpl_wl_egl_buffer_t *
+_get_wl_egl_buffer(tbm_surface_h tbm_surface);
+static int
+_write_to_eventfd(int eventfd);
+static void
+_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface);
+static tpl_result_t
+_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
+static void
+_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
+ tbm_surface_h tbm_surface);
+
+static tpl_bool_t
_check_native_handle_is_wl_display(tpl_handle_t display)
{
struct wl_interface *wl_egl_native_dpy = *(void **) display;
if (!wl_egl_display) {
TPL_ERR("Failed to get wl_egl_display from gsource(%p)", gsource);
TPL_WARN("tdm_source(%p) will be removed from thread.", gsource);
- return TPL_GSOURCE_REMOVE;
+ return TPL_FALSE;
}
tdm_err = tdm_client_handle_events(wl_egl_display->tdm_client);
wl_egl_display->tdm_source = NULL;
- return G_SOURCE_REMOVE;
+ return TPL_FALSE;
}
- return G_SOURCE_CONTINUE;
+ return TPL_TRUE;
}
static void
TPL_LOG_T("WL_EGL", "TPL_WAIT_VBLANK:DEFAULT_ENABLED");
TPL_LOG_T("WL_EGL", "wl_egl_display(%p) tdm_source(%p) tdm_client(%p)",
- wl_egl_display, tdm_source, client);
+ wl_egl_display, tdm_source, tdm_client);
return TPL_ERROR_NONE;
}
#define IMPL_TIZEN_SURFACE_SHM_VERSION 2
-void
+static void
__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
uint32_t name, const char *interface,
uint32_t version)
}
}
-void
+static void
__cb_wl_resistry_global_remove_callback(void *data,
struct wl_registry *wl_registry,
uint32_t name)
wl_egl_display->last_error = errno;
}
-static void*
-_thread_init(void *data)
-{
- tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
-
- if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) {
- TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)",
- wl_egl_display, wl_egl_display->wl_display);
- }
-
- if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
- TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
- }
-
- return wl_egl_display;
-}
-
-static void
-_thread_fini(void *data)
-{
- tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
-
- if (wl_egl_display->tdm_initialized)
- tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_FALSE);
- if (wl_egl_display->wl_initialized)
- _thread_wl_display_fini(wl_egl_display);
-}
-
-static tpl_result_t
-__tpl_wl_egl_display_init(tpl_display_t *display)
-{
- tpl_wl_egl_display_t *wl_egl_display = NULL;
-
- TPL_ASSERT(display);
-
- /* Do not allow default display in wayland. */
- if (!display->native_handle) {
- TPL_ERR("Invalid native handle for display.");
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- if (!_check_native_handle_is_wl_display(display->native_handle)) {
- TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
- return TPL_ERROR_INVALID_PARAMETER;
- }
-
- ev_queue = wl_display_create_queue(display->native_handle);
- if (!ev_queue) {
- TPL_ERR("Failed to create wl_event_queue.");
- return TPL_ERROR_OUT_OF_MEMORY;
- }
-
- wl_egl_display = (tpl_wl_egl_display_t *) calloc(1,
- sizeof(tpl_wl_egl_display_t));
- if (!wl_egl_display) {
- TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t.");
- return TPL_ERROR_OUT_OF_MEMORY;
- }
-
- display->backend.data = wl_egl_display;
- display->bufmgr_fd = -1;
-
- wl_egl_display->tdm_initialized = TPL_FALSE;
- wl_egl_display->wl_initialized = TPL_FALSE;
-
- wl_egl_display->ev_queue = ev_queue;
- wl_egl_display->wl_display = (struct wl_display *)display->native_handle;
- wl_egl_display->last_error = 0;
- wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
- wl_egl_display->prepared = TPL_FALSE;
-
- /* Wayland Interfaces */
- wl_egl_display->tss = NULL;
- wl_egl_display->presentation = NULL;
- wl_egl_display->explicit_sync = NULL;
-
- wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled
- env = tpl_getenv("TPL_WAIT_VBLANK");
- if (env && !atoi(env)) {
- wl_egl_display->use_wait_vblank = TPL_FALSE;
- }
-
- tpl_gmutex_init(&wl_egl_display->wl_event_mutex);
-
- /* Create gthread */
- wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
- _thread_init, (void *)wl_egl_display);
- if (!wl_egl_display->thread) {
- TPL_ERR("Failed to create wl_egl_thread");
- goto free_display;
- }
-
- wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread,
- (void *)wl_egl_display,
- wl_display_get_fd(wl_egl_display->wl_display),
- &disp_funcs, SOURCE_TYPE_NORMAL);
- if (!wl_egl_display->disp_source) {
- TPL_ERR("Failed to add native_display(%p) to thread(%p)",
- display->native_handle,
- wl_egl_display->thread);
- goto free_display;
- }
-
- TPL_LOG_T("WL_EGL",
- "[INIT DISPLAY] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
- wl_egl_display,
- wl_egl_display->thread,
- wl_egl_display->wl_display);
-
- TPL_LOG_T("WL_EGL",
- "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%S) USE_EXPLICIT_SYNC(%s)",
- wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
- wl_egl_display->tss ? "TRUE" : "FALSE",
- wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");
-
- return TPL_ERROR_NONE;
-
-free_display:
- if (wl_egl_display->thread) {
- tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
- tpl_gthread_destroy(wl_egl_display->thread, _thread_fini);
- }
-
- wl_egl_display->thread = NULL;
- free(wl_egl_display);
-
- display->backend.data = NULL;
- return TPL_ERROR_INVALID_OPERATION;
-}
-
-static void
-__tpl_wl_egl_display_fini(tpl_display_t *display)
-{
- tpl_wl_egl_display_t *wl_egl_display;
-
- TPL_ASSERT(display);
-
- wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data;
- if (wl_egl_display) {
- TPL_LOG_T("WL_EGL",
- "[FINI] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
- wl_egl_display,
- wl_egl_display->thread,
- wl_egl_display->wl_display);
-
- if (wl_egl_display->gsource) {
- tpl_gsource_destroy(wl_egl_display->gsource, TPL_TRUE);
- wl_egl_display->gsource = NULL;
- }
-
- if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) {
- tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
- wl_egl_display->tdm_source = NULL;
- }
-
- if (wl_egl_display->thread) {
- tpl_gthread_destroy(wl_egl_display->thread, NULL);
- wl_egl_display->wl_egl_thread = NULL;
- }
-
- tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);
-
- free(wl_egl_display);
- }
-
- display->backend.data = NULL;
-}
-
-static tpl_result_t
+tpl_result_t
_thread_wl_display_init(tpl_wl_egl_display_t *wl_egl_display)
{
struct wl_registry *registry = NULL;
return result;
}
-static void
+void
_thread_wl_display_fini(tpl_wl_egl_display_t *wl_egl_display)
{
/* If wl_egl_display is in prepared state, cancel it */
wl_egl_display->wl_display);
}
+
+static void*
+_thread_init(void *data)
+{
+ tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
+
+ if (_thread_wl_display_init(wl_egl_display) != TPL_ERROR_NONE) {
+ TPL_ERR("Failed to initialize wl_egl_display(%p) with wl_display(%p)",
+ wl_egl_display, wl_egl_display->wl_display);
+ }
+
+ if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
+ TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VBLANK:DISABLED");
+ }
+
+ return wl_egl_display;
+}
+
+static void
+_thread_fini(void *data)
+{
+ tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
+
+ if (wl_egl_display->tdm_initialized)
+ tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_FALSE);
+ if (wl_egl_display->wl_initialized)
+ _thread_wl_display_fini(wl_egl_display);
+}
+
static tpl_bool_t
__thread_func_disp_prepare(tpl_gsource *gsource)
{
* to remove the gsource from the main loop.
* This is because wl_egl_display is not valid since last_error was set.*/
if (wl_egl_display->last_error) {
- return TPL_GSOURCE_REMOVE;
+ return TPL_FALSE;
}
tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
wl_display_flush(wl_egl_display->wl_display);
tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
- return TPL_GSOURCE_CONTINUE;
+ return TPL_TRUE;
}
static void
return;
}
+
static tpl_gsource_functions disp_funcs = {
.prepare = __thread_func_disp_prepare,
.check = __thread_func_disp_check,
};
static tpl_result_t
-__tpl_wl_egl_display_query_config(tpl_display_t *display,
- tpl_surface_type_t surface_type,
- int red_size, int green_size,
- int blue_size, int alpha_size,
- int color_depth, int *native_visual_id,
- tpl_bool_t *is_slow)
+__tpl_wl_egl_display_init(tpl_display_t *display)
{
+ tpl_wl_egl_display_t *wl_egl_display = NULL;
+
TPL_ASSERT(display);
- if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
- green_size == 8 && blue_size == 8 &&
- (color_depth == 32 || color_depth == 24)) {
+ /* Do not allow default display in wayland. */
+ if (!display->native_handle) {
+ TPL_ERR("Invalid native handle for display.");
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
- if (alpha_size == 8) {
- if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
- if (is_slow) *is_slow = TPL_FALSE;
- return TPL_ERROR_NONE;
- }
- if (alpha_size == 0) {
- if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
- if (is_slow) *is_slow = TPL_FALSE;
- return TPL_ERROR_NONE;
- }
+ if (!_check_native_handle_is_wl_display(display->native_handle)) {
+ TPL_ERR("native_handle(%p) is not wl_display", display->native_handle);
+ return TPL_ERROR_INVALID_PARAMETER;
}
- return TPL_ERROR_INVALID_PARAMETER;
-}
+ wl_egl_display = (tpl_wl_egl_display_t *) calloc(1,
+ sizeof(tpl_wl_egl_display_t));
+ if (!wl_egl_display) {
+ TPL_ERR("Failed to allocate memory for new tpl_wl_egl_display_t.");
+ return TPL_ERROR_OUT_OF_MEMORY;
+ }
-static tpl_result_t
-__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
- int alpha_size)
-{
- TPL_IGNORE(display);
- TPL_IGNORE(visual_id);
+ display->backend.data = wl_egl_display;
+ display->bufmgr_fd = -1;
+
+ wl_egl_display->tdm_initialized = TPL_FALSE;
+ wl_egl_display->wl_initialized = TPL_FALSE;
+
+ wl_egl_display->ev_queue = NULL;
+ wl_egl_display->wl_display = (struct wl_display *)display->native_handle;
+ wl_egl_display->last_error = 0;
+ wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
+ wl_egl_display->prepared = TPL_FALSE;
+
+ /* Wayland Interfaces */
+ wl_egl_display->tss = NULL;
+ wl_egl_display->presentation = NULL;
+ wl_egl_display->explicit_sync = NULL;
+
+ wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled
+ {
+ char *env = tpl_getenv("TPL_WAIT_VBLANK");
+ if (env && !atoi(env)) {
+ wl_egl_display->use_wait_vblank = TPL_FALSE;
+ }
+ }
+
+ tpl_gmutex_init(&wl_egl_display->wl_event_mutex);
+
+ /* Create gthread */
+ wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
+ (tpl_gthread_func)_thread_init, (void *)wl_egl_display);
+ if (!wl_egl_display->thread) {
+ TPL_ERR("Failed to create wl_egl_thread");
+ goto free_display;
+ }
+
+ wl_egl_display->disp_source = tpl_gsource_create(wl_egl_display->thread,
+ (void *)wl_egl_display,
+ wl_display_get_fd(wl_egl_display->wl_display),
+ &disp_funcs, SOURCE_TYPE_NORMAL);
+ if (!wl_egl_display->disp_source) {
+ TPL_ERR("Failed to add native_display(%p) to thread(%p)",
+ display->native_handle,
+ wl_egl_display->thread);
+ goto free_display;
+ }
+
+ TPL_LOG_T("WL_EGL",
+ "[INIT DISPLAY] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
+ wl_egl_display,
+ wl_egl_display->thread,
+ wl_egl_display->wl_display);
+
+ TPL_LOG_T("WL_EGL",
+ "USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)",
+ wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
+ wl_egl_display->tss ? "TRUE" : "FALSE",
+ wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");
+
+ return TPL_ERROR_NONE;
+
+free_display:
+ if (wl_egl_display->thread) {
+ tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
+ tpl_gthread_destroy(wl_egl_display->thread, _thread_fini);
+ }
+
+ wl_egl_display->thread = NULL;
+ free(wl_egl_display);
+
+ display->backend.data = NULL;
+ return TPL_ERROR_INVALID_OPERATION;
+}
+
+static void
+__tpl_wl_egl_display_fini(tpl_display_t *display)
+{
+ tpl_wl_egl_display_t *wl_egl_display;
+
+ TPL_ASSERT(display);
+
+ wl_egl_display = (tpl_wl_egl_display_t *)display->backend.data;
+ if (wl_egl_display) {
+ TPL_LOG_T("WL_EGL",
+ "[FINI] wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
+ wl_egl_display,
+ wl_egl_display->thread,
+ wl_egl_display->wl_display);
+
+ if (wl_egl_display->disp_source) {
+ tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
+ wl_egl_display->disp_source = NULL;
+ }
+
+ if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) {
+ tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
+ wl_egl_display->tdm_source = NULL;
+ }
+
+ if (wl_egl_display->thread) {
+ tpl_gthread_destroy(wl_egl_display->thread, NULL);
+ wl_egl_display->thread = NULL;
+ }
+
+ tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);
+
+ free(wl_egl_display);
+ }
+
+ display->backend.data = NULL;
+}
+
+static tpl_result_t
+__tpl_wl_egl_display_query_config(tpl_display_t *display,
+ tpl_surface_type_t surface_type,
+ int red_size, int green_size,
+ int blue_size, int alpha_size,
+ int color_depth, int *native_visual_id,
+ tpl_bool_t *is_slow)
+{
+ TPL_ASSERT(display);
+
+ if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 &&
+ green_size == 8 && blue_size == 8 &&
+ (color_depth == 32 || color_depth == 24)) {
+
+ if (alpha_size == 8) {
+ if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888;
+ if (is_slow) *is_slow = TPL_FALSE;
+ return TPL_ERROR_NONE;
+ }
+ if (alpha_size == 0) {
+ if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888;
+ if (is_slow) *is_slow = TPL_FALSE;
+ return TPL_ERROR_NONE;
+ }
+ }
+
+ return TPL_ERROR_INVALID_PARAMETER;
+}
+
+static tpl_result_t
+__tpl_wl_egl_display_filter_config(tpl_display_t *display, int *visual_id,
+ int alpha_size)
+{
+ TPL_IGNORE(display);
+ TPL_IGNORE(visual_id);
TPL_IGNORE(alpha_size);
return TPL_ERROR_NONE;
}
}
tpl_bool_t
-__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
+__tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy)
{
struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
return TPL_FALSE;
}
-static tpl_result_t
-__tpl_wl_egl_surface_init(tpl_surface_t *surface)
+/* -- BEGIN -- wl_egl_window callback functions */
+static void
+__cb_destroy_callback(void *private)
{
- tpl_wl_egl_display_t *wl_egl_display = NULL;
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
tpl_wl_egl_surface_t *wl_egl_surface = NULL;
- tbm_surface_queue_h tbm_queue = NULL;
- tpl_gsource *surf_source = NULL;
- tpl_result_t ret = TPL_ERROR_NONE;
- struct wl_egl_window *wl_egl_window =
- (struct wl_egl_window *)surface->native_handle;
+ if (!tizen_private) {
+ TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface");
+ return;
+ }
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
- TPL_ASSERT(surface->native_handle);
+ wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+ if (wl_egl_surface) {
+ TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.",
+ wl_egl_surface->wl_egl_window);
+ TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface.");
- wl_egl_display =
- (tpl_wl_egl_display_t *)surface->display->backend.data;
- if (!wl_egl_display) {
- TPL_ERR("Invalid parameter. wl_egl_display(%p)",
- wl_egl_display);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ wl_egl_surface->wl_egl_window->destroy_window_callback = NULL;
+ wl_egl_surface->wl_egl_window->resize_callback = NULL;
+ wl_egl_surface->wl_egl_window->driver_private = NULL;
+ wl_egl_surface->wl_egl_window = NULL;
+ wl_egl_surface->wl_surface = NULL;
- wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
- sizeof(tpl_wl_egl_surface_t));
- if (!wl_egl_surface) {
- TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
- return TPL_ERROR_OUT_OF_MEMORY;
- }
+ tizen_private->set_window_serial_callback = NULL;
+ tizen_private->rotate_callback = NULL;
+ tizen_private->get_rotation_capability = NULL;
+ tizen_private->set_frontbuffer_callback = NULL;
+ tizen_private->create_commit_sync_fd = NULL;
+ tizen_private->create_presentation_sync_fd = NULL;
+ tizen_private->data = NULL;
- surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface,
- -1, surf_funcs, SOURCE_TYPE_NORMAL);
- if (!surf_source) {
- TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)",
- wl_egl_surface);
- goto surf_source_create_fail;
+ free(tizen_private);
+ tizen_private = NULL;
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
+}
- surface->backend.data = (void *)wl_egl_surface;
- surface->width = wl_egl_window->width;
- surface->height = wl_egl_window->height;
- surface->rotation = 0;
+static void
+__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
- wl_egl_surface->tpl_surface = surface;
- wl_egl_surface->width = wl_egl_window->width;
- wl_egl_surface->height = wl_egl_window->height;
- wl_egl_surface->format = surface->format;
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+ int cur_w, cur_h, req_w, req_h, format;
- wl_egl_surface->surf_source = surf_source;
- wl_egl_surface->wl_egl_window = wl_egl_window;
- wl_egl_surface->wl_surface = wl_egl_window->surface;
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+ wl_egl_window);
+ return;
+ }
- wl_egl_surface->wl_egl_display = wl_egl_display;
+ format = wl_egl_surface->format;
+ cur_w = wl_egl_surface->width;
+ cur_h = wl_egl_surface->height;
+ req_w = wl_egl_window->width;
+ req_h = wl_egl_window->height;
- wl_egl_surface->reset = TPL_FALSE;
- wl_egl_surface->is_activated = TPL_FALSE;
- wl_egl_surface->need_to_enqueue = TPL_FALSE;
- wl_egl_surface->prerotation_capability = TPL_FALSE;
- wl_egl_surface->vblank_done = TPL_TRUE;
- wl_egl_surface->use_render_done_fence = TPL_FALSE;
- wl_egl_surface->set_serial_is_used = TPL_FALSE;
+ TPL_INFO("[WINDOW_RESIZE]",
+ "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
+ wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
- wl_egl_surface->latest_transform = 0;
- wl_egl_surface->render_done_cnt = 0;
- wl_egl_surface->serial = 0;
+ if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
+ != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
+ return;
+ }
+}
+/* -- END -- wl_egl_window callback functions */
- wl_egl_surface->vblank = NULL;
- wl_egl_surface->tss_flusher = NULL;
- wl_egl_surface->surface_sync = NULL;
+/* -- BEGIN -- wl_egl_window tizen private callback functions */
- wl_egl_surface->post_interval = surface->post_interval;
+/* There is no usecase for using prerotation callback below */
+static void
+__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
- wl_egl_surface->commit_sync.fd = -1;
- wl_egl_surface->presentation_sync.fd = -1;
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+ int rotation = tizen_private->rotation;
- {
- struct tizen_private *tizen_private = NULL;
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+ wl_egl_window);
+ return;
+ }
- if (wl_egl_window->driver_private)
- tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
- else {
- tizen_private = tizen_private_create();
- wl_egl_window->driver_private = (void *)tizen_private;
- }
+ TPL_INFO("[WINDOW_ROTATE]",
+ "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)",
+ wl_egl_surface, wl_egl_window,
+ wl_egl_surface->rotation, rotation);
- if (tizen_private) {
- tizen_private->data = (void *)wl_egl_surface;
- tizen_private->rotate_callback = (void *)__cb_rotate_callback;
- tizen_private->get_rotation_capability = (void *)
- __cb_get_rotation_capability;
- tizen_private->set_window_serial_callback = (void *)
- __cb_set_window_serial_callback;
- tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
- tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
+ wl_egl_surface->rotation = rotation;
+}
- wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
- wl_egl_window->resize_callback = (void *)__cb_resize_callback;
- }
+/* There is no usecase for using prerotation callback below */
+static int
+__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
+ void *private)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
+
+ int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+ wl_egl_window);
+ return rotation_capability;
}
- tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
- tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
+ if (wl_egl_surface->prerotation_capability == TPL_TRUE)
+ rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
+ else
+ rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
- tpl_gmutex_init(&wl_egl_surface->free_queue_mutex);
- tpl_gmutex_init(&wl_egl_surface->surf_mutex);
- tpl_gcond_init(&wl_egl_surface->free_queue_cond);
- tpl_gcond_init(&wl_egl_surface->surf_cond);
- /* Initialize in thread */
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- tpl_gsource_send_message(wl_egl_surface->surf_source, 1);
- tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ return rotation_capability;
+}
- TPL_ASSERT(wl_egl_surface->tbm_queue);
+static void
+__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
+ void *private, unsigned int serial)
+{
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
- TPL_INFO("[SURFACE_INIT]",
- "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)",
- surface, wl_egl_surface, wl_egl_surface->surf_source);
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
- return TPL_ERROR_NONE;
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
+ wl_egl_window);
+ return;
+ }
-surf_source_create_fail:
- free(wl_egl_surface);
- surface->backend.data = NULL;
- return TPL_ERROR_INVALID_OPERATION;
+ wl_egl_surface->set_serial_is_used = TPL_TRUE;
+ wl_egl_surface->serial = serial;
}
-static tbm_surface_queue_h
-_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface,
- struct wayland_tbm_client *wl_tbm_client,
- int num_buffers)
+static int
+__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
{
- tbm_surface_queue_h tbm_queue = NULL;
- tbm_bufmgr bufmgr = NULL;
- unsigned int capability;
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
- struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
- int width = wl_egl_surface->width;
- int height = wl_egl_surface->height;
- int format = wl_egl_surface->format;
+ int commit_sync_fd = -1;
- if (!wl_tbm_client || !wl_surface) {
- TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)",
- wl_tbm_client, wl_surface);
- return NULL;
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid parameter. wl_egl_surface(%p) is NULL", wl_egl_surface);
+ return -1;
}
- bufmgr = tbm_bufmgr_init(-1);
- capability = tbm_bufmgr_get_capability(bufmgr);
- tbm_bufmgr_deinit(bufmgr);
+ tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
- if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
- tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
- wl_tbm_client,
- wl_surface,
- num_buffers,
- width,
- height,
- format);
- } else {
- tbm_queue = wayland_tbm_client_create_surface_queue(
- wl_tbm_client,
- wl_surface,
- num_buffers,
- width,
- height,
- format);
+ if (wl_egl_surface->commit_sync.fd != -1) {
+ commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
+ TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
+ wl_egl_surface->commit_sync.fd, commit_sync_fd);
+ TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)",
+ wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd);
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+ return commit_sync_fd;
}
- if (tbm_queue) {
- TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)",
- wl_tbm_client);
- return NULL;
+ wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
+ if (wl_egl_surface->commit_sync.fd == -1) {
+ TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)", wl_egl_surface);
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+ return -1;
}
- if (tbm_surface_queue_set_modes(
- tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
- TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
- tbm_queue);
- tbm_surface_queue_destroy(tbm_queue);
- return NULL;
- }
+ commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
- if (tbm_surface_queue_add_reset_cb(
- tbm_queue,
- __cb_tbm_queue_reset_callback,
- (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
- tbm_queue);
- tbm_surface_queue_destroy(tbm_queue);
- return NULL;
- }
+ TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
+ wl_egl_surface->commit_sync.fd, commit_sync_fd);
+ TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)",
+ wl_egl_surface, commit_sync_fd);
- if (tbm_surface_queue_add_acquirable_cb(
- tbm_queue,
- __cb_tbm_queue_acquirable_callback,
- (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
- tbm_queue);
- tbm_surface_queue_destroy(tbm_queue);
- return NULL;
- }
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
- return tbm_queue;
+ return commit_sync_fd;
}
-static tdm_client_vblank*
-_thread_create_tdm_client_vblank(tdm_client *tdm_client)
+static int
+__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
{
- tdm_client_vblank *vblank = NULL;
- tdm_client_output *tdm_output = NULL;
- tdm_error tdm_err = TDM_ERROR_NONE;
+ TPL_ASSERT(private);
+ TPL_ASSERT(wl_egl_window);
- if (!tdm_client) {
- TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
- return NULL;
+ int presentation_sync_fd = -1;
+
+ struct tizen_private *tizen_private = (struct tizen_private *)private;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+
+ if (!wl_egl_surface) {
+ TPL_ERR("Invalid parameter. wl_egl_surface is NULL");
+ return -1;
}
- tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
- if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
- TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
- return NULL;
+ tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+ if (wl_egl_surface->presentation_sync.fd != -1) {
+ presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
+ TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
+ wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+ TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
+ wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+ return presentation_sync_fd;
}
- vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
- if (!vblank || tdm_err != TDM_ERROR_NONE) {
- TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
- return NULL;
+ wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
+ if (wl_egl_surface->presentation_sync.fd == -1) {
+ TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)", wl_egl_surface);
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+ return -1;
}
- tdm_client_vblank_set_enable_fake(vblank, 1);
- tdm_client_vblank_set_sync(vblank, 0);
+ presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
+ TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
+ wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+ TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
+ wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
- return vblank;
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+
+ return presentation_sync_fd;
}
+/* -- END -- wl_egl_window tizen private callback functions */
-static void
-_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
+/* -- BEGIN -- tizen_surface_shm_flusher_listener */
+static void __cb_tss_flusher_flush_callback(void *data,
+ struct tizen_surface_shm_flusher *tss_flusher)
{
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
- wl_egl_surface->wl_surface,
- wl_egl_display->wl_tbm_client,
- wl_egl_surface->width,
- wl_egl_surface->height,
- wl_egl_surface->format,
- CLIENT_QUEUE_SIZE);
- if (!wl_egl_surface->tbm_queue) {
- TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
- wl_egl_surface, wl_egl_display->wl_tbm_client);
- return;
- }
+ TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
- TPL_INFO("[QUEUE_CREATION]",
- "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)",
- wl_egl_surface, wl_egl_surface->wl_surface,
- wl_egl_display->wl_tbm_client);
- TPL_INFO("[QUEUE_CREATION]",
- "tbm_queue(%p) size(%d x %d) X %d format(%d)",
- wl_egl_surface->tbm_queue,
- wl_egl_surface->width,
- wl_egl_surface->height,
- CLIENT_QUEUE_SIZE,
- wl_egl_surface->format);
-
- wl_egl_surface->vblank = _thread_create_tdm_client_vblank(
- wl_egl_display->tdm_client);
- if (wl_egl_surface->vblank) {
- TPL_INFO("[VBLANK_INIT]",
- "wl_egl_surface(%p) tdm_client(%p) vblank(%p)",
- wl_egl_surface, wl_egl_display->tdm_client,
- wl_egl_surface->vblank);
+ tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
+ return;
}
+}
- if (wl_egl_display->tss) {
- wl_egl_surface->tss_flusher =
- tizen_surface_shm_get_flusher(wl_egl_display->tss,
- wl_egl_surface->wl_surface);
- }
+static void __cb_tss_flusher_free_flush_callback(void *data,
+ struct tizen_surface_shm_flusher *tss_flusher)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- if (wl_egl_surface->tss_flusher) {
- tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
- &tss_flusher_listener,
- wl_egl_surface);
- TPL_INFO("[FLUSHER_INIT]",
- "wl_egl_surface(%p) tss_flusher(%p)",
- wl_egl_surface, wl_egl_surface->tss_flusher);
- }
+ TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
- if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) {
- wl_egl_surface->surface_sync =
- zwp_linux_explicit_synchronization_v1_get_synchronization(
- wl_egl_display->explicit_sync, wl_egl_surface->wl_surface);
- if (wl_egl_surface->surface_sync) {
- TPL_INFO("[EXPLICIT_SYNC_INIT]",
- "wl_egl_surface(%p) surface_sync(%p)",
- wl_egl_surface, wl_egl_surface->surface_sync);
- } else {
- TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)",
- wl_egl_surface);
- wl_egl_display->use_explicit_sync = TPL_FALSE;
- }
+ tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
+ return;
}
-
- wl_egl_surface->committed_buffers = __tpl_list_alloc();
- wl_egl_surface->in_use_buffers = __tpl_list_alloc();
- wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc();
- wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
- wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
}
+static const struct tizen_surface_shm_flusher_listener
+tss_flusher_listener = {
+ __cb_tss_flusher_flush_callback,
+ __cb_tss_flusher_free_flush_callback
+};
+/* -- END -- tizen_surface_shm_flusher_listener */
+
+
+/* -- BEGIN -- tbm_surface_queue callback functions */
static void
-__tpl_wl_egl_surface_fini(tpl_surface_t *surface)
+__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
+ void *data)
{
tpl_wl_egl_surface_t *wl_egl_surface = NULL;
tpl_wl_egl_display_t *wl_egl_display = NULL;
+ tpl_surface_t *surface = NULL;
+ tpl_bool_t is_activated = TPL_FALSE;
+ int width, height;
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->display);
-
- TPL_CHECK_ON_NULL_RETURN(surface->type == TPL_SURFACE_TYPE_WINDOW);
-
- wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data;
+ wl_egl_surface = (tpl_wl_egl_surface_t *)data;
TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
wl_egl_display = wl_egl_surface->wl_egl_display;
TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
- TPL_INFO("[SURFACE_FINI][BEGIN]",
- "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
- wl_egl_surface,
- wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
-
- if (wl_egl_surface->surf_source)
- tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
- wl_egl_surface->surf_source = NULL;
+ surface = wl_egl_surface->tpl_surface;
+ TPL_CHECK_ON_NULL_RETURN(surface);
- if (wl_egl_surface->wl_egl_window) {
- struct tizen_private *tizen_private = NULL;
- struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
- TPL_INFO("[WL_EGL_WINDOW_FINI]",
- "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
- wl_egl_surface, wl_egl_window,
- wl_egl_surface->wl_surface);
- tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
- if (tizen_private) {
- tizen_private->set_window_serial_callback = NULL;
- tizen_private->rotate_callback = NULL;
- tizen_private->get_rotation_capability = NULL;
- tizen_private->create_presentation_sync_fd = NULL;
- tizen_private->create_commit_sync_fd = NULL;
- tizen_private->set_frontbuffer_callback = NULL;
- tizen_private->merge_sync_fds = NULL;
- tizen_private->data = NULL;
- free(tizen_private);
+ /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
+ * the changed window size at the next frame. */
+ width = tbm_surface_queue_get_width(tbm_queue);
+ height = tbm_surface_queue_get_height(tbm_queue);
+ if (surface->width != width || surface->height != height) {
+ TPL_INFO("[QUEUE_RESIZE]",
+ "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
+ wl_egl_surface, tbm_queue,
+ surface->width, surface->height, width, height);
+ }
- wl_egl_window->dirver_private = NULL;
+ /* When queue_reset_callback is called, if is_activated is different from
+ * its previous state change the reset flag to TPL_TRUE to get a new buffer
+ * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
+ is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client,
+ wl_egl_surface->tbm_queue);
+ if (wl_egl_surface->is_activated != is_activated) {
+ if (is_activated) {
+ TPL_INFO("[ACTIVATED]",
+ "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
+ } else {
+ TPL_LOG_T("[DEACTIVATED]",
+ " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
}
-
- wl_egl_window->destroy_window_callback = NULL;
- wl_egl_window->resize_callback = NULL;
-
- wl_egl_surface->wl_egl_window = NULL;
}
- wl_egl_surface->wl_surface = NULL;
- wl_egl_surface->wl_egl_display = NULL;
- wl_egl_surface->tpl_surface = NULL;
-
- tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
- tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
- tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
+ wl_egl_surface->reset = TPL_TRUE;
- tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
- tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
- tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex);
+ if (surface->reset_cb)
+ surface->reset_cb(surface->reset_data);
+}
- tpl_gmutex_lock(&wl_egl_surface->free_queue_mutex);
- tpl_gmutex_unlock(&wl_egl_surface->free_queue_mutex);
- tpl_gmutex_clear(&wl_egl_surface->free_queue_cond);
+static void
+__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
+ void *data)
+{
+ TPL_IGNORE(tbm_queue);
- tpl_gmutex_clear(&wl_egl_surface->surf_mutex);
- tpl_gcond_clear(&wl_egl_surface->surf_cond);
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
- g_cond_clear(&wl_egl_surface->free_queue_cond);
- g_mutex_clear(&wl_egl_surface->free_queue_mutex);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface);
+ tpl_gsource_send_message(wl_egl_surface->surf_source, 2);
- free(wl_egl_surface);
- surface->backend.data = NULL;
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
+/* -- END -- tbm_surface_queue callback functions */
static void
_thread_wl_egl_surface_fini(tpl_wl_egl_surface_t *wl_egl_surface)
__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
{
tpl_wl_egl_surface_t *wl_egl_surface = NULL;
- tpl_result_t res = TPL_ERROR_NONE;
- ssize_t s;
- uint64_t message = 0;
wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
TPL_DEBUG("wl_egl_surface(%p) initialize message received!",
wl_egl_surface);
_thread_wl_egl_surface_init(wl_egl_surface);
- tpl_gcond_signal(wl_egl_surface->surf_cond);
+ tpl_gcond_signal(&wl_egl_surface->surf_cond);
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
} else if (message == 2) {
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
_thread_wl_egl_surface_fini(wl_egl_surface);
- TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%d)",
+ TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%p)",
gsource, wl_egl_surface);
}
};
static tpl_result_t
-__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
- tpl_bool_t set)
+__tpl_wl_egl_surface_init(tpl_surface_t *surface)
{
- tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+ tpl_wl_egl_display_t *wl_egl_display = NULL;
+ tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+ tpl_gsource *surf_source = NULL;
- TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
+ struct wl_egl_window *wl_egl_window =
+ (struct wl_egl_window *)surface->native_handle;
- wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->display);
+ TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
+ TPL_ASSERT(surface->native_handle);
- TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
+ wl_egl_display =
+ (tpl_wl_egl_display_t *)surface->display->backend.data;
+ if (!wl_egl_display) {
+ TPL_ERR("Invalid parameter. wl_egl_display(%p)",
+ wl_egl_display);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
- TPL_INFO("[SET_PREROTATION_CAPABILITY]",
- "wl_egl_surface(%p) prerotation capability set to [%s]",
- wl_egl_surface, (set ? "TRUE" : "FALSE"));
+ wl_egl_surface = (tpl_wl_egl_surface_t *) calloc(1,
+ sizeof(tpl_wl_egl_surface_t));
+ if (!wl_egl_surface) {
+ TPL_ERR("Failed to allocate memory for new tpl_wl_egl_surface_t.");
+ return TPL_ERROR_OUT_OF_MEMORY;
+ }
- wl_egl_surface->prerotation_capability = set;
- return TPL_ERROR_NONE;
-}
+ surf_source = tpl_gsource_create(wl_egl_display->thread, (void *)wl_egl_surface,
+ -1, &surf_funcs, SOURCE_TYPE_NORMAL);
+ if (!surf_source) {
+ TPL_ERR("Failed to create surf_source with wl_egl_surface(%p)",
+ wl_egl_surface);
+ goto surf_source_create_fail;
+ }
-static tpl_result_t
-__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
- int post_interval)
-{
- tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+ surface->backend.data = (void *)wl_egl_surface;
+ surface->width = wl_egl_window->width;
+ surface->height = wl_egl_window->height;
+ surface->rotation = 0;
- TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
+ wl_egl_surface->tpl_surface = surface;
+ wl_egl_surface->width = wl_egl_window->width;
+ wl_egl_surface->height = wl_egl_window->height;
+ wl_egl_surface->format = surface->format;
- wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
+ wl_egl_surface->surf_source = surf_source;
+ wl_egl_surface->wl_egl_window = wl_egl_window;
+ wl_egl_surface->wl_surface = wl_egl_window->surface;
- TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
+ wl_egl_surface->wl_egl_display = wl_egl_display;
- TPL_INFO("[SET_POST_INTERVAL]",
- "wl_egl_surface(%p) post_interval(%d -> %d)",
- wl_egl_surface, wl_egl_surface->post_interval, post_interval);
+ wl_egl_surface->reset = TPL_FALSE;
+ wl_egl_surface->is_activated = TPL_FALSE;
+	wl_egl_surface->need_to_enqueue = TPL_TRUE;
+ wl_egl_surface->prerotation_capability = TPL_FALSE;
+ wl_egl_surface->vblank_done = TPL_TRUE;
+ wl_egl_surface->use_render_done_fence = TPL_FALSE;
+ wl_egl_surface->set_serial_is_used = TPL_FALSE;
- wl_egl_surface->post_interval = post_interval;
+ wl_egl_surface->latest_transform = 0;
+ wl_egl_surface->render_done_cnt = 0;
+ wl_egl_surface->serial = 0;
- return TPL_ERROR_NONE;
-}
+ wl_egl_surface->vblank = NULL;
+ wl_egl_surface->tss_flusher = NULL;
+ wl_egl_surface->surface_sync = NULL;
-static tpl_bool_t
-__tpl_wl_egl_surface_validate(tpl_surface_t *surface)
-{
- tpl_bool_t retval = TPL_TRUE;
-
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
-
- tpl_wl_egl_surface_t *wl_egl_surface =
- (tpl_wl_egl_surface_t *)surface->backend.data;
-
- retval = !(wl_egl_surface->reset);
+ wl_egl_surface->post_interval = surface->post_interval;
- return retval;
-}
+ wl_egl_surface->commit_sync.fd = -1;
+ wl_egl_surface->presentation_sync.fd = -1;
-void
-__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
-{
- tpl_wl_egl_surface_t *wl_egl_surface =
- (tpl_wl_egl_surface_t *)surface->backend.data;
+ {
+ struct tizen_private *tizen_private = NULL;
- if (width)
- *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
- if (height)
- *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
-}
+ if (wl_egl_window->driver_private)
+ tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
+ else {
+ tizen_private = tizen_private_create();
+ wl_egl_window->driver_private = (void *)tizen_private;
+ }
-#define CAN_DEQUEUE_TIMEOUT_MS 10000
+ if (tizen_private) {
+ tizen_private->data = (void *)wl_egl_surface;
+ tizen_private->rotate_callback = (void *)__cb_rotate_callback;
+ tizen_private->get_rotation_capability = (void *)
+ __cb_get_rotation_capability;
+ tizen_private->set_window_serial_callback = (void *)
+ __cb_set_window_serial_callback;
+ tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
+ tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
-tpl_result_t
-_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
-{
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
+ wl_egl_window->resize_callback = (void *)__cb_resize_callback;
+ }
+ }
- _print_buffer_lists(wl_egl_surface);
+ tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
+ tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
- if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
- != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
- wl_egl_surface->tbm_queue, tsq_err);
- return TPL_ERROR_INVALID_OPERATION;
- }
+ tpl_gmutex_init(&wl_egl_surface->surf_mutex);
+ tpl_gcond_init(&wl_egl_surface->surf_cond);
+ /* Initialize in thread */
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- if (wl_egl_surface->committed_buffers) {
- while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) {
- tbm_surface_h tbm_surface =
- __tpl_list_pop_front(wl_egl_surface->committed_buffers,
- (tpl_free_func_t)__cb_buffer_remove_from_list);
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
- tbm_surface, tsq_err);
- }
- }
+ tpl_gsource_send_message(wl_egl_surface->surf_source, 1);
+ tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- TPL_INFO("[FORCE_FLUSH]",
- "wl_egl_surface(%p) tbm_queue(%p)",
- wl_egl_surface, wl_egl_surface->tbm_queue);
+ TPL_ASSERT(wl_egl_surface->tbm_queue);
+
+ TPL_INFO("[SURFACE_INIT]",
+ "tpl_surface(%p) wl_egl_surface(%p) gsource(%p)",
+ surface, wl_egl_surface, wl_egl_surface->surf_source);
return TPL_ERROR_NONE;
+
+surf_source_create_fail:
+ free(wl_egl_surface);
+ surface->backend.data = NULL;
+ return TPL_ERROR_INVALID_OPERATION;
}
-static void
-_wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer,
- tpl_wl_egl_surface_t *wl_egl_surface)
+static tbm_surface_queue_h
+_thread_create_tbm_queue(tpl_wl_egl_surface_t *wl_egl_surface,
+ struct wayland_tbm_client *wl_tbm_client,
+ int num_buffers)
{
- struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
- struct tizen_private *tizen_private =
- (struct tizen_private *)wl_egl_window->driver_private;
+ tbm_surface_queue_h tbm_queue = NULL;
+ tbm_bufmgr bufmgr = NULL;
+ unsigned int capability;
- TPL_ASSERT(tizen_private);
+ struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
+ int width = wl_egl_surface->width;
+ int height = wl_egl_surface->height;
+ int format = wl_egl_surface->format;
- wl_egl_buffer->draw_done = TPL_FALSE;
- wl_egl_buffer->need_to_commit = TPL_TRUE;
+ if (!wl_tbm_client || !wl_surface) {
+ TPL_ERR("Invalid parameters. wl_tbm_client(%p) wl_surface(%p)",
+ wl_tbm_client, wl_surface);
+ return NULL;
+ }
- wl_egl_buffer->acquire_fence_fd = -1;
- wl_egl_buffer->release_fence_fd = -1;
- wl_egl_buffer->commit_sync_fd = -1;
- wl_egl_buffer->presentation_sync_fd = -1;
+ bufmgr = tbm_bufmgr_init(-1);
+ capability = tbm_bufmgr_get_capability(bufmgr);
+ tbm_bufmgr_deinit(bufmgr);
- wl_egl_buffer->presentation_feedback = NULL;
- wl_egl_buffer->buffer_release = NULL;
+ if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
+ tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
+ wl_tbm_client,
+ wl_surface,
+ num_buffers,
+ width,
+ height,
+ format);
+ } else {
+ tbm_queue = wayland_tbm_client_create_surface_queue(
+ wl_tbm_client,
+ wl_surface,
+ num_buffers,
+ width,
+ height,
+ format);
+ }
- wl_egl_buffer->transform = tizen_private->transform;
+	if (!tbm_queue) {
+ TPL_ERR("Failed to create tbm_queue. wl_tbm_client(%p)",
+ wl_tbm_client);
+ return NULL;
+ }
- if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
- wl_egl_buffer->w_transform = tizen_private->window_transform;
- wl_egl_buffer->w_rotated = TPL_TRUE;
+ if (tbm_surface_queue_set_modes(
+ tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
+ TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
+ tbm_queue);
+ tbm_surface_queue_destroy(tbm_queue);
+ return NULL;
}
- if (wl_egl_surface->set_serial_is_used) {
- wl_egl_buffer->serial = wl_egl_surface->serial;
- } else {
- wl_egl_buffer->serial = ++tizen_private->serial;
+ if (tbm_surface_queue_add_reset_cb(
+ tbm_queue,
+ __cb_tbm_queue_reset_callback,
+ (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
+ tbm_queue);
+ tbm_surface_queue_destroy(tbm_queue);
+ return NULL;
}
- if (wl_egl_buffer->rects) {
- free(wl_egl_buffer->rects);
- wl_egl_buffer->rects = NULL;
- wl_egl_buffer->num_rects = 0;
+ if (tbm_surface_queue_add_acquirable_cb(
+ tbm_queue,
+ __cb_tbm_queue_acquirable_callback,
+ (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
+ tbm_queue);
+ tbm_surface_queue_destroy(tbm_queue);
+ return NULL;
}
-}
-static tpl_wl_egl_buffer_t *
-_get_wl_egl_buffer(tbm_surface_h tbm_surface)
-{
- tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
- tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
- (void **)&wl_egl_buffer);
- return wl_egl_buffer;
+ return tbm_queue;
}
-static tpl_wl_egl_buffer_t *
-_wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
- tbm_surface_h tbm_surface)
+static tdm_client_vblank*
+_thread_create_tdm_client_vblank(tdm_client *tdm_client)
{
- tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
- struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
- tpl_bool_t is_new_buffer = TPL_FALSE;
-
- wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
-
- if (!wl_egl_buffer) {
- wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t));
- TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL);
-
- tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
- (tbm_data_free)__cb_wl_egl_buffer_free);
- tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
- wl_egl_buffer);
- is_new_buffer = TPL_TRUE;
+ tdm_client_vblank *vblank = NULL;
+ tdm_client_output *tdm_output = NULL;
+ tdm_error tdm_err = TDM_ERROR_NONE;
- wl_egl_buffer->wl_buffer = NULL;
- wl_egl_buffer->tbm_surface = tbm_surface;
- wl_egl_buffer->wl_egl_surface = wl_egl_surface;
+ if (!tdm_client) {
+ TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
+ return NULL;
+ }
- wl_egl_buffer->dx = wl_egl_window->dx;
- wl_egl_buffer->dy = wl_egl_window->dy;
- wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
- wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
+ tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
+ if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
+ TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
+ return NULL;
+ }
- TPL_INFO("[WL_EGL_BUFFER_CREATE]",
- "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
- wl_egl_surface, wl_egl_buffer, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
+ vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
+ if (!vblank || tdm_err != TDM_ERROR_NONE) {
+ TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
+ return NULL;
}
- _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface);
+ tdm_client_vblank_set_enable_fake(vblank, 1);
+ tdm_client_vblank_set_sync(vblank, 0);
- return wl_egl_buffer;
+ return vblank;
}
-static tbm_surface_h
-__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
- int32_t *release_fence)
+static void
+_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
{
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(surface->display->backend.data);
- TPL_OBJECT_CHECK_RETURN(surface, NULL);
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
- tpl_wl_egl_surface_t *wl_egl_surface =
- (tpl_wl_egl_surface_t *)surface->backend.data;
- tpl_wl_egl_display_t *wl_egl_display =
- (tpl_wl_egl_display_t *)surface->display->backend.data;
- tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
+ wl_egl_surface,
+ wl_egl_display->wl_tbm_client,
+ CLIENT_QUEUE_SIZE);
+ if (!wl_egl_surface->tbm_queue) {
+ TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
+ wl_egl_surface, wl_egl_display->wl_tbm_client);
+ return;
+ }
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- tpl_bool_t is_activated = 0;
- int bo_name = 0;
- tbm_surface_h tbm_surface = NULL;
+ TPL_INFO("[QUEUE_CREATION]",
+ "wl_egl_surface(%p) wl_surface(%p) wl_tbm_client(%p)",
+ wl_egl_surface, wl_egl_surface->wl_surface,
+ wl_egl_display->wl_tbm_client);
+ TPL_INFO("[QUEUE_CREATION]",
+ "tbm_queue(%p) size(%d x %d) X %d format(%d)",
+ wl_egl_surface->tbm_queue,
+ wl_egl_surface->width,
+ wl_egl_surface->height,
+ CLIENT_QUEUE_SIZE,
+ wl_egl_surface->format);
- TPL_OBJECT_UNLOCK(surface);
- tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
- wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
- TPL_OBJECT_LOCK(surface);
+ wl_egl_surface->vblank = _thread_create_tdm_client_vblank(
+ wl_egl_display->tdm_client);
+ if (wl_egl_surface->vblank) {
+ TPL_INFO("[VBLANK_INIT]",
+ "wl_egl_surface(%p) tdm_client(%p) vblank(%p)",
+ wl_egl_surface, wl_egl_display->tdm_client,
+ wl_egl_surface->vblank);
+ }
- /* After the can dequeue state, lock the wl_event_mutex to prevent other
- * events from being processed in wayland_egl_thread
- * during below dequeue procedure. */
- tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+ if (wl_egl_display->tss) {
+ wl_egl_surface->tss_flusher =
+ tizen_surface_shm_get_flusher(wl_egl_display->tss,
+ wl_egl_surface->wl_surface);
+ }
- if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
- TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
- wl_egl_surface->tbm_queue, surface);
- if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
- TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
- wl_egl_surface->tbm_queue, surface);
- tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
- return NULL;
+ if (wl_egl_surface->tss_flusher) {
+ tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
+ &tss_flusher_listener,
+ wl_egl_surface);
+ TPL_INFO("[FLUSHER_INIT]",
+ "wl_egl_surface(%p) tss_flusher(%p)",
+ wl_egl_surface, wl_egl_surface->tss_flusher);
+ }
+
+ if (wl_egl_display->explicit_sync && wl_egl_display->use_explicit_sync) {
+ wl_egl_surface->surface_sync =
+ zwp_linux_explicit_synchronization_v1_get_synchronization(
+ wl_egl_display->explicit_sync, wl_egl_surface->wl_surface);
+ if (wl_egl_surface->surface_sync) {
+ TPL_INFO("[EXPLICIT_SYNC_INIT]",
+ "wl_egl_surface(%p) surface_sync(%p)",
+ wl_egl_surface, wl_egl_surface->surface_sync);
} else {
- tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ TPL_WARN("Failed to create surface_sync. | wl_egl_surface(%p)",
+ wl_egl_surface);
+ wl_egl_display->use_explicit_sync = TPL_FALSE;
}
}
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
- wl_egl_surface->tbm_queue, surface);
- tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
- return NULL;
- }
+ wl_egl_surface->committed_buffers = __tpl_list_alloc();
+ wl_egl_surface->in_use_buffers = __tpl_list_alloc();
+ wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc();
+ wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
+ wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
+}
- /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
- * below function [wayland_tbm_client_queue_check_activate()].
- * This function has to be called before tbm_surface_queue_dequeue()
- * in order to know what state the buffer will be dequeued next.
- *
- * ACTIVATED state means non-composite mode. Client can get buffers which
- can be displayed directly(without compositing).
- * DEACTIVATED state means composite mode. Client's buffer will be displayed
- by compositor(E20) with compositing.
- */
- is_activated = wayland_tbm_client_queue_check_activate(
- wl_egl_display->wl_tbm_client,
- wl_egl_surface->tbm_queue);
+static void
+__tpl_wl_egl_surface_fini(tpl_surface_t *surface)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = NULL;
+ tpl_wl_egl_display_t *wl_egl_display = NULL;
- wl_egl_surface->is_activated = is_activated;
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->display);
- surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
- surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
- wl_egl_surface->width = surface->width;
- wl_egl_surface->height = surface->height;
+	TPL_CHECK_ON_TRUE_RETURN(surface->type != TPL_SURFACE_TYPE_WINDOW);
- if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
- /* If surface->frontbuffer is already set in frontbuffer mode,
- * it will return that frontbuffer if it is still activated,
- * otherwise dequeue the new buffer after initializing
- * surface->frontbuffer to NULL. */
- if (is_activated && !wl_egl_surface->reset) {
- bo_name = _get_tbm_surface_bo_name(surface->frontbuffer);
+ wl_egl_surface = (tpl_wl_egl_surface_t *) surface->backend.data;
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
- TPL_LOG_T("WL_EGL",
- "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
- surface->frontbuffer, bo_name);
- TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
- "[DEQ]~[ENQ] BO_NAME:%d",
- bo_name);
- tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
- return surface->frontbuffer;
- } else {
- surface->frontbuffer = NULL;
- wl_egl_surface->need_to_enqueue = TPL_TRUE;
- }
- } else {
- surface->frontbuffer = NULL;
- }
+ wl_egl_display = wl_egl_surface->wl_egl_display;
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
- tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
- &tbm_surface);
- if (!tbm_surface) {
- TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d",
- wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err);
- tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
- return NULL;
- }
+ TPL_INFO("[SURFACE_FINI][BEGIN]",
+ "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface,
+ wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
- tbm_surface_internal_ref(tbm_surface);
+ if (wl_egl_surface->surf_source)
+ tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
+ wl_egl_surface->surf_source = NULL;
- bo_name = _get_tbm_surface_bo_name(tbm_surface);
+ if (wl_egl_surface->wl_egl_window) {
+ struct tizen_private *tizen_private = NULL;
+ struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
+ TPL_INFO("[WL_EGL_WINDOW_FINI]",
+ "wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
+ wl_egl_surface, wl_egl_window,
+ wl_egl_surface->wl_surface);
+ tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
+ if (tizen_private) {
+ tizen_private->set_window_serial_callback = NULL;
+ tizen_private->rotate_callback = NULL;
+ tizen_private->get_rotation_capability = NULL;
+ tizen_private->create_presentation_sync_fd = NULL;
+ tizen_private->create_commit_sync_fd = NULL;
+ tizen_private->set_frontbuffer_callback = NULL;
+ tizen_private->merge_sync_fds = NULL;
+ tizen_private->data = NULL;
+ free(tizen_private);
- wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
- TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
+ wl_egl_window->driver_private = NULL;
+ }
- /* If wl_egl_buffer->release_fence_fd is -1,
- * the tbm_surface can be used immediately.
- * If not, user(EGL) have to wait until signaled. */
- if (release_fence && wl_egl_surface->surface_sync) {
- *release_fence = wl_egl_buffer->release_fence_fd;
- TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
- wl_egl_surface, wl_egl_buffer, *release_fence);
+ wl_egl_window->destroy_window_callback = NULL;
+ wl_egl_window->resize_callback = NULL;
+
+ wl_egl_surface->wl_egl_window = NULL;
}
- if (surface->is_frontbuffer_mode && is_activated)
- surface->frontbuffer = tbm_surface;
+ wl_egl_surface->wl_surface = NULL;
+ wl_egl_surface->wl_egl_display = NULL;
+ wl_egl_surface->tpl_surface = NULL;
- wl_egl_surface->reset = TPL_FALSE;
+ tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+ tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
- TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name);
- TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
- tbm_surface, bo_name, release_fence ? *release_fence : -1);
+ tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+ tpl_gmutex_clear(&wl_egl_surface->presentation_sync.mutex);
- tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+ tpl_gmutex_clear(&wl_egl_surface->surf_mutex);
+ tpl_gcond_clear(&wl_egl_surface->surf_cond);
- return tbm_surface;
+ TPL_INFO("[SURFACE_FINI][END]", "wl_egl_surface(%p)", wl_egl_surface);
+
+ free(wl_egl_surface);
+ surface->backend.data = NULL;
}
static tpl_result_t
-__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
- tbm_surface_h tbm_surface)
+__tpl_wl_egl_surface_set_rotation_capability(tpl_surface_t *surface,
+ tpl_bool_t set)
{
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
-
- tpl_wl_egl_surface_t *wl_egl_surface =
- (tpl_wl_egl_surface_t *)surface->backend.data;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
-
- if (!tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ tpl_wl_egl_surface_t *wl_egl_surface = NULL;
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- /* Stop tracking of this canceled tbm_surface */
- __tpl_list_remove_data(wl_egl_surface->in_use_buffers,
- (void *)tbm_surface, TPL_FIRST, NULL);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
- tbm_surface_internal_unref(tbm_surface);
+ wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
- tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
- tbm_surface, surface);
- return TPL_ERROR_INVALID_OPERATION;
- }
+ TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
- TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
- wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
+ TPL_INFO("[SET_PREROTATION_CAPABILITY]",
+ "wl_egl_surface(%p) prerotation capability set to [%s]",
+ wl_egl_surface, (set ? "TRUE" : "FALSE"));
+ wl_egl_surface->prerotation_capability = set;
return TPL_ERROR_NONE;
}
static tpl_result_t
-__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
- tbm_surface_h tbm_surface,
- int num_rects, const int *rects, int32_t acquire_fence)
+__tpl_wl_egl_surface_set_post_interval(tpl_surface_t *surface,
+ int post_interval)
{
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(surface->backend.data);
- TPL_ASSERT(tbm_surface);
- TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
+ tpl_wl_egl_surface_t *wl_egl_surface = NULL;
- tpl_wl_egl_surface_t *wl_egl_surface =
- (tpl_wl_egl_surface_t *) surface->backend.data;
- tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- tpl_result_t ret = TPL_ERROR_NONE;
- int bo_name = -1;
+ TPL_CHECK_ON_NULL_RETURN_VAL(surface, TPL_ERROR_INVALID_PARAMETER);
- if (!tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
- tbm_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
- bo_name = _get_tbm_surface_bo_name(tbm_surface);
+ TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
- TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
+ TPL_INFO("[SET_POST_INTERVAL]",
+ "wl_egl_surface(%p) post_interval(%d -> %d)",
+ wl_egl_surface, wl_egl_surface->post_interval, post_interval);
- TPL_LOG_T("WL_EGL",
- "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)",
- wl_egl_surface, tbm_surface, bo_name, acquire_fence);
+ wl_egl_surface->post_interval = post_interval;
- wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+ return TPL_ERROR_NONE;
+}
- /* If there are received region information, save it to wl_egl_buffer */
- if (num_rects && rects) {
- if (wl_egl_buffer->rects != NULL) {
- free(wl_egl_buffer->rects);
- wl_egl_buffer->rects = NULL;
- wl_egl_buffer->num_rects = 0;
- }
+static tpl_bool_t
+__tpl_wl_egl_surface_validate(tpl_surface_t *surface)
+{
+ tpl_bool_t retval = TPL_TRUE;
- wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
- wl_egl_buffer->num_rects = num_rects;
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->backend.data);
- if (!wl_egl_buffer->rects) {
- TPL_ERR("Failed to allocate memory fo damage rects info.");
- return TPL_ERROR_OUT_OF_MEMORY;
- }
+ tpl_wl_egl_surface_t *wl_egl_surface =
+ (tpl_wl_egl_surface_t *)surface->backend.data;
- memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects);
- }
+ retval = !(wl_egl_surface->reset);
- if (!wl_egl_surface->need_to_enqueue ||
- !wl_egl_buffer->need_to_commit) {
- TPL_WARN("WL_EGL",
- "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
- ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- return TPL_ERROR_NONE;
- }
+ return retval;
+}
- /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
- * commit if surface->frontbuffer that is already set and the tbm_surface
- * client want to enqueue are the same.
- */
- if (surface->is_frontbuffer_mode) {
- /* The first buffer to be activated in frontbuffer mode must be
- * committed. Subsequence frames do not need to be committed because
- * the buffer is already displayed.
- */
- if (surface->frontbuffer == tbm_surface)
- wl_egl_surface->need_to_enqueue = TPL_FALSE;
+static void
+__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface =
+ (tpl_wl_egl_surface_t *)surface->backend.data;
- if (acquire_fence != -1) {
- close(acquire_fence);
- acquire_fence = -1;
- }
- }
+ if (width)
+ *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
+ if (height)
+ *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
+}
- if (wl_egl_buffer->acquire_fence_fd != -1)
- close(wl_egl_buffer->acquire_fence_fd);
-
- wl_egl_buffer->acquire_fence_fd = acquire_fence;
+#define CAN_DEQUEUE_TIMEOUT_MS 10000
- tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- tbm_surface_internal_unref(tbm_surface);
- TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
- tbm_surface, wl_egl_surface, tsq_err);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+tpl_result_t
+_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+ _print_buffer_lists(wl_egl_surface);
+
+ if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
+ != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
+ wl_egl_surface->tbm_queue, tsq_err);
return TPL_ERROR_INVALID_OPERATION;
}
- tbm_surface_internal_unref(tbm_surface);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ if (wl_egl_surface->committed_buffers) {
+ while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) {
+ tbm_surface_h tbm_surface =
+ __tpl_list_pop_front(wl_egl_surface->committed_buffers,
+ (tpl_free_func_t)__cb_buffer_remove_from_list);
+ TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+ tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+ tbm_surface, tsq_err);
+ }
+ }
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ TPL_INFO("[FORCE_FLUSH]",
+ "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
return TPL_ERROR_NONE;
}
-static tpl_result_t
-_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
+static void
+_wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer,
+ tpl_wl_egl_surface_t *wl_egl_surface)
{
- tbm_surface_h tbm_surface = NULL;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
- tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
- tpl_bool_t ready_to_commit = TPL_FALSE;
+ struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
+ struct tizen_private *tizen_private =
+ (struct tizen_private *)wl_egl_window->driver_private;
- while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) {
- tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue,
- &tbm_surface);
- if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to acquire from tbm_queue(%p)",
- wl_egl_surface->tbm_queue);
- return TPL_ERROR_INVALID_OPERATION;
- }
+ TPL_ASSERT(tizen_private);
- tbm_surface_internal_ref(tbm_surface);
+ wl_egl_buffer->draw_done = TPL_FALSE;
+ wl_egl_buffer->need_to_commit = TPL_TRUE;
- wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
- TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
- "wl_egl_buffer sould be not NULL");
+ wl_egl_buffer->acquire_fence_fd = -1;
+ wl_egl_buffer->release_fence_fd = -1;
+ wl_egl_buffer->commit_sync_fd = -1;
+ wl_egl_buffer->presentation_sync_fd = -1;
- if (wl_egl_buffer->wl_buffer == NULL) {
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
- wl_egl_buffer->wl_buffer =
- (struct wl_proxy *)wayland_tbm_client_create_buffer(
- wl_egl_display->wl_tbm_client, tbm_surface);
+ wl_egl_buffer->presentation_feedback = NULL;
+ wl_egl_buffer->buffer_release = NULL;
- if (!wl_egl_buffer->wl_buffer) {
- TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
- wl_egl_display->wl_tbm_client, tbm_surface);
- }
- }
+ wl_egl_buffer->transform = tizen_private->transform;
- if (wl_egl_buffer->acquire_fence_fd != -1) {
- if (wl_egl_surface->surface_sync)
- ready_to_commit = TPL_TRUE;
- else {
- if (wl_egl_buffer->waiting_source) {
- tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
- wl_egl_buffer->waiting_source = NULL;
- }
+ if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
+ wl_egl_buffer->w_transform = tizen_private->window_transform;
+ wl_egl_buffer->w_rotated = TPL_TRUE;
+ }
- wl_egl_buffer->waiting_source =
- tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
- wl_egl_buffer->acquire_fence_fd, buffer_funcs,
- SOURCE_TYPE_DISPOSABLE);
+ if (wl_egl_surface->set_serial_is_used) {
+ wl_egl_buffer->serial = wl_egl_surface->serial;
+ } else {
+ wl_egl_buffer->serial = ++tizen_private->serial;
+ }
- __tpl_list_push_back(wl_egl_surface->fence_waiting_buffers, tbm_surface);
+ if (wl_egl_buffer->rects) {
+ free(wl_egl_buffer->rects);
+ wl_egl_buffer->rects = NULL;
+ wl_egl_buffer->num_rects = 0;
+ }
+}
- TRACE_ASYNC_BEGIN(wl_egl_buffer, "FENCE WAIT fd(%d)",
- wl_egl_buffer->acquire_fence_fd);
+static tpl_wl_egl_buffer_t *
+_get_wl_egl_buffer(tbm_surface_h tbm_surface)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
+ (void **)&wl_egl_buffer);
+ return wl_egl_buffer;
+}
- ready_to_commit = TPL_FALSE;
- }
- }
+static tpl_wl_egl_buffer_t *
+_wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
+ tbm_surface_h tbm_surface)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
- if (ready_to_commit) {
- if (wl_egl_surface->vblank_done)
- ready_to_commit = TPL_TRUE;
- else {
- __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, tbm_surface);
- ready_to_commit = TPL_FALSE;
- }
- }
+ wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
- if (ready_to_commit)
- _thread_wl_surface_commit(wl_egl_surface, tbm_surface);
+ if (!wl_egl_buffer) {
+ wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t));
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL);
+
+ tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
+ (tbm_data_free)__cb_wl_egl_buffer_free);
+ tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
+ wl_egl_buffer);
+
+ wl_egl_buffer->wl_buffer = NULL;
+ wl_egl_buffer->tbm_surface = tbm_surface;
+ wl_egl_buffer->wl_egl_surface = wl_egl_surface;
+
+ wl_egl_buffer->dx = wl_egl_window->dx;
+ wl_egl_buffer->dy = wl_egl_window->dy;
+ wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
+ wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
+
+ TPL_INFO("[WL_EGL_BUFFER_CREATE]",
+ "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_surface, wl_egl_buffer, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
}
- return TPL_ERROR_NONE;
+ _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface);
+
+ return wl_egl_buffer;
}
-static const struct wl_buffer_listener wl_buffer_release_listener = {
- (void *)__cb_wl_buffer_release,
-};
-
-static void
-__cb_presentation_feedback_sync_output(void *data,
- struct wp_presentation_feedback *presentation_feedback,
- struct wl_output *output)
+static tbm_surface_h
+__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
+ int32_t *release_fence)
{
- TPL_IGNORE(data);
- TPL_IGNORE(presentation_feedback);
- TPL_IGNORE(output);
- /* Nothing to do */
-}
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->backend.data);
+ TPL_ASSERT(surface->display);
+ TPL_ASSERT(surface->display->backend.data);
+ TPL_OBJECT_CHECK_RETURN(surface, NULL);
-static void
-__cb_presentation_feedback_presented(void *data,
- struct wp_presentation_feedback *presentation_feedback,
- uint32_t tv_sec_hi,
- uint32_t tv_sec_lo,
- uint32_t tv_nsec,
- uint32_t refresh_nsec,
- uint32_t seq_hi,
- uint32_t seq_lo,
- uint32_t flags)
-{
- TPL_IGNORE(tv_sec_hi);
- TPL_IGNORE(tv_sec_lo);
- TPL_IGNORE(tv_nsec);
- TPL_IGNORE(refresh_nsec);
- TPL_IGNORE(seq_hi);
- TPL_IGNORE(seq_lo);
- TPL_IGNORE(flags);
+ tpl_wl_egl_surface_t *wl_egl_surface =
+ (tpl_wl_egl_surface_t *)surface->backend.data;
+ tpl_wl_egl_display_t *wl_egl_display =
+ (tpl_wl_egl_display_t *)surface->display->backend.data;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
- tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
- tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
- tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tpl_bool_t is_activated = 0;
+ int bo_name = 0;
+ tbm_surface_h tbm_surface = NULL;
- tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+ TPL_OBJECT_UNLOCK(surface);
+ tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
+ wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
+ TPL_OBJECT_LOCK(surface);
- TPL_DEBUG("[FEEDBACK][PRESENTED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
- wl_egl_surface, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
+ /* After the can dequeue state, lock the wl_event_mutex to prevent other
+ * events from being processed in wayland_egl_thread
+ * during below dequeue procedure. */
+ tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
- if (wl_egl_buffer->presentation_sync_fd != -1) {
- int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
- if (ret == -1) {
- TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
- wl_egl_buffer->presentation_sync_fd);
+ if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
+ TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
+ wl_egl_surface->tbm_queue, surface);
+ if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
+ TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
+ wl_egl_surface->tbm_queue, surface);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+ return NULL;
+ } else {
+ tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
}
+ }
- TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd,
- "[PRESENTATION_SYNC] bo(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
-
- close(wl_egl_buffer->presentation_sync_fd);
- wl_egl_buffer->presentation_sync_fd = -1;
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
+ wl_egl_surface->tbm_queue, surface);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+ return NULL;
}
- if (wl_egl_buffer->presentation_feedback)
- wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback);
+ /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
+ * below function [wayland_tbm_client_queue_check_activate()].
+ * This function has to be called before tbm_surface_queue_dequeue()
+ * in order to know what state the buffer will be dequeued next.
+ *
+ * ACTIVATED state means non-composite mode. Client can get buffers which
+ * can be displayed directly(without compositing).
+ * DEACTIVATED state means composite mode. Client's buffer will be displayed
+ * by compositor(E20) with compositing.
+ */
+ is_activated = wayland_tbm_client_queue_check_activate(
+ wl_egl_display->wl_tbm_client,
+ wl_egl_surface->tbm_queue);
- wl_egl_buffer->presentation_feedback = NULL;
+ wl_egl_surface->is_activated = is_activated;
- __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface,
- TPL_FIRST, NULL);
+ surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
+ surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
+ wl_egl_surface->width = surface->width;
+ wl_egl_surface->height = surface->height;
- tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
-}
+ if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
+ /* If surface->frontbuffer is already set in frontbuffer mode,
+ * it will return that frontbuffer if it is still activated,
+ * otherwise dequeue the new buffer after initializing
+ * surface->frontbuffer to NULL. */
+ if (is_activated && !wl_egl_surface->reset) {
+ bo_name = _get_tbm_surface_bo_name(surface->frontbuffer);
-static void
-__cb_presentation_feedback_discarded(void *data,
- struct wp_presentation_feedback *presentation_feedback)
-{
- tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
- tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
- tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
+ TPL_LOG_T("WL_EGL",
+ "[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
+ surface->frontbuffer, bo_name);
+ TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
+ "[DEQ]~[ENQ] BO_NAME:%d",
+ bo_name);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+ return surface->frontbuffer;
+ } else {
+ surface->frontbuffer = NULL;
+ wl_egl_surface->need_to_enqueue = TPL_TRUE;
+ }
+ } else {
+ surface->frontbuffer = NULL;
+ }
- tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+ tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
+ &tbm_surface);
+ if (!tbm_surface) {
+ TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d",
+ wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+ return NULL;
+ }
- TPL_DEBUG("[FEEDBACK][DISCARDED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
- wl_egl_surface, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
+ tbm_surface_internal_ref(tbm_surface);
- if (wl_egl_buffer->presentation_sync_fd != -1) {
- int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
- if (ret == -1) {
- TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
- wl_egl_buffer->presentation_sync_fd);
- }
+ bo_name = _get_tbm_surface_bo_name(tbm_surface);
- TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd,
- "[PRESENTATION_SYNC] bo(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
+ wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
+ TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
- close(wl_egl_buffer->presentation_sync_fd);
- wl_egl_buffer->presentation_sync_fd = -1;
+ /* If wl_egl_buffer->release_fence_fd is -1,
+ * the tbm_surface can be used immediately.
+ * If not, user(EGL) has to wait until signaled. */
+ if (release_fence && wl_egl_surface->surface_sync) {
+ *release_fence = wl_egl_buffer->release_fence_fd;
+ TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
+ wl_egl_surface, wl_egl_buffer, *release_fence);
}
- if (wl_egl_buffer->presentation_feedback)
- wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback);
+ if (surface->is_frontbuffer_mode && is_activated)
+ surface->frontbuffer = tbm_surface;
- wl_egl_buffer->presentation_feedback = NULL;
+ wl_egl_surface->reset = TPL_FALSE;
- __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface,
- TPL_FIRST, NULL);
+ TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name);
+ TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+ wl_egl_buffer, tbm_surface, bo_name, release_fence ? *release_fence : -1);
- tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
-}
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
-static const struct wp_presentation_feedback_listener feedback_listener = {
- __cb_presentation_feedback_sync_output, /* sync_output feedback -*/
- __cb_presentation_feedback_presented,
- __cb_presentation_feedback_discarded
-};
+ return tbm_surface;
+}
-static void
-_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
- tbm_surface_h tbm_surface)
+static tpl_result_t
+__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface)
{
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
- tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
- struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
- struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
- uint32_t version;
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->backend.data);
- wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
- TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
- "wl_egl_buffer sould be not NULL");
+ tpl_wl_egl_surface_t *wl_egl_surface =
+ (tpl_wl_egl_surface_t *)surface->backend.data;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- if (wl_egl_buffer->wl_buffer == NULL) {
- wl_egl_buffer->wl_buffer =
- (struct wl_proxy *)wayland_tbm_client_create_buffer(
- wl_egl_display->wl_tbm_client, tbm_surface);
+ if (!tbm_surface_internal_is_valid(tbm_surface)) {
+ TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
+ return TPL_ERROR_INVALID_PARAMETER;
}
- TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
- "[FATAL] Failed to create wl_buffer");
- wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer,
- &wl_buffer_release_listener, wl_egl_buffer);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ /* Stop tracking of this canceled tbm_surface */
+ __tpl_list_remove_data(wl_egl_surface->in_use_buffers,
+ (void *)tbm_surface, TPL_FIRST, NULL);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
+ tbm_surface_internal_unref(tbm_surface);
- tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
- if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
- wl_egl_buffer->presentation_feedback =
- wp_presentation_feedback(wl_egl_display->presentation,
- wl_surface);
- wp_presentation_feedback_add_listener(wl_egl_buffer->presentation_feedback,
- &feedback_listener, wl_egl_buffer);
- __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, tbm_surface);
- TRACE_ASYNC_BEGIN(wl_egl_buffer->presentation_sync_fd,
- "[PRESENTATION_SYNC] bo(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
+ tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
+ tbm_surface, surface);
+ return TPL_ERROR_INVALID_OPERATION;
}
- tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
- if (wl_egl_buffer->w_rotated == TPL_TRUE) {
- wayland_tbm_client_set_buffer_transform(
- wl_egl_display->wl_tbm_client,
- (void *)wl_egl_buffer->wl_buffer,
- wl_egl_buffer->w_transform);
- wl_egl_buffer->w_rotated = TPL_FALSE;
- }
+ TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
- if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
- wl_egl_surface->latest_transform = wl_egl_buffer->transform;
- wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
- }
+ return TPL_ERROR_NONE;
+}
- if (wl_egl_window) {
- wl_egl_window->attached_width = wl_egl_buffer->width;
- wl_egl_window->attached_height = wl_egl_buffer->height;
- }
+static tpl_result_t
+__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface,
+ int num_rects, const int *rects, int32_t acquire_fence)
+{
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->display);
+ TPL_ASSERT(surface->backend.data);
+ TPL_ASSERT(tbm_surface);
+ TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
- wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer,
- wl_egl_buffer->dx, wl_egl_buffer->dy);
+ tpl_wl_egl_surface_t *wl_egl_surface =
+ (tpl_wl_egl_surface_t *) surface->backend.data;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ int bo_name = -1;
- if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) {
- if (version < 4) {
- wl_surface_damage(wl_surface,
- wl_egl_buffer->dx, wl_egl_buffer->dy,
- wl_egl_buffer->width, wl_egl_buffer->height);
- } else {
- wl_surface_damage_buffer(wl_surface,
- 0, 0,
- wl_egl_buffer->width, wl_egl_buffer->height);
- }
- } else {
- int i;
- for (i = 0; i < wl_egl_buffer->num_rects; i++) {
- int inverted_y =
- wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] +
- wl_egl_buffer->rects[i * 4 + 3]);
- if (version < 4) {
- wl_surface_damage(wl_surface,
- wl_egl_buffer->rects[i * 4 + 0],
- inverted_y,
- wl_egl_buffer->rects[i * 4 + 2],
- wl_egl_buffer->rects[i * 4 + 3]);
- } else {
- wl_surface_damage_buffer(wl_surface,
- wl_egl_buffer->rects[i * 4 + 0],
- inverted_y,
- wl_egl_buffer->rects[i * 4 + 2],
- wl_egl_buffer->rects[i * 4 + 3]);
- }
- }
+ if (!tbm_surface_internal_is_valid(tbm_surface)) {
+ TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
+ tbm_surface);
+ TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ return TPL_ERROR_INVALID_PARAMETER;
}
- wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
- (void *)wl_egl_buffer->wl_buffer,
- wl_egl_buffer->serial);
+ bo_name = _get_tbm_surface_bo_name(tbm_surface);
- wl_egl_buffer->need_to_release = TPL_TRUE;
+ TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
- if (wl_egl_display->use_explicit_sync &&
- wl_egl_surface->surface_sync) {
+ TPL_LOG_T("WL_EGL",
+ "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)",
+ wl_egl_surface, tbm_surface, bo_name, acquire_fence);
- zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
- wl_egl_buffer->acquire_fence_fd);
- TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)",
- wl_egl_surface, tbm_surface, wl_egl_buffer->acquire_fence_fd);
- close(wl_egl_buffer->acquire_fence_fd);
- wl_egl_buffer->acquire_fence_fd = -1;
+ wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
- wl_egl_buffer->buffer_release =
- zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync);
- if (!wl_egl_buffer->buffer_release) {
- TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface);
- } else {
- zwp_linux_buffer_release_v1_add_listener(
- wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer);
- TPL_DEBUG("add explicit_sync_release_listener.");
+ /* If there are received region information, save it to wl_egl_buffer */
+ if (num_rects && rects) {
+ if (wl_egl_buffer->rects != NULL) {
+ free(wl_egl_buffer->rects);
+ wl_egl_buffer->rects = NULL;
+ wl_egl_buffer->num_rects = 0;
}
- }
-
- wl_surface_commit(wl_surface);
-
- wl_display_flush(wl_egl_display->wl_display);
-
- TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- wl_egl_buffer->need_to_commit = TPL_FALSE;
+ wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
+ wl_egl_buffer->num_rects = num_rects;
- TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)",
- wl_egl_buffer->wl_buffer, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
+ if (!wl_egl_buffer->rects) {
+ TPL_ERR("Failed to allocate memory fo damage rects info.");
+ return TPL_ERROR_OUT_OF_MEMORY;
+ }
- if (wl_egl_display->tdm_initialized &&
- _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
- TPL_ERR("Failed to set wait vblank.");
+ memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects);
+ }
- if (wl_egl_surface->committed_buffers) {
- __tpl_list_push_back(wl_egl_surface->committed_buffers, tbm_surface);
+ if (!wl_egl_surface->need_to_enqueue ||
+ !wl_egl_buffer->need_to_commit) {
+ TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
+ ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
+ TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ return TPL_ERROR_NONE;
}
- tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+ /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
+ * commit if surface->frontbuffer that is already set and the tbm_surface
+ * client want to enqueue are the same.
+ */
+ if (surface->is_frontbuffer_mode) {
+ /* The first buffer to be activated in frontbuffer mode must be
+ * committed. Subsequent frames do not need to be committed because
+ * the buffer is already displayed.
+ */
+ if (surface->frontbuffer == tbm_surface)
+ wl_egl_surface->need_to_enqueue = TPL_FALSE;
- if (wl_egl_buffer->commit_sync_fd != -1) {
- int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
- if (ret == -1) {
- TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd);
+ if (acquire_fence != -1) {
+ close(acquire_fence);
+ acquire_fence = -1;
}
+ }
- TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)",
- wl_egl_surface, wl_egl_buffer->commit_sync_fd);
+ if (wl_egl_buffer->acquire_fence_fd != -1)
+ close(wl_egl_buffer->acquire_fence_fd);
+
+ wl_egl_buffer->acquire_fence_fd = acquire_fence;
- close(wl_egl_buffer->commit_sync_fd);
- wl_egl_buffer->commit_sync_fd = -1;
+ tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ tbm_surface_internal_unref(tbm_surface);
+ TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
+ tbm_surface, wl_egl_surface, tsq_err);
+ TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ return TPL_ERROR_INVALID_OPERATION;
}
- tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+ tbm_surface_internal_unref(tbm_surface);
+
+ TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+
+ return TPL_ERROR_NONE;
}
static tpl_bool_t
wl_egl_surface->render_done_cnt++;
- TRACE_ASYNC_END(wl_egl_buffer, "FENCE WAIT fd(%d)",
+ TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
wl_egl_buffer->acquire_fence_fd);
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- /* This source is used only once and does not allow reuse.
- * So finalize will be executed immediately. */
- g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag);
- g_source_destroy(&wait_source->gsource);
- g_source_unref(&wait_source->gsource);
-
return TPL_FALSE;
}
};
+/* Drain every acquirable buffer from the surface's tbm_queue.
+ * Each acquired buffer is committed immediately when ready, otherwise
+ * parked on a wait list: no acquire-fence signal yet (and no explicit
+ * surface_sync) -> fence_waiting_buffers via a disposable gsource;
+ * fence ready but vblank pending -> vblank_waiting_buffers.
+ * NOTE(review): presumably runs on the wl-egl worker thread with
+ * surf_mutex held by the caller -- confirm at the call sites. */
static tpl_result_t
-_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
+_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
{
-	tdm_error tdm_err = TDM_ERROR_NONE;
-	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+	tbm_surface_h tbm_surface = NULL;
+	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+	tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+	tpl_bool_t ready_to_commit = TPL_FALSE;
-	if (wl_egl_surface->vblank == NULL) {
-		wl_egl_surface->vblank =
-			_thread_create_tdm_client_vblank(wl_egl_display->tdm_client);
-		if (!wl_egl_surface->vblank) {
-			TPL_WARN("Failed to create vblank. wl_egl_surface(%p)",
-					 wl_egl_surface);
-			return TPL_ERROR_OUT_OF_MEMORY;
+	while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) {
+		tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue,
+											&tbm_surface);
+		if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+			TPL_ERR("Failed to acquire from tbm_queue(%p)",
+					wl_egl_surface->tbm_queue);
+			return TPL_ERROR_INVALID_OPERATION;
		}
-	}
-
-	tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank,
-			wl_egl_surface->post_interval,
-			__cb_tdm_client_vblank,
-			(void *)wl_egl_surface);
-
-	if (tdm_err == TDM_ERROR_NONE) {
-		wl_egl_surface->vblank_done = TPL_FALSE;
-		TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK");
-	} else {
-		TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
-		return TPL_ERROR_INVALID_OPERATION;
-	}
-
-	return TPL_ERROR_NONE;
-}
-
-static int
-_write_to_eventfd(int eventfd)
-{
-	uint64_t value = 1;
-	int ret;
-
-	if (eventfd == -1) {
-		TPL_ERR("Invalid fd(-1)");
-		return -1;
-	}
-
-	ret = write(eventfd, &value, sizeof(uint64_t));
-	if (ret == -1) {
-		TPL_ERR("failed to write to fd(%d)", eventfd);
-		return ret;
-	}
-
-	return ret;
-}
-
-void
-__tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend)
-{
-	TPL_ASSERT(backend);
-
-	backend->type = TPL_BACKEND_WAYLAND_THREAD;
-	backend->data = NULL;
-
-	backend->init = __tpl_wl_egl_display_init;
-	backend->fini = __tpl_wl_egl_display_fini;
-	backend->query_config = __tpl_wl_egl_display_query_config;
-	backend->filter_config = __tpl_wl_egl_display_filter_config;
-	backend->get_window_info = __tpl_wl_egl_display_get_window_info;
-	backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
-	backend->get_buffer_from_native_pixmap =
-		__tpl_wl_egl_display_get_buffer_from_native_pixmap;
-}
-
-void
-__tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
-{
-	TPL_ASSERT(backend);
-
-	backend->type = TPL_BACKEND_WAYLAND_THREAD;
-	backend->data = NULL;
-
-	backend->init = __tpl_wl_egl_surface_init;
-	backend->fini = __tpl_wl_egl_surface_fini;
-	backend->validate = __tpl_wl_egl_surface_validate;
-	backend->cancel_dequeued_buffer =
-		__tpl_wl_egl_surface_cancel_dequeued_buffer;
-	backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
-	backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
-	backend->set_rotation_capability =
-		__tpl_wl_egl_surface_set_rotation_capability;
-	backend->set_post_interval =
-		__tpl_wl_egl_surface_set_post_interval;
-	backend->get_size =
-		__tpl_wl_egl_surface_get_size;
-}
-
-/* -- BEGIN -- wl_egl_window callback functions */
-static void
-__cb_destroy_callback(void *private)
-{
-	struct tizen_private *tizen_private = (struct tizen_private *)private;
-	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
-
-	if (!tizen_private) {
-		TPL_LOG_B("WL_EGL", "[DESTROY_CB] Already destroyed surface");
-		return;
-	}
-
-	wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
-	if (wl_egl_surface) {
-		TPL_WARN("[DESTROY_CB][!!!ABNORMAL BEHAVIOR!!!] wl_egl_window(%p) is destroyed.",
-				 wl_egl_surface->wl_egl_window);
-		TPL_WARN("[DESTROY_CB] native window should be destroyed after eglDestroySurface.");
-
-		tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-		wl_egl_surface->wl_egl_window->destroy_window_callback = NULL;
-		wl_egl_surface->wl_egl_window->resize_callback = NULL;
-		wl_egl_surface->wl_egl_window->driver_private = NULL;
-		wl_egl_surface->wl_egl_window = NULL;
-		wl_egl_surface->surf = NULL;
-		wl_egl_surface->is_destroying = TPL_TRUE;
-
-		tizen_private->set_window_serial_callback = NULL;
-		tizen_private->rotate_callback = NULL;
-		tizen_private->get_rotation_capability = NULL;
-		tizen_private->set_frontbuffer_callback = NULL;
-		tizen_private->create_commit_sync_fd = NULL;
-		tizen_private->create_presentation_sync_fd = NULL;
-		tizen_private->data = NULL;
-		free(tizen_private);
-		tizen_private = NULL;
-		tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
-	}
-}
+		tbm_surface_internal_ref(tbm_surface);
-static void
-__cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
-{
-	TPL_ASSERT(private);
-	TPL_ASSERT(wl_egl_window);
+		wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
+									   "wl_egl_buffer sould be not NULL");
-	struct tizen_private *tizen_private = (struct tizen_private *)private;
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
-	int cur_w, cur_h, req_w, req_h, format;
+		/* Lazily create the wl_buffer proxy on first acquire of this
+		 * tbm_surface. On failure only a warning is logged here;
+		 * NOTE(review): _thread_wl_surface_commit later assert-fails on a
+		 * NULL wl_buffer -- confirm this is the intended failure mode. */
+		if (wl_egl_buffer->wl_buffer == NULL) {
+			tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+			wl_egl_buffer->wl_buffer =
+				(struct wl_proxy *)wayland_tbm_client_create_buffer(
+					wl_egl_display->wl_tbm_client, tbm_surface);
-	if (!wl_egl_surface) {
-		TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
-				wl_egl_window);
-		return;
-	}
+			if (!wl_egl_buffer->wl_buffer) {
+				TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
+						 wl_egl_display->wl_tbm_client, tbm_surface);
+			}
+		}
-	format = wl_egl_surface->format;
-	cur_w = wl_egl_surface->width;
-	cur_h = wl_egl_surface->height;
-	req_w = wl_egl_window->width;
-	req_h = wl_egl_window->height;
+		/* A valid acquire fence means the client passed an explicit fence.
+		 * With surface_sync the fence is forwarded with the commit;
+		 * otherwise wait on it here via a one-shot (disposable) gsource. */
+		if (wl_egl_buffer->acquire_fence_fd != -1) {
+			if (wl_egl_surface->surface_sync)
+				ready_to_commit = TPL_TRUE;
+			else {
+				if (wl_egl_buffer->waiting_source) {
+					tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
+					wl_egl_buffer->waiting_source = NULL;
+				}
-	TPL_INFO("[WINDOW_RESIZE]",
-			 "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
-			 wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
+				wl_egl_buffer->waiting_source =
+					tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
+									   wl_egl_buffer->acquire_fence_fd, &buffer_funcs,
+									   SOURCE_TYPE_DISPOSABLE);
-	if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
-		!= TBM_SURFACE_QUEUE_ERROR_NONE) {
-		TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
-		return;
-	}
-}
-/* -- END -- wl_egl_window callback functions */
+				__tpl_list_push_back(wl_egl_surface->fence_waiting_buffers, tbm_surface);
-/* -- BEGIN -- wl_egl_window tizen private callback functions */
+				TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
+								  wl_egl_buffer->acquire_fence_fd);
-/* There is no usecase for using prerotation callback below */
-static void
-__cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private)
-{
-	TPL_ASSERT(private);
-	TPL_ASSERT(wl_egl_window);
+				ready_to_commit = TPL_FALSE;
+			}
+		}
-	struct tizen_private *tizen_private = (struct tizen_private *)private;
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
-	int rotation = tizen_private->rotation;
+		/* Even when the fence is satisfied, commits are throttled to the
+		 * display vblank: defer to vblank_waiting_buffers until the
+		 * pending vblank callback fires. */
+		if (ready_to_commit) {
+			if (wl_egl_surface->vblank_done)
+				ready_to_commit = TPL_TRUE;
+			else {
+				__tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, tbm_surface);
+				ready_to_commit = TPL_FALSE;
+			}
+		}
-	if (!wl_egl_surface) {
-		TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
-				wl_egl_window);
-		return;
+		if (ready_to_commit)
+			_thread_wl_surface_commit(wl_egl_surface, tbm_surface);
	}
-	TPL_INFO("[WINDOW_ROTATE]",
-			 "wl_egl_surface(%p) wl_egl_window(%p) (%d) -> (%d)",
-			 wl_egl_surface, wl_egl_window,
-			 wl_egl_surface->rotation, rotation);
-
-	wl_egl_surface->rotation = rotation;
+	return TPL_ERROR_NONE;
}
-/* There is no usecase for using prerotation callback below */
-static int
-__cb_get_rotation_capability(struct wl_egl_window *wl_egl_window,
-			     void *private)
+/* -- BEGIN -- tdm_client vblank callback function */
+/* Called by the tdm client when the requested vblank arrives (or times
+ * out). Marks the surface vblank_done and commits the oldest buffer that
+ * was deferred to vblank_waiting_buffers, if any.
+ * FIX(review): the wait list may be empty when the vblank fires; guard
+ * the commit against a NULL pop result so _thread_wl_surface_commit is
+ * never entered with a NULL tbm_surface (it assert-fails on one). */
+static void
+__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
+		       unsigned int sequence, unsigned int tv_sec,
+		       unsigned int tv_usec, void *user_data)
{
-	TPL_ASSERT(private);
-	TPL_ASSERT(wl_egl_window);
-
-	int rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_NONE;
-	struct tizen_private *tizen_private = (struct tizen_private *)private;
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data;
+	tbm_surface_h tbm_surface = NULL;
-	if (!wl_egl_surface) {
-		TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
-				wl_egl_window);
-		return rotation_capability;
-	}
+	TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK");
-	if (wl_egl_surface->rotation_capability == TPL_TRUE)
-		rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_SUPPORTED;
-	else
-		rotation_capability = WL_EGL_WINDOW_TIZEN_CAPABILITY_ROTATION_UNSUPPORTED;
+	/* Timeout is treated as non-fatal: log and keep going. */
+	if (error == TDM_ERROR_TIMEOUT)
+		TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)",
+				 wl_egl_surface);
+	wl_egl_surface->vblank_done = TPL_TRUE;
-	return rotation_capability;
+	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+	tbm_surface = (tbm_surface_h)__tpl_list_pop_front(
+						wl_egl_surface->vblank_waiting_buffers,
+						NULL);
+	if (tbm_surface)
+		_thread_wl_surface_commit(wl_egl_surface, tbm_surface);
+	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
+/* -- END -- tdm_client vblank callback function */
+/* zwp_linux_buffer_release_v1 "fenced_release" handler: the compositor
+ * released the buffer and provided a release fence fd. Removes the buffer
+ * from committed_buffers, stores the fence, releases it back to the
+ * tbm_queue and drops the ref taken at acquire time.
+ * NOTE(review): this logic is nearly identical to
+ * __cb_buffer_immediate_release / __cb_wl_buffer_release -- a shared
+ * helper would remove the triplication. Also "sould" in the assert
+ * message below is a typo carried in the runtime string. */
static void
-__cb_set_window_serial_callback(struct wl_egl_window *wl_egl_window,
-								void *private, unsigned int serial)
+__cb_buffer_fenced_release(void *data,
+		struct zwp_linux_buffer_release_v1 *release, int32_t fence)
{
-	TPL_ASSERT(private);
-	TPL_ASSERT(wl_egl_window);
+	tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+	tbm_surface_h tbm_surface = NULL;
-	struct tizen_private *tizen_private = (struct tizen_private *)private;
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+	if (wl_egl_buffer)
+		tbm_surface = wl_egl_buffer->tbm_surface;
-	if (!wl_egl_surface) {
-		TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
-				wl_egl_window);
-		return;
-	}
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		if (wl_egl_buffer->need_to_release) {
+			tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+			tbm_surface_queue_error_e tsq_err;
-	wl_egl_surface->set_serial_is_used = TPL_TRUE;
-	wl_egl_surface->serial = serial;
-}
+			if (wl_egl_surface->committed_buffers) {
+				tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+				__tpl_list_remove_data(wl_egl_surface->committed_buffers,
+									   (void *)tbm_surface,
+									   TPL_FIRST, NULL);
+				tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+			}
-static int
-__cb_create_commit_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
-{
-	TPL_ASSERT(private);
-	TPL_ASSERT(wl_egl_window);
+			wl_egl_buffer->need_to_release = TPL_FALSE;
-	int commit_sync_fd = -1;
+			zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
+			wl_egl_buffer->buffer_release = NULL;
-	struct tizen_private *tizen_private = (struct tizen_private *)private;
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+			/* Ownership of the fence fd passes to wl_egl_buffer;
+			 * NOTE(review): any previous release_fence_fd value is
+			 * overwritten without close -- confirm it is always -1 here. */
+			wl_egl_buffer->release_fence_fd = fence;
-	if (!wl_egl_surface) {
-		TPL_ERR("Invalid parameter. wl_egl_surface is NULL", wl_egl_surface);
-		return -1;
-	}
+			TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
+					   _get_tbm_surface_bo_name(tbm_surface),
+					   fence);
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
-	tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+			TPL_LOG_T("WL_EGL",
+					  "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+					  wl_egl_buffer->wl_buffer, tbm_surface,
+					  _get_tbm_surface_bo_name(tbm_surface),
+					  fence);
-	if (wl_egl_surface->commit_sync.fd != -1) {
-		commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
-		TRACE_MARK("[ONLY_DUP] commit_sync_fd(%d) dup(%d)",
-				   wl_egl_surface->commit_sync.fd, commit_sync_fd);
-		TPL_DEBUG("[DUP_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d) dup(%d)",
-				  wl_egl_surface, wl_egl_surface->commit_sync.fd, commit_sync_fd);
-		tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
-		return commit_sync_fd;
-	}
+			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+												tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
-	wl_egl_surface->commit_sync.fd = eventfd(0, EFD_CLOEXEC);
-	if (wl_egl_surface->commit_sync.fd == -1) {
-		TPL_ERR("Failed to create commit_sync_fd. wl_egl_surface(%p)", wl_egl_surface);
-		tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
-		return -1;
+			/* Drop the ref taken when the buffer was acquired. */
+			tbm_surface_internal_unref(tbm_surface);
+		}
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
+}
-	commit_sync_fd = dup(wl_egl_surface->commit_sync.fd);
+/* zwp_linux_buffer_release_v1 "immediate_release" handler: same flow as
+ * __cb_buffer_fenced_release but with no release fence, so
+ * release_fence_fd is reset to -1. */
+static void
+__cb_buffer_immediate_release(void *data,
+							  struct zwp_linux_buffer_release_v1 *release)
+{
+	tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+	tbm_surface_h tbm_surface = NULL;
-	TRACE_MARK("[CREATE] commit_sync_fd(%d) dup(%d)",
-			   wl_egl_surface->commit_sync.fd, commit_sync_fd);
-	TPL_DEBUG("[CREATE_COMMIT_SYNC] wl_egl_surface(%p) commit_sync_fd(%d)",
-			  wl_egl_surface, commit_sync_fd);
+	if (wl_egl_buffer)
+		tbm_surface = wl_egl_buffer->tbm_surface;
-	tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		if (wl_egl_buffer->need_to_release) {
+			tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+			tbm_surface_queue_error_e tsq_err;
-	return commit_sync_fd;
-}
+			if (wl_egl_surface->committed_buffers) {
+				tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+				__tpl_list_remove_data(wl_egl_surface->committed_buffers,
+									   (void *)tbm_surface,
+									   TPL_FIRST, NULL);
+				tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+			}
-static int
-__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
-{
-	TPL_ASSERT(private);
-	TPL_ASSERT(wl_egl_window);
+			wl_egl_buffer->need_to_release = TPL_FALSE;
-	int presentation_sync_fd = -1;
+			zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
+			wl_egl_buffer->buffer_release = NULL;
-	struct tizen_private *tizen_private = (struct tizen_private *)private;
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
+			wl_egl_buffer->release_fence_fd = -1;
-	if (!wl_egl_surface) {
-		TPL_ERR("Invalid parameter. wl_egl_surface is NULL", wl_egl_surface);
-		return -1;
-	}
+			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
+					   _get_tbm_surface_bo_name(tbm_surface));
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
-	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
-	if (wl_egl_surface->presentation_sync.fd != -1) {
-		presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
-		TRACE_MARK("[ONLY_DUP] presentation_sync_fd(%d) dup(%d)",
-				   wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
-		TPL_DEBUG("[DUP_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
-				  wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
-		tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
-		return presentation_sync_fd;
-	}
+			TPL_LOG_T("WL_EGL",
+					  "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+					  wl_egl_buffer->wl_buffer, tbm_surface,
+					  _get_tbm_surface_bo_name(tbm_surface));
-	wl_egl_surface->presentation_sync.fd = eventfd(0, EFD_CLOEXEC);
-	if (wl_egl_surface->presentation_sync.fd == -1) {
-		TPL_ERR("Failed to create presentation_sync_fd. wl_egl_surface(%p)", wl_egl_surface);
-		tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
-		return -1;
+			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+												tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+			/* Drop the ref taken when the buffer was acquired. */
+			tbm_surface_internal_unref(tbm_surface);
+		}
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
+}
-	presentation_sync_fd = dup(wl_egl_surface->presentation_sync.fd);
-	TRACE_MARK("[CREATE] presentation_sync_fd(%d) dup(%d)",
-			   wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
-	TPL_DEBUG("[CREATE_PRESENTATION_SYNC] wl_egl_surface(%p) presentation_sync_fd(%d) dup(%d)",
-			  wl_egl_surface, wl_egl_surface->presentation_sync.fd, presentation_sync_fd);
+/* Listener for explicit-sync buffer release events.
+ * NOTE(review): "listner" is a typo, but the identifier is referenced
+ * where fenced release is armed (outside this hunk); renaming must be
+ * done at all sites at once. */
+static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
+	__cb_buffer_fenced_release,
+	__cb_buffer_immediate_release,
+};
-	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+/* Plain wl_buffer.release handler (non explicit-sync path): release the
+ * buffer back to the tbm_queue, unlink it from committed_buffers and drop
+ * the acquire-time ref. */
+static void
+__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
+{
+	tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+	tbm_surface_h tbm_surface = NULL;
-	return presentation_sync_fd;
-}
-/* -- END -- wl_egl_window tizen private callback functions */
+	if (wl_egl_buffer)
+		tbm_surface = wl_egl_buffer->tbm_surface;
-/* -- BEGIN -- tizen_surface_shm_flusher_listener */
-static void __cb_tss_flusher_flush_callback(void *data,
-		struct tizen_surface_shm_flusher *tss_flusher)
-{
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
-	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		if (wl_egl_buffer->need_to_release) {
+			tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+			tbm_surface_queue_error_e tsq_err;
-	TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
-			 wl_egl_surface, wl_egl_surface->tbm_queue);
+			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+												tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
-	tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
-	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-		TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
-		return;
-	}
-}
+			if (wl_egl_surface->committed_buffers) {
+				tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+				__tpl_list_remove_data(wl_egl_surface->committed_buffers,
+									   (void *)tbm_surface,
+									   TPL_FIRST, NULL);
+				tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+			}
+
+			wl_egl_buffer->need_to_release = TPL_FALSE;
-static void __cb_tss_flusher_free_flush_callback(void *data,
-		struct tizen_surface_shm_flusher *tss_flusher)
-{
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
-	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+			TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
-	TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
-			 wl_egl_surface, wl_egl_surface->tbm_queue);
+			TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+					  wl_egl_buffer->wl_buffer, tbm_surface,
+					  _get_tbm_surface_bo_name(tbm_surface));
-	tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
-	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
-		TPL_ERR("Failed to free flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
-		return;
+			tbm_surface_internal_unref(tbm_surface);
+		}
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
	}
}
-static const struct tizen_surface_shm_flusher_listener
-tss_flusher_listener = {
-	__cb_tss_flusher_flush_callback,
-	__cb_tss_flusher_free_flush_callback
+/* wl_buffer listener; cast needed because the handler takes a
+ * struct wl_proxy * instead of struct wl_buffer *. */
+static const struct wl_buffer_listener wl_buffer_release_listener = {
+	(void *)__cb_wl_buffer_release,
};
-/* -- END -- tizen_surface_shm_flusher_listener */
-
-/* -- BEGIN -- tbm_surface_queue callback funstions */
+/* wp_presentation_feedback.sync_output handler: intentionally a no-op;
+ * only presented/discarded are of interest to this backend. */
static void
-__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
-							  void *data)
+__cb_presentation_feedback_sync_output(void *data,
+			struct wp_presentation_feedback *presentation_feedback,
+			struct wl_output *output)
{
-	tpl_wl_egl_surface_t *wl_egl_surface = NULL;
-	tpl_wl_egl_display_t *wl_egl_display = NULL;
-	tpl_surface_t *surface = NULL;
-	tpl_bool_t is_activated = TPL_FALSE;
-	int width, height;
+	TPL_IGNORE(data);
+	TPL_IGNORE(presentation_feedback);
+	TPL_IGNORE(output);
+	/* Nothing to do */
+}
-	wl_egl_surface = (tpl_wl_egl_surface_t *)data;
-	TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+/* wp_presentation_feedback.presented handler: the frame reached the
+ * screen. Signals the per-buffer presentation_sync eventfd (then closes
+ * it), destroys the feedback object and unlinks the buffer from the
+ * surface's presentation_feedbacks list. All timing arguments are
+ * ignored by this backend. */
+static void
+__cb_presentation_feedback_presented(void *data,
+		struct wp_presentation_feedback *presentation_feedback,
+		uint32_t tv_sec_hi,
+		uint32_t tv_sec_lo,
+		uint32_t tv_nsec,
+		uint32_t refresh_nsec,
+		uint32_t seq_hi,
+		uint32_t seq_lo,
+		uint32_t flags)
+{
+	TPL_IGNORE(tv_sec_hi);
+	TPL_IGNORE(tv_sec_lo);
+	TPL_IGNORE(tv_nsec);
+	TPL_IGNORE(refresh_nsec);
+	TPL_IGNORE(seq_hi);
+	TPL_IGNORE(seq_lo);
+	TPL_IGNORE(flags);
-	wl_egl_display = wl_egl_surface->wl_egl_display;
-	TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
+	tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+	tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+	tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
-	surface = wl_egl_surface->tpl_surface;
-	TPL_CHECK_ON_NULL_RETURN(surface);
+	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
-	/* When the queue is resized, change the reset flag to TPL_TRUE to reflect
-	 * the changed window size at the next frame. */
-	width = tbm_surface_queue_get_width(tbm_queue);
-	height = tbm_surface_queue_get_height(tbm_queue);
-	if (surface->width != width || surface->height != height) {
-		TPL_INFO("[QUEUE_RESIZE]",
-				 "wl_egl_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
-				 wl_egl_surface, tbm_queue,
-				 surface->width, surface->height, width, height);
-	}
+	TPL_DEBUG("[FEEDBACK][PRESENTED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
+			  wl_egl_surface, tbm_surface,
+			  _get_tbm_surface_bo_name(tbm_surface));
-	/* When queue_reset_callback is called, if is_activated is different from
-	 * its previous state change the reset flag to TPL_TRUE to get a new buffer
-	 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
-	is_activated = wayland_tbm_client_queue_check_activate(wl_egl_display->wl_tbm_client,
-														   wl_egl_surface->tbm_queue);
-	if (wl_egl_surface->is_activated != is_activated) {
-		if (is_activated) {
-			TPL_INFO("[ACTIVATED]",
-					  "wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
-					  wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
-		} else {
-			TPL_LOG_T("[DEACTIVATED]",
-					  " wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
-					  wl_egl_surface, wl_egl_surface->wl_surface, tbm_queue);
+	if (wl_egl_buffer->presentation_sync_fd != -1) {
+		int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
+		if (ret == -1) {
+			TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+					wl_egl_buffer->presentation_sync_fd);
		}
+
+		TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd,
+						"[PRESENTATION_SYNC] bo(%d)",
+						_get_tbm_surface_bo_name(tbm_surface));
+
+		close(wl_egl_buffer->presentation_sync_fd);
+		wl_egl_buffer->presentation_sync_fd = -1;
	}
-	wl_egl_surface->reset = TPL_TRUE;
+	if (wl_egl_buffer->presentation_feedback)
+		wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback);
-	if (surface->reset_cb)
-		surface->reset_cb(surface->reset_data);
+	wl_egl_buffer->presentation_feedback = NULL;
+
+	__tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface,
+						   TPL_FIRST, NULL);
+
+	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
}
+/* wp_presentation_feedback.discarded handler: the frame was never shown.
+ * Performs the same cleanup as the "presented" handler -- signal and
+ * close the presentation_sync eventfd, destroy the feedback object and
+ * unlink the buffer from presentation_feedbacks -- so waiters are not
+ * left blocked on a discarded frame. */
static void
-__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
-								   void *data)
+__cb_presentation_feedback_discarded(void *data,
+		struct wp_presentation_feedback *presentation_feedback)
{
-	TPL_IGNORE(tbm_queue);
+	tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+	tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+	tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
-	TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+	tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
-	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+	TPL_DEBUG("[FEEDBACK][DISCARDED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
+			  wl_egl_surface, tbm_surface,
+			  _get_tbm_surface_bo_name(tbm_surface));
-	tpl_gsource_send_message(wl_egl_surface->surf_source, 2);
+	if (wl_egl_buffer->presentation_sync_fd != -1) {
+		int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
+		if (ret == -1) {
+			TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+					wl_egl_buffer->presentation_sync_fd);
+		}
-	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+		TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd,
+						"[PRESENTATION_SYNC] bo(%d)",
+						_get_tbm_surface_bo_name(tbm_surface));
+
+		close(wl_egl_buffer->presentation_sync_fd);
+		wl_egl_buffer->presentation_sync_fd = -1;
+	}
+
+	if (wl_egl_buffer->presentation_feedback)
+		wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback);
+
+	wl_egl_buffer->presentation_feedback = NULL;
+
+	__tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface,
+						   TPL_FIRST, NULL);
+
+	tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
}
-/* -- END -- tbm_surface_queue callback funstions */
+/* Listener wiring for wp_presentation feedback events; order must match
+ * the protocol: sync_output, presented, discarded. */
+static const struct wp_presentation_feedback_listener feedback_listener = {
+	__cb_presentation_feedback_sync_output, /* sync_output feedback -*/
+	__cb_presentation_feedback_presented,
+	__cb_presentation_feedback_discarded
+};
-/* tdm_client vblank callback function */
-static void
-__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
-					   unsigned int sequence, unsigned int tv_sec,
-					   unsigned int tv_usec, void *user_data)
+/* Arm a tdm_client vblank wait for this surface, lazily creating the
+ * vblank object on first use. On success clears vblank_done so commits
+ * are deferred until __cb_tdm_client_vblank fires. Returns
+ * TPL_ERROR_OUT_OF_MEMORY if the vblank object cannot be created,
+ * TPL_ERROR_INVALID_OPERATION if tdm_client_vblank_wait fails. */
+static tpl_result_t
+_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
{
-	tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data;
-	tbm_surface_h tbm_surface = NULL;
+	tdm_error tdm_err = TDM_ERROR_NONE;
+	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
-	TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK");
+	if (wl_egl_surface->vblank == NULL) {
+		wl_egl_surface->vblank =
+			_thread_create_tdm_client_vblank(wl_egl_display->tdm_client);
+		if (!wl_egl_surface->vblank) {
+			TPL_WARN("Failed to create vblank. wl_egl_surface(%p)",
+					 wl_egl_surface);
+			return TPL_ERROR_OUT_OF_MEMORY;
+		}
+	}
-	if (error == TDM_ERROR_TIMEOUT)
-		TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)",
-				 wl_egl_surface);
+	tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank,
+									 wl_egl_surface->post_interval,
+									 __cb_tdm_client_vblank,
+									 (void *)wl_egl_surface);
-	wl_egl_surface->vblank_done = TPL_TRUE;
+	if (tdm_err == TDM_ERROR_NONE) {
+		wl_egl_surface->vblank_done = TPL_FALSE;
+		TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK");
+	} else {
+		TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
-	tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-	tbm_surface = (tbm_surface_h)__tpl_list_pop_front(
-					wl_egl_surface->vblank_waiting_buffers,
-					NULL);
-	_thread_wl_surface_commit(wl_egl_surface, tbm_surface);
-	tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+	return TPL_ERROR_NONE;
}
static void
-__cb_buffer_fenced_release(void *data,
- struct zwp_linux_buffer_release_v1 *release, int32_t fence)
+_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
+ tbm_surface_h tbm_surface)
{
- tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
- tbm_surface_h tbm_surface = NULL;
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
+ struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
+ uint32_t version;
- if (wl_egl_buffer)
- tbm_surface = wl_egl_buffer->tbm_surface;
+ wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+ TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
+ "wl_egl_buffer sould be not NULL");
+
+ if (wl_egl_buffer->wl_buffer == NULL) {
+ wl_egl_buffer->wl_buffer =
+ (struct wl_proxy *)wayland_tbm_client_create_buffer(
+ wl_egl_display->wl_tbm_client, tbm_surface);
+ }
+ TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
+ "[FATAL] Failed to create wl_buffer");
+
+ wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer,
+ &wl_buffer_release_listener, wl_egl_buffer);
+
+ version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
+
+ tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+ if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
+ wl_egl_buffer->presentation_feedback =
+ wp_presentation_feedback(wl_egl_display->presentation,
+ wl_surface);
+ wp_presentation_feedback_add_listener(wl_egl_buffer->presentation_feedback,
+ &feedback_listener, wl_egl_buffer);
+ __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, tbm_surface);
+ TRACE_ASYNC_BEGIN(wl_egl_buffer->presentation_sync_fd,
+ "[PRESENTATION_SYNC] bo(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+ }
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+
+ if (wl_egl_buffer->w_rotated == TPL_TRUE) {
+ wayland_tbm_client_set_buffer_transform(
+ wl_egl_display->wl_tbm_client,
+ (void *)wl_egl_buffer->wl_buffer,
+ wl_egl_buffer->w_transform);
+ wl_egl_buffer->w_rotated = TPL_FALSE;
+ }
- if (tbm_surface_internal_is_valid(tbm_surface)) {
- if (wl_egl_buffer->need_to_release) {
- tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
- tbm_surface_queue_error_e tsq_err;
+ if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
+ wl_egl_surface->latest_transform = wl_egl_buffer->transform;
+ wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
+ }
- if (wl_egl_surface->committed_buffers) {
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- __tpl_list_remove_data(wl_egl_surface->committed_buffers,
- (void *)tbm_surface,
- TPL_FIRST, NULL);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- }
+ if (wl_egl_window) {
+ wl_egl_window->attached_width = wl_egl_buffer->width;
+ wl_egl_window->attached_height = wl_egl_buffer->height;
+ }
- wl_egl_buffer->need_to_release = TPL_FALSE;
+ wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer,
+ wl_egl_buffer->dx, wl_egl_buffer->dy);
- zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
- wl_egl_buffer->buffer_release = NULL;
+ if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) {
+ if (version < 4) {
+ wl_surface_damage(wl_surface,
+ wl_egl_buffer->dx, wl_egl_buffer->dy,
+ wl_egl_buffer->width, wl_egl_buffer->height);
+ } else {
+ wl_surface_damage_buffer(wl_surface,
+ 0, 0,
+ wl_egl_buffer->width, wl_egl_buffer->height);
+ }
+ } else {
+ int i;
+ for (i = 0; i < wl_egl_buffer->num_rects; i++) {
+ int inverted_y =
+ wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] +
+ wl_egl_buffer->rects[i * 4 + 3]);
+ if (version < 4) {
+ wl_surface_damage(wl_surface,
+ wl_egl_buffer->rects[i * 4 + 0],
+ inverted_y,
+ wl_egl_buffer->rects[i * 4 + 2],
+ wl_egl_buffer->rects[i * 4 + 3]);
+ } else {
+ wl_surface_damage_buffer(wl_surface,
+ wl_egl_buffer->rects[i * 4 + 0],
+ inverted_y,
+ wl_egl_buffer->rects[i * 4 + 2],
+ wl_egl_buffer->rects[i * 4 + 3]);
+ }
+ }
+ }
- wl_egl_buffer->release_fence_fd = fence;
+ wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
+ (void *)wl_egl_buffer->wl_buffer,
+ wl_egl_buffer->serial);
- TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
- _get_tbm_surface_bo_name(tbm_surface),
- fence);
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
+ wl_egl_buffer->need_to_release = TPL_TRUE;
- TPL_LOG_T("WL_EGL",
- "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
- wl_egl_buffer->wl_buffer, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface),
- fence);
+ if (wl_egl_display->use_explicit_sync &&
+ wl_egl_surface->surface_sync) {
- tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+ zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
+ wl_egl_buffer->acquire_fence_fd);
+ TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)",
+ wl_egl_surface, tbm_surface, wl_egl_buffer->acquire_fence_fd);
+ close(wl_egl_buffer->acquire_fence_fd);
+ wl_egl_buffer->acquire_fence_fd = -1;
- tbm_surface_internal_unref(tbm_surface);
+ wl_egl_buffer->buffer_release =
+ zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync);
+ if (!wl_egl_buffer->buffer_release) {
+ TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface);
+ } else {
+ zwp_linux_buffer_release_v1_add_listener(
+ wl_egl_buffer->buffer_release, &zwp_release_listner, wl_egl_buffer);
+ TPL_DEBUG("add explicit_sync_release_listener.");
}
- } else {
- TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
}
-}
-static void
-__cb_buffer_immediate_release(void *data,
- struct zwp_linux_buffer_release_v1 *release)
-{
- tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
- tbm_surface_h tbm_surface = NULL;
+ wl_surface_commit(wl_surface);
- if (wl_egl_buffer)
- tbm_surface = wl_egl_buffer->tbm_surface;
+ wl_display_flush(wl_egl_display->wl_display);
- if (tbm_surface_internal_is_valid(tbm_surface)) {
- if (wl_egl_buffer->need_to_release) {
- tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
- tbm_surface_queue_error_e tsq_err;
+ TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
- if (wl_egl_surface->committed_buffers) {
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- __tpl_list_remove_data(wl_egl_surface->committed_buffers,
- (void *)tbm_surface,
- TPL_FIRST, NULL);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- }
+ wl_egl_buffer->need_to_commit = TPL_FALSE;
- wl_egl_buffer->need_to_release = TPL_FALSE;
+ TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_buffer->wl_buffer, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
- zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
- wl_egl_buffer->buffer_release = NULL;
+ if (wl_egl_display->tdm_initialized &&
+ _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
+ TPL_ERR("Failed to set wait vblank.");
- wl_egl_buffer->release_fence_fd = -1;
+ if (wl_egl_surface->committed_buffers) {
+ __tpl_list_push_back(wl_egl_surface->committed_buffers, tbm_surface);
+ }
- TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
+ tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
- TPL_LOG_T("WL_EGL",
- "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)",
- wl_egl_buffer->wl_buffer, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
+ if (wl_egl_buffer->commit_sync_fd != -1) {
+ int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
+ if (ret == -1) {
+ TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd);
+ }
- tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+ TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+ TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)",
+ wl_egl_surface, wl_egl_buffer->commit_sync_fd);
- tbm_surface_internal_unref(tbm_surface);
- }
- } else {
- TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+ close(wl_egl_buffer->commit_sync_fd);
+ wl_egl_buffer->commit_sync_fd = -1;
}
-}
-static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
- __cb_buffer_fenced_release,
- __cb_buffer_immediate_release,
-};
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+}
/* Write a count of 1 to an eventfd to signal a waiter.
 *
 * eventfd: the event file descriptor, or -1 (rejected).
 * Returns the number of bytes written (sizeof(uint64_t)) on success,
 * -1 on invalid fd or write failure.
 *
 * Fix: retry the write when interrupted by a signal (EINTR), so a
 * commit/presentation sync signal is not silently dropped.
 */
static int
_write_to_eventfd(int eventfd)
{
	uint64_t value = 1; /* eventfd payloads are 8-byte counters */
	int ret;

	if (eventfd == -1) {
		TPL_ERR("Invalid fd(-1)");
		return -1;
	}

	do {
		ret = write(eventfd, &value, sizeof(uint64_t));
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		TPL_ERR("failed to write to fd(%d)", eventfd);
		return ret;
	}

	return ret;
}
/* Public entry point: fill in the display backend vtable with the
 * wl_egl_thread2 implementations. Called by the TPL core when this
 * backend is selected. */
void
__tpl_display_init_backend_wl_egl_thread2(tpl_display_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_egl_display_init;
	backend->fini = __tpl_wl_egl_display_fini;
	backend->query_config = __tpl_wl_egl_display_query_config;
	backend->filter_config = __tpl_wl_egl_display_filter_config;
	backend->get_window_info = __tpl_wl_egl_display_get_window_info;
	backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
	backend->get_buffer_from_native_pixmap =
		__tpl_wl_egl_display_get_buffer_from_native_pixmap;
}
/* Public entry point: fill in the surface backend vtable with the
 * wl_egl_thread2 implementations. Called by the TPL core when this
 * backend is selected. */
void
__tpl_surface_init_backend_wl_egl_thread2(tpl_surface_backend_t *backend)
{
	TPL_ASSERT(backend);

	backend->type = TPL_BACKEND_WAYLAND_THREAD;
	backend->data = NULL;

	backend->init = __tpl_wl_egl_surface_init;
	backend->fini = __tpl_wl_egl_surface_fini;
	backend->validate = __tpl_wl_egl_surface_validate;
	backend->cancel_dequeued_buffer =
		__tpl_wl_egl_surface_cancel_dequeued_buffer;
	backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
	backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
	backend->set_rotation_capability =
		__tpl_wl_egl_surface_set_rotation_capability;
	backend->set_post_interval =
		__tpl_wl_egl_surface_set_post_interval;
	backend->get_size =
		__tpl_wl_egl_surface_get_size;
}
-void
+static void
__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
{
tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->disp_source;
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);