#include "tpl_utils_gthread.h"
-static int buffer_info_key;
-#define KEY_BUFFER_INFO (unsigned long)(&buffer_info_key)
+static int wl_egl_buffer_key;
+#define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key)
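+/* The address of the static int above serves as a process-unique key for
+ * tbm_surface user data; the variable's value itself is never used. */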
/* In wayland, the application and the compositor each create their own drawing buffers. The recommended queue size is more than 2. */
#define CLIENT_QUEUE_SIZE 3
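/* Three buffers typically allow one to be on display, one queued to the
 * compositor, and one being rendered by the client at the same time. */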
typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
+typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t;
struct _tpl_wl_egl_display {
tpl_gsource *disp_source;
/* the lists for buffer tracing */
tpl_list_t *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */
tpl_list_t *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */
- tpl_list_t *fence_waiting_bufferss; /* Trace buffers from ENQUEUE to fence signaled */
+ tpl_list_t *fence_waiting_buffers; /* Trace buffers from ENQUEUE to fence signaled */
tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
- tpl_list_t *render_done_fences; /* for attaching to twe_thread with fences passed by enqueue */
tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
struct {
tpl_bool_t set_serial_is_used;
};
-struct _tpl_wl_egl_bufer {
- tbm_surface_h tbm_surface;
+struct _tpl_wl_egl_buffer {
+ tbm_surface_h tbm_surface;
struct wl_proxy *wl_buffer;
- int dx, dy;
- int width, height;
+ int dx, dy; /* position to attach to wl_surface */
+ int width, height; /* size to attach to wl_surface */
- tpl_wl_egl_surface_t *wl_egl_surface;
+ /* for damage region */
+ int num_rects;
+ int *rects;
/* for wayland_tbm_client_set_buffer_transform */
int w_transform;
/* for wl_surface_set_buffer_transform */
int transform;
- /* for damage region */
- int num_rects;
- int *rects;
-
- unsigned int commit_sync_ts_backup;
-
/* for wayland_tbm_client_set_buffer_serial */
unsigned int serial;
	/* each buffer owns its release_fence_fd until it passes
	 * the ownership to EGL */
- int release_fence_fd;
+ int32_t release_fence_fd;
- /* each buffers own its acquire_fence_fd. until it passes ownership
- * to it to SERVER */
- int acquire_fence_fd;
-};
+	/* each buffer owns its acquire_fence_fd.
+	 * If zwp_linux_buffer_release_v1 is used, the ownership of this fd
+	 * will be passed to the display server.
+	 * Otherwise it will be used as a fence to wait on until render done
+	 * on the tpl thread */
+ int32_t acquire_fence_fd;
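+	/* Fence direction summary: acquire_fence_fd signals the server that
+	 * client rendering is done; release_fence_fd signals the client that
+	 * the server is done reading and the buffer may be reused. */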
-struct sync_info {
- tbm_surface_h tbm_surface;
- int sync_fd;
-};
+	/* Fd used to send a signal when wl_surface_commit is called with this buffer */
+ int32_t commit_sync_fd;
+
+ /* to get presentation feedback from display server */
+ struct wp_presentation_feedback *presentation_feedback;
+
+	/* Fd used to send a signal when the presentation feedback
+	 * is received from the display server */
+ int32_t presentation_sync_fd;
+
+ tpl_gsource *waiting_source;
-struct _twe_fence_wait_source {
- tpl_gsource *fence_source;
- tbm_fd fence_fd;
- tbm_surface_h tbm_surface;
tpl_wl_egl_surface_t *wl_egl_surface;
};
/* If an error occurs in tdm_client_handle_events, it cannot be recovered.
* When tdm_source is no longer available due to an unexpected situation,
- * twe_thread must remove it from the thread and destroy it.
+ * wl_egl_thread must remove it from the thread and destroy it.
* In that case, tdm_vblank can no longer be used for surfaces and displays
* that used this tdm_source. */
if (tdm_err != TDM_ERROR_NONE) {
__thread_func_tdm_finalize(tpl_gsource *gsource)
{
tpl_wl_egl_display_t *wl_egl_display = NULL;
- twe_tdm_source *tdm_source = (twe_tdm_source *)source;
wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
- TPL_LOG_T("WL_EGL", "tdm_destroy| tdm_source(%p) tdm_client(%p)",
- gsource, wl_egl_display->tdm_client);
+ TPL_LOG_T("WL_EGL", "tdm_destroy| wl_egl_display(%p) tdm_client(%p)",
+ wl_egl_display, wl_egl_display->tdm_client);
if (wl_egl_display->tdm_client) {
tdm_client_destroy(wl_egl_display->tdm_client);
ret = wl_display_roundtrip_queue(wl_egl_display->wl_display, queue);
if (ret == -1) {
- _twe_display_print_err(wl_egl_display, "roundtrip_queue");
+ _wl_display_print_err(wl_egl_display, "roundtrip_queue");
result = TPL_ERROR_INVALID_OPERATION;
goto fini;
}
return TPL_GSOURCE_REMOVE;
}
- g_mutex_lock(&wl_egl_display->wl_event_mutex);
+ tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
if (tpl_gsource_check_io_condition(gsource)) {
if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
wl_egl_display->ev_queue) == -1) {
}
wl_display_flush(wl_egl_display->wl_display);
- g_mutex_unlock(&wl_egl_display->wl_event_mutex);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
return TPL_GSOURCE_CONTINUE;
}
return tbm_surface;
}
+tpl_bool_t
+__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
+{
+ struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
+
+	/* MAGIC CHECK: A native display handle is a wl_display if the dereferenced
+	   first value is a memory address pointing to the wl_display_interface
+	   structure. */
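+	/* (In libwayland, struct wl_display begins with a struct wl_proxy whose
+	 * first member is a pointer to its wl_interface, so the first
+	 * pointer-sized value of a real wl_display is &wl_display_interface.) */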
+ if (wl_egl_native_dpy == &wl_display_interface)
+ return TPL_TRUE;
+
+ if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
+ strlen(wl_display_interface.name)) == 0) {
+ return TPL_TRUE;
+ }
+
+ return TPL_FALSE;
+}
+
static tpl_result_t
__tpl_wl_egl_surface_init(tpl_surface_t *surface)
{
return NULL;
}
- if (tbm_surface_queue_add_trace_cb(
- tbm_queue,
- __cb_tbm_queue_trace_callback,
- (void *)wl_egl_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to register trace callback to tbm_surface_queue(%p)",
- tbm_queue);
- tbm_surface_queue_destroy(tbm_queue);
- return NULL;
- }
-
if (tbm_surface_queue_add_acquirable_cb(
tbm_queue,
__cb_tbm_queue_acquirable_callback,
}
if (wl_egl_surface->tss_flusher) {
- tizen_surface_shm_flusher_add_listener(surf_source->tss_flusher,
+ tizen_surface_shm_flusher_add_listener(wl_egl_surface->tss_flusher,
&tss_flusher_listener,
wl_egl_surface);
TPL_INFO("[FLUSHER_INIT]",
wl_egl_surface->in_use_buffers = __tpl_list_alloc();
wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc();
wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
- wl_egl_surface->render_done_fences = __tpl_list_alloc();
wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
}
tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
- /* TODO
if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) {
while (!__tpl_list_is_empty(wl_egl_surface->presentation_feedbacks)) {
tbm_surface_h tbm_surface =
__tpl_list_pop_front(wl_egl_surface->presentation_feedbacks, NULL);
if (tbm_surface_internal_is_valid(tbm_surface)) {
- twe_wl_buffer_info *buf_info = NULL;
- tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
- (void **)&buf_info);
- if (buf_info && buf_info->presentation_sync_fd != -1 &&
- buf_info->presentation_feedback) {
-
- _write_to_eventfd(buf_info->presentation_sync_fd);
- close(buf_info->presentation_sync_fd);
- buf_info->presentation_sync_fd = -1;
-
- wp_presentation_feedback_destroy(buf_info->presentation_feedback);
- buf_info->presentation_feedback = NULL;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+ if (wl_egl_buffer &&
+ wl_egl_buffer->presentation_sync_fd != -1 &&
+ wl_egl_buffer->presentation_feedback) {
+
+ _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
+ close(wl_egl_buffer->presentation_sync_fd);
+ wl_egl_buffer->presentation_sync_fd = -1;
+
+ wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback);
+ wl_egl_buffer->presentation_feedback = NULL;
}
}
}
}
if (wl_egl_surface->presentation_sync.fd != -1) {
- _write_to_eventfd(surf_source->presentation_sync.fd);
- close(surf_source->presentation_sync.fd);
- surf_source->presentation_sync.fd = -1;
+ _write_to_eventfd(wl_egl_surface->presentation_sync.fd);
+ close(wl_egl_surface->presentation_sync.fd);
+ wl_egl_surface->presentation_sync.fd = -1;
}
- */
+
tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
- /* TODO buffer
if (wl_egl_surface->in_use_buffers) {
__tpl_list_free(wl_egl_surface->in_use_buffers,
(tpl_free_func_t)__cb_buffer_remove_from_list);
wl_egl_surface->in_use_buffers = NULL;
}
- if (surf_source->committed_buffers) {
- while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
+ if (wl_egl_surface->committed_buffers) {
+ while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) {
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
tbm_surface_h tbm_surface =
- __tpl_list_pop_front(surf_source->committed_buffers,
+ __tpl_list_pop_front(wl_egl_surface->committed_buffers,
(tpl_free_func_t)__cb_buffer_remove_from_list);
TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
_get_tbm_surface_bo_name(tbm_surface));
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+ tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
tbm_surface, tsq_err);
}
- __tpl_list_free(surf_source->committed_buffers, NULL);
- surf_source->committed_buffers = NULL;
+ __tpl_list_free(wl_egl_surface->committed_buffers, NULL);
+ wl_egl_surface->committed_buffers = NULL;
}
- if (surf_source->vblank_waiting_buffers) {
- while (!__tpl_list_is_empty(surf_source->vblank_waiting_buffers)) {
+ if (wl_egl_surface->vblank_waiting_buffers) {
+ while (!__tpl_list_is_empty(wl_egl_surface->vblank_waiting_buffers)) {
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
tbm_surface_h tbm_surface =
- __tpl_list_pop_front(surf_source->vblank_waiting_buffers,
+ __tpl_list_pop_front(wl_egl_surface->vblank_waiting_buffers,
(tpl_free_func_t)__cb_buffer_remove_from_list);
- tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+ tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
tbm_surface, tsq_err);
}
- __tpl_list_free(surf_source->vblank_waiting_buffers, NULL);
- surf_source->vblank_waiting_buffers = NULL;
+ __tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL);
+ wl_egl_surface->vblank_waiting_buffers = NULL;
}
- if (surf_source->use_sync_fence && surf_source->fence_waiting_sources) {
- while (!__tpl_list_is_empty(surf_source->fence_waiting_sources)) {
- twe_fence_wait_source *wait_source =
- __tpl_list_pop_front(surf_source->fence_waiting_sources,
+ if (wl_egl_surface->fence_waiting_buffers) {
+ while (!__tpl_list_is_empty(wl_egl_surface->fence_waiting_buffers)) {
+ tbm_surface_h tbm_surface =
+ __tpl_list_pop_front(wl_egl_surface->fence_waiting_buffers,
NULL);
- if (!g_source_is_destroyed(&wait_source->gsource)) {
- tbm_surface_internal_unref(wait_source->tbm_surface);
- wait_source->tbm_surface = NULL;
-
- close(wait_source->fence_fd);
- wait_source->fence_fd = -1;
-
- g_source_remove_unix_fd(&wait_source->gsource, wait_source->tag);
- g_source_destroy(&wait_source->gsource);
- g_source_unref(&wait_source->gsource);
- }
+ /* TODO */
}
}
- */
if (wl_egl_surface->surface_sync) {
- TPL_INFO("[SURFACE_SYNC_DESTROY]", "wl_egl_surface(%p) surface_sync(%p)",
+ TPL_INFO("[SURFACE_SYNC_DESTROY]",
+ "wl_egl_surface(%p) surface_sync(%p)",
wl_egl_surface, wl_egl_surface->surface_sync);
zwp_linux_surface_synchronization_v1_destroy(wl_egl_surface->surface_sync);
wl_egl_surface->surface_sync = NULL;
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
TPL_DEBUG("wl_egl_surface(%p) acquirable message received!",
wl_egl_surface);
- /* TODO acquirable */
+ _thread_surface_queue_acquire(wl_egl_surface);
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
return TPL_ERROR_NONE;
}
-static tpl_result_t
-__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
- tbm_surface_h tbm_surface,
- int num_rects, const int *rects, tbm_fd sync_fence)
+static tpl_bool_t
+__tpl_wl_egl_surface_validate(tpl_surface_t *surface)
{
+ tpl_bool_t retval = TPL_TRUE;
+
TPL_ASSERT(surface);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(tbm_surface);
- TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
+ TPL_ASSERT(surface->backend.data);
tpl_wl_egl_surface_t *wl_egl_surface =
- (tpl_wl_egl_surface_t *) surface->backend.data;
- tbm_surface_queue_error_e tsq_err;
- tpl_result_t ret = TPL_ERROR_NONE;
- int bo_name = 0;
-
- TPL_OBJECT_LOCK(wl_egl_surface);
-
- bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+ (tpl_wl_egl_surface_t *)surface->backend.data;
- if (!wl_egl_surface) {
- TPL_ERR("Invalid parameter. surface(%p) wl_egl_surface(%p)",
- surface, wl_egl_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wl_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ retval = !(wl_egl_surface->reset);
- if (!tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
- tbm_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wl_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ return retval;
+}
- TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
+void
+__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface =
+ (tpl_wl_egl_surface_t *)surface->backend.data;
- TPL_LOG_T("WL_EGL",
- "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)",
- wl_egl_surface, tbm_surface, bo_name, sync_fence);
+ if (width)
+ *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
+ if (height)
+ *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
+}
- /* If there are received region information,
- * save it to buf_info in tbm_surface user_data using below API. */
- if (num_rects && rects) {
- ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects);
- if (ret != TPL_ERROR_NONE) {
- TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)",
- num_rects, rects);
- }
- }
+#define CAN_DEQUEUE_TIMEOUT_MS 10000
- if (!wl_egl_surface->need_to_enqueue ||
- !twe_surface_check_commit_needed(wl_egl_surface->twe_surface,
- tbm_surface)) {
- TPL_LOG_T("WL_EGL",
- "[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
- ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wl_egl_surface);
- return TPL_ERROR_NONE;
- }
+tpl_result_t
+_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- /* In frontbuffer mode, will skip tbm_surface_queue_enqueue, acquire, and
- * commit if surface->frontbuffer that is already set and the tbm_surface
- * client want to enqueue are the same.
- */
- if (surface->is_frontbuffer_mode) {
- /* The first buffer to be activated in frontbuffer mode must be
- * committed. Subsequence frames do not need to be committed because
- * the buffer is already displayed.
- */
- if (surface->frontbuffer == tbm_surface)
- wl_egl_surface->need_to_enqueue = TPL_FALSE;
+ _print_buffer_lists(wl_egl_surface);
- if (sync_fence != -1) {
- close(sync_fence);
- sync_fence = -1;
- }
+ if ((tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue))
+ != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
+ wl_egl_surface->tbm_queue, tsq_err);
+ return TPL_ERROR_INVALID_OPERATION;
}
- if (sync_fence != -1) {
- ret = twe_surface_set_sync_fd(wl_egl_surface->twe_surface,
- tbm_surface, sync_fence);
- if (ret != TPL_ERROR_NONE) {
- TPL_WARN("Failed to set sync fd (%d). But it will continue.",
- sync_fence);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ if (wl_egl_surface->committed_buffers) {
+ while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) {
+ tbm_surface_h tbm_surface =
+ __tpl_list_pop_front(wl_egl_surface->committed_buffers,
+ (tpl_free_func_t)__cb_buffer_remove_from_list);
+ TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+ tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+ tbm_surface, tsq_err);
}
}
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- tbm_surface_internal_unref(tbm_surface);
- TPL_ERR("Failed to enqueue tbm_surface(%p). tpl_surface(%p) tsq_err=%d",
- tbm_surface, surface, tsq_err);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wl_egl_surface);
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- tbm_surface_internal_unref(tbm_surface);
-
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_OBJECT_UNLOCK(wl_egl_surface);
+ TPL_INFO("[FORCE_FLUSH]",
+ "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
return TPL_ERROR_NONE;
}
-static tpl_bool_t
-__tpl_wl_egl_surface_validate(tpl_surface_t *surface)
+static void
+_wl_egl_buffer_init(tpl_wl_egl_buffer_t *wl_egl_buffer,
+ tpl_wl_egl_surface_t *wl_egl_surface)
{
- tpl_bool_t retval = TPL_TRUE;
+ struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
+ struct tizen_private *tizen_private =
+ (struct tizen_private *)wl_egl_window->driver_private;
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
+ TPL_ASSERT(tizen_private);
- tpl_wl_egl_surface_t *wl_egl_surface =
- (tpl_wl_egl_surface_t *)surface->backend.data;
+ wl_egl_buffer->draw_done = TPL_FALSE;
+ wl_egl_buffer->need_to_commit = TPL_TRUE;
- retval = !(wl_egl_surface->reset);
+ wl_egl_buffer->acquire_fence_fd = -1;
+ wl_egl_buffer->release_fence_fd = -1;
+ wl_egl_buffer->commit_sync_fd = -1;
+ wl_egl_buffer->presentation_sync_fd = -1;
- return retval;
-}
+ wl_egl_buffer->presentation_feedback = NULL;
+ wl_egl_buffer->buffer_release = NULL;
-static tpl_result_t
-__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
- tbm_surface_h tbm_surface)
-{
- tpl_wl_egl_surface_t *wl_egl_surface = NULL;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ wl_egl_buffer->transform = tizen_private->transform;
- wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
- if (!wl_egl_surface) {
- TPL_ERR("Invalid backend surface. surface(%p) wl_egl_surface(%p)",
- surface, wl_egl_surface);
- return TPL_ERROR_INVALID_PARAMETER;
+ if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
+ wl_egl_buffer->w_transform = tizen_private->window_transform;
+ wl_egl_buffer->w_rotated = TPL_TRUE;
}
- if (!tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
- return TPL_ERROR_INVALID_PARAMETER;
+ if (wl_egl_surface->set_serial_is_used) {
+ wl_egl_buffer->serial = wl_egl_surface->serial;
+ } else {
+ wl_egl_buffer->serial = ++tizen_private->serial;
}
- tbm_surface_internal_unref(tbm_surface);
+ if (wl_egl_buffer->rects) {
+ free(wl_egl_buffer->rects);
+ wl_egl_buffer->rects = NULL;
+ wl_egl_buffer->num_rects = 0;
+ }
+}
- tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
- tbm_surface, surface);
- return TPL_ERROR_INVALID_OPERATION;
+static tpl_wl_egl_buffer_t *
+_get_wl_egl_buffer(tbm_surface_h tbm_surface)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
+ (void **)&wl_egl_buffer);
+ return wl_egl_buffer;
+}
+
+static tpl_wl_egl_buffer_t *
+_wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
+ tbm_surface_h tbm_surface)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+ tpl_bool_t is_new_buffer = TPL_FALSE;
+
+ wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+
+ if (!wl_egl_buffer) {
+ wl_egl_buffer = (tpl_wl_egl_buffer_t *)calloc(1, sizeof(tpl_wl_egl_buffer_t));
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_buffer, NULL);
+
+ tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
+ (tbm_data_free)__cb_wl_egl_buffer_free);
+ tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_EGL_BUFFER,
+ wl_egl_buffer);
+ is_new_buffer = TPL_TRUE;
+
+ wl_egl_buffer->wl_buffer = NULL;
+ wl_egl_buffer->tbm_surface = tbm_surface;
+ wl_egl_buffer->wl_egl_surface = wl_egl_surface;
+
+ wl_egl_buffer->dx = wl_egl_window->dx;
+ wl_egl_buffer->dy = wl_egl_window->dy;
+ wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
+ wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
+
+ TPL_INFO("[WL_EGL_BUFFER_CREATE]",
+ "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_surface, wl_egl_buffer, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
}
- TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
- surface, tbm_surface);
+ _wl_egl_buffer_init(wl_egl_buffer, wl_egl_surface);
- return TPL_ERROR_NONE;
+ return wl_egl_buffer;
}
-#define CAN_DEQUEUE_TIMEOUT_MS 10000
-
static tbm_surface_h
__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
- tbm_fd *sync_fence)
+ int32_t *release_fence)
{
TPL_ASSERT(surface);
TPL_ASSERT(surface->backend.data);
TPL_ASSERT(surface->display->backend.data);
TPL_OBJECT_CHECK_RETURN(surface, NULL);
- tbm_surface_h tbm_surface = NULL;
tpl_wl_egl_surface_t *wl_egl_surface =
(tpl_wl_egl_surface_t *)surface->backend.data;
tpl_wl_egl_display_t *wl_egl_display =
(tpl_wl_egl_display_t *)surface->display->backend.data;
- tbm_surface_queue_error_e tsq_err = 0;
- int is_activated = 0;
- int bo_name = 0;
- tpl_result_t lock_ret = TPL_FALSE;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tpl_bool_t is_activated = 0;
+ int bo_name = 0;
+ tbm_surface_h tbm_surface = NULL;
TPL_OBJECT_UNLOCK(surface);
tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
TPL_OBJECT_LOCK(surface);
- /* After the can dequeue state, call twe_display_lock to prevent other
+ /* After the can dequeue state, lock the wl_event_mutex to prevent other
* events from being processed in wayland_egl_thread
	 * during the dequeue procedure below. */
- lock_ret = twe_display_lock(wl_egl_display->twe_display);
+ tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
- TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
- wl_egl_surface->tbm_queue, surface);
- if (twe_surface_queue_force_flush(wl_egl_surface->twe_surface)
- != TPL_ERROR_NONE) {
+ TPL_WARN("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset. surface(%p)",
+ wl_egl_surface->tbm_queue, surface);
+ if (_tbm_queue_force_flush(wl_egl_surface) != TPL_ERROR_NONE) {
TPL_ERR("Failed to timeout reset. tbm_queue(%p) surface(%p)",
wl_egl_surface->tbm_queue, surface);
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wl_egl_display->twe_display);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
return NULL;
} else {
tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
TPL_ERR("Failed to query can_dequeue. tbm_queue(%p) surface(%p)",
wl_egl_surface->tbm_queue, surface);
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wl_egl_display->twe_display);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
return NULL;
}
* DEACTIVATED state means composite mode. Client's buffer will be displayed
by compositor(E20) with compositing.
*/
- is_activated = twe_surface_check_activated(wl_egl_surface->twe_surface);
+ is_activated = wayland_tbm_client_queue_check_activate(
+ wl_egl_display->wl_tbm_client,
+ wl_egl_surface->tbm_queue);
+
wl_egl_surface->is_activated = is_activated;
surface->width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
surface->height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
+ wl_egl_surface->width = surface->width;
+ wl_egl_surface->height = surface->height;
if (surface->is_frontbuffer_mode && surface->frontbuffer != NULL) {
/* If surface->frontbuffer is already set in frontbuffer mode,
* otherwise dequeue the new buffer after initializing
* surface->frontbuffer to NULL. */
if (is_activated && !wl_egl_surface->reset) {
+ bo_name = _get_tbm_surface_bo_name(surface->frontbuffer);
+
TPL_LOG_T("WL_EGL",
"[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
- surface->frontbuffer,
- tbm_bo_export(tbm_surface_internal_get_bo(
- surface->frontbuffer, 0)));
+ surface->frontbuffer, bo_name);
TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
"[DEQ]~[ENQ] BO_NAME:%d",
- tbm_bo_export(tbm_surface_internal_get_bo(
- surface->frontbuffer, 0)));
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wl_egl_display->twe_display);
+ bo_name);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
return surface->frontbuffer;
} else {
surface->frontbuffer = NULL;
tsq_err = tbm_surface_queue_dequeue(wl_egl_surface->tbm_queue,
&tbm_surface);
if (!tbm_surface) {
- TPL_ERR("Failed to dequeue from tbm_queue(%p) surface(%p)| tsq_err = %d",
- wl_egl_surface->tbm_queue, surface, tsq_err);
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wl_egl_display->twe_display);
+ TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_egl_surface(%p)| tsq_err = %d",
+ wl_egl_surface->tbm_queue, wl_egl_surface, tsq_err);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
return NULL;
}
tbm_surface_internal_ref(tbm_surface);
- /* If twe_surface_get_buffer_release_fence_fd return -1,
+ bo_name = _get_tbm_surface_bo_name(tbm_surface);
+
+ wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
+ TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
+
+ /* If wl_egl_buffer->release_fence_fd is -1,
* the tbm_surface can be used immediately.
	 * If not, the user (EGL) has to wait until it is signaled. */
- if (sync_fence) {
- *sync_fence = twe_surface_get_buffer_release_fence_fd(
- wl_egl_surface->twe_surface, tbm_surface);
+ if (release_fence && wl_egl_surface->surface_sync) {
+ *release_fence = wl_egl_buffer->release_fence_fd;
+ TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
+ wl_egl_surface, wl_egl_buffer, *release_fence);
}
- bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
-
if (surface->is_frontbuffer_mode && is_activated)
surface->frontbuffer = tbm_surface;
TRACE_MARK("[DEQ][NEW]BO_NAME:%d", bo_name);
TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
- TPL_LOG_T("WL_EGL", "[DEQ][N] tbm_surface(%p) bo(%d) fence(%d)",
- tbm_surface, bo_name, sync_fence ? *sync_fence : -1);
+	TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+			  wl_egl_buffer, tbm_surface, bo_name,
+			  release_fence ? *release_fence : -1);
- if (lock_ret == TPL_ERROR_NONE)
- twe_display_unlock(wl_egl_display->twe_display);
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
return tbm_surface;
}
-void
-__tpl_wl_egl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
+static tpl_result_t
+__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface)
{
- tpl_wl_egl_surface_t *wl_egl_surface =
- (tpl_wl_egl_surface_t *)surface->backend.data;
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->backend.data);
- if (width)
- *width = tbm_surface_queue_get_width(wl_egl_surface->tbm_queue);
- if (height)
- *height = tbm_surface_queue_get_height(wl_egl_surface->tbm_queue);
-}
+ tpl_wl_egl_surface_t *wl_egl_surface =
+ (tpl_wl_egl_surface_t *)surface->backend.data;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ if (!tbm_surface_internal_is_valid(tbm_surface)) {
+ TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
-tpl_bool_t
-__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
-{
- struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
- TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_native_dpy, TPL_FALSE);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ /* Stop tracking of this canceled tbm_surface */
+ __tpl_list_remove_data(wl_egl_surface->in_use_buffers,
+ (void *)tbm_surface, TPL_FIRST, NULL);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- /* MAGIC CHECK: A native display handle is a wl_display if the de-referenced first value
- is a memory address pointing the structure of wl_display_interface. */
- if (wl_egl_native_dpy == &wl_display_interface)
- return TPL_TRUE;
+ tbm_surface_internal_unref(tbm_surface);
- if (strncmp(wl_egl_native_dpy->name, wl_display_interface.name,
- strlen(wl_display_interface.name)) == 0) {
- return TPL_TRUE;
+ tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to release tbm_surface(%p) surface(%p)",
+ tbm_surface, surface);
+ return TPL_ERROR_INVALID_OPERATION;
}
- return TPL_FALSE;
+ TPL_LOG_T("WL_EGL", "[CANCEL BUFFER] wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
+
+ return TPL_ERROR_NONE;
}
-void
-__tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend)
+static tpl_result_t
+__tpl_wl_egl_surface_enqueue_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface,
+ int num_rects, const int *rects, int32_t acquire_fence)
{
- TPL_ASSERT(backend);
-
- backend->type = TPL_BACKEND_WAYLAND_THREAD;
- backend->data = NULL;
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->display);
+ TPL_ASSERT(surface->backend.data);
+ TPL_ASSERT(tbm_surface);
+ TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
- backend->init = __tpl_wl_egl_display_init;
- backend->fini = __tpl_wl_egl_display_fini;
- backend->query_config = __tpl_wl_egl_display_query_config;
- backend->filter_config = __tpl_wl_egl_display_filter_config;
- backend->get_window_info = __tpl_wl_egl_display_get_window_info;
- backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
- backend->get_buffer_from_native_pixmap =
- __tpl_wl_egl_display_get_buffer_from_native_pixmap;
-}
+ tpl_wl_egl_surface_t *wl_egl_surface =
+ (tpl_wl_egl_surface_t *) surface->backend.data;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tpl_result_t ret = TPL_ERROR_NONE;
+ int bo_name = -1;
-void
-__tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
+ if (!tbm_surface_internal_is_valid(tbm_surface)) {
+ TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
+ tbm_surface);
+ TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ bo_name = _get_tbm_surface_bo_name(tbm_surface);
+
+ TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
+
+ TPL_LOG_T("WL_EGL",
+ "[ENQ] wl_egl_surface(%p) tbm_surface(%p) bo(%d) fence(%d)",
+ wl_egl_surface, tbm_surface, bo_name, acquire_fence);
+
+ wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+
+ /* If there are received region information, save it to wl_egl_buffer */
+ if (num_rects && rects) {
+ if (wl_egl_buffer->rects != NULL) {
+ free(wl_egl_buffer->rects);
+ wl_egl_buffer->rects = NULL;
+ wl_egl_buffer->num_rects = 0;
+ }
+
+		wl_egl_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
+		if (!wl_egl_buffer->rects) {
+			TPL_ERR("Failed to allocate memory for damage rects info.");
+			return TPL_ERROR_OUT_OF_MEMORY;
+		}
+
+		wl_egl_buffer->num_rects = num_rects;
+
+ memcpy((char *)wl_egl_buffer->rects, (char *)rects, sizeof(int) * 4 * num_rects);
+ }
+
+ if (!wl_egl_surface->need_to_enqueue ||
+ !wl_egl_buffer->need_to_commit) {
+		TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not be enqueued",
+				 ((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
+ TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ return TPL_ERROR_NONE;
+ }
+
+	/* In frontbuffer mode, tbm_surface_queue_enqueue, acquire, and commit
+	 * are skipped when the already-set surface->frontbuffer and the
+	 * tbm_surface the client wants to enqueue are the same.
+	 */
+ if (surface->is_frontbuffer_mode) {
+ /* The first buffer to be activated in frontbuffer mode must be
+	 * committed. Subsequent frames do not need to be committed because
+ * the buffer is already displayed.
+ */
+ if (surface->frontbuffer == tbm_surface)
+ wl_egl_surface->need_to_enqueue = TPL_FALSE;
+
+ if (acquire_fence != -1) {
+ close(acquire_fence);
+ acquire_fence = -1;
+ }
+ }
+
+ if (wl_egl_buffer->acquire_fence_fd != -1)
+ close(wl_egl_buffer->acquire_fence_fd);
+
+ wl_egl_buffer->acquire_fence_fd = acquire_fence;
+
+ tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ tbm_surface_internal_unref(tbm_surface);
+ TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
+ tbm_surface, wl_egl_surface, tsq_err);
+ TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ tbm_surface_internal_unref(tbm_surface);
+
+ TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+
+ return TPL_ERROR_NONE;
+}
+
+static tpl_result_t
+_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+ tbm_surface_h tbm_surface = NULL;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ tpl_bool_t ready_to_commit = TPL_FALSE;
+
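+	/* For each acquired buffer:
+	 *  1. ensure a wl_buffer exists for the tbm_surface
+	 *  2. if an acquire fence must be waited on in this thread,
+	 *     defer the commit to a disposable fence-wait source
+	 *  3. if vblank pacing is still pending, defer to the vblank list
+	 *  4. otherwise commit immediately */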
+ while (tbm_surface_queue_can_acquire(wl_egl_surface->tbm_queue, 0)) {
+ tsq_err = tbm_surface_queue_acquire(wl_egl_surface->tbm_queue,
+ &tbm_surface);
+ if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to acquire from tbm_queue(%p)",
+ wl_egl_surface->tbm_queue);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ tbm_surface_internal_ref(tbm_surface);
+
+ wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
+									   "wl_egl_buffer should not be NULL");
+
+ if (wl_egl_buffer->wl_buffer == NULL) {
+ wl_egl_buffer->wl_buffer =
+ (struct wl_proxy *)wayland_tbm_client_create_buffer(
+ wl_egl_display->wl_tbm_client, tbm_surface);
+
+ if (!wl_egl_buffer->wl_buffer) {
+ TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
+ wl_egl_display->wl_tbm_client, tbm_surface);
+ }
+ }
+
+		if (wl_egl_buffer->acquire_fence_fd == -1 ||
+			wl_egl_surface->surface_sync) {
+			/* No fence needs to be waited on in this thread
+			 * (explicit sync passes the fence to the server),
+			 * so the buffer can proceed to commit. */
+			ready_to_commit = TPL_TRUE;
+		} else {
+			if (wl_egl_buffer->waiting_source) {
+				tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
+				wl_egl_buffer->waiting_source = NULL;
+			}
+
+			wl_egl_buffer->waiting_source =
+				tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
+								   wl_egl_buffer->acquire_fence_fd, buffer_funcs,
+								   SOURCE_TYPE_DISPOSABLE);
+
+			__tpl_list_push_back(wl_egl_surface->fence_waiting_buffers, tbm_surface);
+
+			TRACE_ASYNC_BEGIN(wl_egl_buffer, "FENCE WAIT fd(%d)",
+							  wl_egl_buffer->acquire_fence_fd);
+
+			ready_to_commit = TPL_FALSE;
+		}
+
+		if (ready_to_commit && !wl_egl_surface->vblank_done) {
+			/* Pace commits to vblank: queue until the vblank event arrives. */
+			__tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, tbm_surface);
+			ready_to_commit = TPL_FALSE;
+		}
+
+ if (ready_to_commit)
+ _thread_wl_surface_commit(wl_egl_surface, tbm_surface);
+ }
+
+ return TPL_ERROR_NONE;
+}
+
+static const struct wl_buffer_listener wl_buffer_release_listener = {
+ (void *)__cb_wl_buffer_release,
+};
+
+static void
+__cb_presentation_feedback_sync_output(void *data,
+ struct wp_presentation_feedback *presentation_feedback,
+ struct wl_output *output)
+{
+ TPL_IGNORE(data);
+ TPL_IGNORE(presentation_feedback);
+ TPL_IGNORE(output);
+ /* Nothing to do */
+}
+
+static void
+__cb_presentation_feedback_presented(void *data,
+ struct wp_presentation_feedback *presentation_feedback,
+ uint32_t tv_sec_hi,
+ uint32_t tv_sec_lo,
+ uint32_t tv_nsec,
+ uint32_t refresh_nsec,
+ uint32_t seq_hi,
+ uint32_t seq_lo,
+ uint32_t flags)
+{
+ TPL_IGNORE(tv_sec_hi);
+ TPL_IGNORE(tv_sec_lo);
+ TPL_IGNORE(tv_nsec);
+ TPL_IGNORE(refresh_nsec);
+ TPL_IGNORE(seq_hi);
+ TPL_IGNORE(seq_lo);
+ TPL_IGNORE(flags);
+
+ tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+ tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+ tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
+
+ tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+
+ TPL_DEBUG("[FEEDBACK][PRESENTED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_surface, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ if (wl_egl_buffer->presentation_sync_fd != -1) {
+ int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
+ if (ret == -1) {
+ TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+ wl_egl_buffer->presentation_sync_fd);
+ }
+
+ TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd,
+ "[PRESENTATION_SYNC] bo(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ close(wl_egl_buffer->presentation_sync_fd);
+ wl_egl_buffer->presentation_sync_fd = -1;
+ }
+
+ if (wl_egl_buffer->presentation_feedback)
+ wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback);
+
+ wl_egl_buffer->presentation_feedback = NULL;
+
+ __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface,
+ TPL_FIRST, NULL);
+
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+}
+
+static void
+__cb_presentation_feedback_discarded(void *data,
+ struct wp_presentation_feedback *presentation_feedback)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+ tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+ tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
+
+ tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+
+ TPL_DEBUG("[FEEDBACK][DISCARDED] wl_egl_surface(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_surface, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ if (wl_egl_buffer->presentation_sync_fd != -1) {
+ int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
+ if (ret == -1) {
+ TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+ wl_egl_buffer->presentation_sync_fd);
+ }
+
+ TRACE_ASYNC_END(wl_egl_buffer->presentation_sync_fd,
+ "[PRESENTATION_SYNC] bo(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ close(wl_egl_buffer->presentation_sync_fd);
+ wl_egl_buffer->presentation_sync_fd = -1;
+ }
+
+ if (wl_egl_buffer->presentation_feedback)
+ wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback);
+
+ wl_egl_buffer->presentation_feedback = NULL;
+
+ __tpl_list_remove_data(wl_egl_surface->presentation_feedbacks, tbm_surface,
+ TPL_FIRST, NULL);
+
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+}
+
+static const struct wp_presentation_feedback_listener feedback_listener = {
+	__cb_presentation_feedback_sync_output, /* sync_output */
+ __cb_presentation_feedback_presented,
+ __cb_presentation_feedback_discarded
+};
+
+static void
+_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
+ tbm_surface_h tbm_surface)
+{
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ struct wl_surface *wl_surface = wl_egl_surface->wl_surface;
+ struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
+ uint32_t version;
+
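+	/* Commit sequence: attach wl_buffer, set damage/transform/serial,
+	 * optionally hand the acquire fence to explicit sync and register a
+	 * release listener, then wl_surface_commit(), flush, arm the vblank
+	 * wait, and signal the commit sync eventfd. */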
+ wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+ TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
+								   "wl_egl_buffer should not be NULL");
+
+ if (wl_egl_buffer->wl_buffer == NULL) {
+ wl_egl_buffer->wl_buffer =
+ (struct wl_proxy *)wayland_tbm_client_create_buffer(
+ wl_egl_display->wl_tbm_client, tbm_surface);
+ }
+ TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
+ "[FATAL] Failed to create wl_buffer");
+
+ wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer,
+ &wl_buffer_release_listener, wl_egl_buffer);
+
+ version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
+
+ tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
+ if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
+ wl_egl_buffer->presentation_feedback =
+ wp_presentation_feedback(wl_egl_display->presentation,
+ wl_surface);
+ wp_presentation_feedback_add_listener(wl_egl_buffer->presentation_feedback,
+ &feedback_listener, wl_egl_buffer);
+ __tpl_list_push_back(wl_egl_surface->presentation_feedbacks, tbm_surface);
+ TRACE_ASYNC_BEGIN(wl_egl_buffer->presentation_sync_fd,
+ "[PRESENTATION_SYNC] bo(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+ }
+ tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+
+ if (wl_egl_buffer->w_rotated == TPL_TRUE) {
+ wayland_tbm_client_set_buffer_transform(
+ wl_egl_display->wl_tbm_client,
+ (void *)wl_egl_buffer->wl_buffer,
+ wl_egl_buffer->w_transform);
+ wl_egl_buffer->w_rotated = TPL_FALSE;
+ }
+
+ if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
+ wl_egl_surface->latest_transform = wl_egl_buffer->transform;
+ wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
+ }
+
+ if (wl_egl_window) {
+ wl_egl_window->attached_width = wl_egl_buffer->width;
+ wl_egl_window->attached_height = wl_egl_buffer->height;
+ }
+
+ wl_surface_attach(wl_surface, (void *)wl_egl_buffer->wl_buffer,
+ wl_egl_buffer->dx, wl_egl_buffer->dy);
+
+ if (wl_egl_buffer->num_rects < 1 || wl_egl_buffer->rects == NULL) {
+ if (version < 4) {
+ wl_surface_damage(wl_surface,
+ wl_egl_buffer->dx, wl_egl_buffer->dy,
+ wl_egl_buffer->width, wl_egl_buffer->height);
+ } else {
+ wl_surface_damage_buffer(wl_surface,
+ 0, 0,
+ wl_egl_buffer->width, wl_egl_buffer->height);
+ }
+ } else {
+ int i;
+ for (i = 0; i < wl_egl_buffer->num_rects; i++) {
+ int inverted_y =
+ wl_egl_buffer->height - (wl_egl_buffer->rects[i * 4 + 1] +
+ wl_egl_buffer->rects[i * 4 + 3]);
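+			/* EGL damage rects use a bottom-left origin while wl_surface
+			 * damage is top-left, e.g. height=1080, y=100, h=50
+			 * -> inverted_y = 1080 - (100 + 50) = 930 */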
+ if (version < 4) {
+ wl_surface_damage(wl_surface,
+ wl_egl_buffer->rects[i * 4 + 0],
+ inverted_y,
+ wl_egl_buffer->rects[i * 4 + 2],
+ wl_egl_buffer->rects[i * 4 + 3]);
+ } else {
+ wl_surface_damage_buffer(wl_surface,
+ wl_egl_buffer->rects[i * 4 + 0],
+ inverted_y,
+ wl_egl_buffer->rects[i * 4 + 2],
+ wl_egl_buffer->rects[i * 4 + 3]);
+ }
+ }
+ }
+
+ wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
+ (void *)wl_egl_buffer->wl_buffer,
+ wl_egl_buffer->serial);
+
+ wl_egl_buffer->need_to_release = TPL_TRUE;
+
+ if (wl_egl_display->use_explicit_sync &&
+ wl_egl_surface->surface_sync) {
+
+ zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
+ wl_egl_buffer->acquire_fence_fd);
+ TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_egl_surface(%p) tbm_surface(%p) acquire_fence(%d)",
+ wl_egl_surface, tbm_surface, wl_egl_buffer->acquire_fence_fd);
+ close(wl_egl_buffer->acquire_fence_fd);
+ wl_egl_buffer->acquire_fence_fd = -1;
+
+ wl_egl_buffer->buffer_release =
+ zwp_linux_surface_synchronization_v1_get_release(wl_egl_surface->surface_sync);
+ if (!wl_egl_buffer->buffer_release) {
+ TPL_ERR("Failed to get buffer_release. wl_egl_surface(%p)", wl_egl_surface);
+ } else {
+ zwp_linux_buffer_release_v1_add_listener(
+				wl_egl_buffer->buffer_release, &zwp_release_listener, wl_egl_buffer);
+ TPL_DEBUG("add explicit_sync_release_listener.");
+ }
+ }
+
+ wl_surface_commit(wl_surface);
+
+ wl_display_flush(wl_egl_display->wl_display);
+
+ TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ wl_egl_buffer->need_to_commit = TPL_FALSE;
+
+ TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_buffer->wl_buffer, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ if (wl_egl_display->tdm_initialized &&
+ _thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
+ TPL_ERR("Failed to set wait vblank.");
+
+ if (wl_egl_surface->committed_buffers) {
+ __tpl_list_push_back(wl_egl_surface->committed_buffers, tbm_surface);
+ }
+
+ tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
+
+ if (wl_egl_buffer->commit_sync_fd != -1) {
+ int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
+ if (ret == -1) {
+ TPL_ERR("Failed to send commit_sync signal to fd(%d)", wl_egl_buffer->commit_sync_fd);
+ }
+
+ TRACE_ASYNC_END(wl_egl_buffer->commit_sync_fd, "[COMMIT_SYNC] bo(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+ TPL_DEBUG("[COMMIT_SYNC][SEND] wl_egl_surface(%p) commit_sync_fd(%d)",
+ wl_egl_surface, wl_egl_buffer->commit_sync_fd);
+
+ close(wl_egl_buffer->commit_sync_fd);
+ wl_egl_buffer->commit_sync_fd = -1;
+ }
+
+ tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
+}
+
+static tpl_bool_t
+__thread_func_waiting_source_dispatch(tpl_gsource *gsource, uint64_t message)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer =
+ (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
+ tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+ tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
+
+ wl_egl_surface->render_done_cnt++;
+
+ TRACE_ASYNC_END(wl_egl_buffer, "FENCE WAIT fd(%d)",
+ wl_egl_buffer->acquire_fence_fd);
+
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ __tpl_list_remove_data(wl_egl_surface->fence_waiting_buffers,
+ (void *)tbm_surface, TPL_FIRST, NULL);
+
+ if (wl_egl_surface->vblank_done)
+ _thread_wl_surface_commit(wl_egl_surface, tbm_surface);
+ else
+ __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers,
+ tbm_surface);
+
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+	/* This source was created as SOURCE_TYPE_DISPOSABLE and is used only
+	 * once, so returning TPL_FALSE lets the gsource framework destroy it
+	 * and run finalize immediately. */
+
+ return TPL_FALSE;
+}
+
+static void
+__thread_func_waiting_source_finalize(tpl_gsource *gsource)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer =
+ (tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
+
+ TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
+ wl_egl_buffer, wl_egl_buffer->waiting_source,
+ wl_egl_buffer->acquire_fence_fd);
+
+ close(wl_egl_buffer->acquire_fence_fd);
+ wl_egl_buffer->acquire_fence_fd = -1;
+ wl_egl_buffer->waiting_source = NULL;
+}
+
+static tpl_gsource_functions buffer_funcs = {
+ .prepare = NULL,
+ .check = NULL,
+ .dispatch = __thread_func_waiting_source_dispatch,
+ .finalize = __thread_func_waiting_source_finalize,
+};
+
+static tpl_result_t
+_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+ tdm_error tdm_err = TDM_ERROR_NONE;
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+
+ if (wl_egl_surface->vblank == NULL) {
+ wl_egl_surface->vblank =
+ _thread_create_tdm_client_vblank(wl_egl_display->tdm_client);
+ if (!wl_egl_surface->vblank) {
+ TPL_WARN("Failed to create vblank. wl_egl_surface(%p)",
+ wl_egl_surface);
+ return TPL_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank,
+ wl_egl_surface->post_interval,
+ __cb_tdm_client_vblank,
+ (void *)wl_egl_surface);
+
+ if (tdm_err == TDM_ERROR_NONE) {
+ wl_egl_surface->vblank_done = TPL_FALSE;
+ TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK");
+ } else {
+ TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ return TPL_ERROR_NONE;
+}
+
+static int
+_write_to_eventfd(int eventfd)
+{
+ uint64_t value = 1;
+ int ret;
+
+ if (eventfd == -1) {
+ TPL_ERR("Invalid fd(-1)");
+ return -1;
+ }
+
+ ret = write(eventfd, &value, sizeof(uint64_t));
+ if (ret == -1) {
+ TPL_ERR("failed to write to fd(%d)", eventfd);
+ return ret;
+ }
+
+ return ret;
+}
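+
+/* Counterpart sketch (illustrative only, not part of this patch): a waiter
+ * on one of these sync fds would block until the counter is written, e.g.
+ *
+ *   struct pollfd pfd = { .fd = sync_fd, .events = POLLIN };
+ *   uint64_t value;
+ *   if (poll(&pfd, 1, timeout_ms) > 0 &&
+ *       read(sync_fd, &value, sizeof(value)) == sizeof(value))
+ *       ; // signaled: the awaited commit/presentation has happened
+ */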
+
+void
+__tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend)
+{
+ TPL_ASSERT(backend);
+
+ backend->type = TPL_BACKEND_WAYLAND_THREAD;
+ backend->data = NULL;
+
+ backend->init = __tpl_wl_egl_display_init;
+ backend->fini = __tpl_wl_egl_display_fini;
+ backend->query_config = __tpl_wl_egl_display_query_config;
+ backend->filter_config = __tpl_wl_egl_display_filter_config;
+ backend->get_window_info = __tpl_wl_egl_display_get_window_info;
+ backend->get_pixmap_info = __tpl_wl_egl_display_get_pixmap_info;
+ backend->get_buffer_from_native_pixmap =
+ __tpl_wl_egl_display_get_buffer_from_native_pixmap;
+}
+
+void
+__tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
{
TPL_ASSERT(backend);
free(tizen_private);
tizen_private = NULL;
- tpl_gmutex_unlock(&surf_source->surf_mutex);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
}
surface->reset_cb(surface->reset_data);
}
-static void __cb_tbm_queue_trace_callback(tbm_surface_queue_h tbm_queue,
- tbm_surface_h tbm_surface,
- tbm_surface_queue_trace trace,
- void *data)
-{
- tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
-
- /* TODO */
-}
-
static void
__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
void *data)
}
/* -- END -- tbm_surface_queue callback functions */
+
+/* tdm_client vblank callback function */
+static void
+__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
+ unsigned int sequence, unsigned int tv_sec,
+ unsigned int tv_usec, void *user_data)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data;
+ tbm_surface_h tbm_surface = NULL;
+
+ TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK");
+
+ if (error == TDM_ERROR_TIMEOUT)
+ TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_egl_surface(%p)",
+ wl_egl_surface);
+
+ wl_egl_surface->vblank_done = TPL_TRUE;
+
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ tbm_surface = (tbm_surface_h)__tpl_list_pop_front(
+ wl_egl_surface->vblank_waiting_buffers,
+ NULL);
+	if (tbm_surface)
+		_thread_wl_surface_commit(wl_egl_surface, tbm_surface);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+}
+
+static void
+__cb_buffer_fenced_release(void *data,
+ struct zwp_linux_buffer_release_v1 *release, int32_t fence)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+ tbm_surface_h tbm_surface = NULL;
+
+ if (wl_egl_buffer)
+ tbm_surface = wl_egl_buffer->tbm_surface;
+
+ if (tbm_surface_internal_is_valid(tbm_surface)) {
+ if (wl_egl_buffer->need_to_release) {
+ tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+ tbm_surface_queue_error_e tsq_err;
+
+ if (wl_egl_surface->committed_buffers) {
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ __tpl_list_remove_data(wl_egl_surface->committed_buffers,
+ (void *)tbm_surface,
+ TPL_FIRST, NULL);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ }
+
+ wl_egl_buffer->need_to_release = TPL_FALSE;
+
+ zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
+ wl_egl_buffer->buffer_release = NULL;
+
+ wl_egl_buffer->release_fence_fd = fence;
+
+ TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
+ _get_tbm_surface_bo_name(tbm_surface),
+ fence);
+ TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ TPL_LOG_T("WL_EGL",
+ "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+ wl_egl_buffer->wl_buffer, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface),
+ fence);
+
+ tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+ tbm_surface_internal_unref(tbm_surface);
+ }
+ } else {
+ TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+ }
+}
+
+static void
+__cb_buffer_immediate_release(void *data,
+ struct zwp_linux_buffer_release_v1 *release)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+ tbm_surface_h tbm_surface = NULL;
+
+ if (wl_egl_buffer)
+ tbm_surface = wl_egl_buffer->tbm_surface;
+
+ if (tbm_surface_internal_is_valid(tbm_surface)) {
+ if (wl_egl_buffer->need_to_release) {
+ tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+ tbm_surface_queue_error_e tsq_err;
+
+ if (wl_egl_surface->committed_buffers) {
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ __tpl_list_remove_data(wl_egl_surface->committed_buffers,
+ (void *)tbm_surface,
+ TPL_FIRST, NULL);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ }
+
+ wl_egl_buffer->need_to_release = TPL_FALSE;
+
+ zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
+ wl_egl_buffer->buffer_release = NULL;
+
+ wl_egl_buffer->release_fence_fd = -1;
+
+ TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+ TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ TPL_LOG_T("WL_EGL",
+ "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_buffer->wl_buffer, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+ tbm_surface_internal_unref(tbm_surface);
+ }
+ } else {
+ TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+ }
+}
+
+static const struct zwp_linux_buffer_release_v1_listener zwp_release_listener = {
+ __cb_buffer_fenced_release,
+ __cb_buffer_immediate_release,
+};
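+/* Per zwp_linux_explicit_synchronization_v1, fenced_release delivers a
+ * release fence fd that must signal before the buffer is reused, while
+ * immediate_release means the buffer may be reused right away. */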
+
+static void
+__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
+{
+ tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
+ tbm_surface_h tbm_surface = NULL;
+
+ if (wl_egl_buffer)
+ tbm_surface = wl_egl_buffer->tbm_surface;
+
+ if (tbm_surface_internal_is_valid(tbm_surface)) {
+ if (wl_egl_buffer->need_to_release) {
+ tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+ tbm_surface_queue_error_e tsq_err;
+
+ tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+ if (wl_egl_surface->committed_buffers) {
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ __tpl_list_remove_data(wl_egl_surface->committed_buffers,
+ (void *)tbm_surface,
+ TPL_FIRST, NULL);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ }
+
+ wl_egl_buffer->need_to_release = TPL_FALSE;
+
+ TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
+ TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_buffer->wl_buffer, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ tbm_surface_internal_unref(tbm_surface);
+ }
+ } else {
+ TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+ }
+}
+
+void
+__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+
+ TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
+ wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
+
+ wl_display_flush(wl_egl_display->wl_display);
+
+ if (wl_egl_buffer->wl_buffer)
+ wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
+ (void *)wl_egl_buffer->wl_buffer);
+
+ if (wl_egl_buffer->commit_sync_fd != -1) {
+ int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
+ if (ret == -1)
+ TPL_ERR("Failed to send commit_sync signal to fd(%d)",
+ wl_egl_buffer->commit_sync_fd);
+ close(wl_egl_buffer->commit_sync_fd);
+ wl_egl_buffer->commit_sync_fd = -1;
+ }
+
+ if (wl_egl_buffer->presentation_sync_fd != -1) {
+ int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
+ if (ret == -1)
+ TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
+ wl_egl_buffer->presentation_sync_fd);
+ close(wl_egl_buffer->presentation_sync_fd);
+		wl_egl_buffer->presentation_sync_fd = -1;
+	}
+
+	if (wl_egl_buffer->presentation_feedback)
+		wp_presentation_feedback_destroy(wl_egl_buffer->presentation_feedback);
+	wl_egl_buffer->presentation_feedback = NULL;
+
+ if (wl_egl_buffer->rects) {
+ free(wl_egl_buffer->rects);
+ wl_egl_buffer->rects = NULL;
+ wl_egl_buffer->num_rects = 0;
+ }
+
+ wl_egl_buffer->tbm_surface = NULL;
+
+ free(wl_egl_buffer);
+}
+
+static void
+__cb_buffer_remove_from_list(void *data)
+{
+ tbm_surface_h tbm_surface = (tbm_surface_h)data;
+
+ if (tbm_surface && tbm_surface_internal_is_valid(tbm_surface))
+ tbm_surface_internal_unref(tbm_surface);
+}
+
+static int
+_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
+{
+ return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+}
+
+static void
+_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+ int count = 0;
+ int idx = 0;
+ tpl_list_node_t *node = NULL;
+ tbm_surface_h tbm_surface = NULL;
+
+ /* vblank waiting list */
+ count = __tpl_list_get_count(wl_egl_surface->vblank_waiting_buffers);
+ TPL_DEBUG("VBLANK WAITING BUFFERS | wl_egl_surface(%p) list(%p) count(%d)",
+ wl_egl_surface, wl_egl_surface->vblank_waiting_buffers, count);
+
+ while ((!node &&
+ (node = __tpl_list_get_front_node(wl_egl_surface->vblank_waiting_buffers))) ||
+ (node && (node = __tpl_list_node_next(node)))) {
+ tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
+ TPL_DEBUG("VBLANK WAITING BUFFERS | %d | tbm_surface(%p) bo(%d)",
+ idx, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+ idx++;
+ }
+
+ idx = 0;
+ node = NULL;
+
+ /* in use buffers list */
+ count = __tpl_list_get_count(wl_egl_surface->in_use_buffers);
+ TPL_DEBUG("DEQUEUED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)",
+ wl_egl_surface, wl_egl_surface->in_use_buffers, count);
+
+ while ((!node &&
+ (node = __tpl_list_get_front_node(wl_egl_surface->in_use_buffers))) ||
+ (node && (node = __tpl_list_node_next(node)))) {
+ tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
+ TPL_DEBUG("DEQUEUED BUFFERS | %d | tbm_surface(%p) bo(%d)",
+ idx, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+ idx++;
+ }
+
+ idx = 0;
+ node = NULL;
+
+ /* committed buffers list */
+ count = __tpl_list_get_count(wl_egl_surface->committed_buffers);
+ TPL_DEBUG("COMMITTED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)",
+ wl_egl_surface, wl_egl_surface->committed_buffers, count);
+
+ while ((!node &&
+ (node = __tpl_list_get_front_node(wl_egl_surface->committed_buffers))) ||
+ (node && (node = __tpl_list_node_next(node)))) {
+ tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
+ TPL_DEBUG("COMMITTED BUFFERS | %d | tbm_surface(%p) bo(%d)",
+ idx, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+ idx++;
+ }
+}