#include "wayland-egl-tizen/wayland-egl-tizen.h"
#include "wayland-egl-tizen/wayland-egl-tizen-priv.h"
+#ifndef TIZEN_FEATURE_ENABLE
+#define TIZEN_FEATURE_ENABLE 1
+#endif
+
+#if TIZEN_FEATURE_ENABLE
#include <tizen-surface-client-protocol.h>
#include <presentation-time-client-protocol.h>
#include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
+#endif
#include "tpl_utils_gthread.h"
#define KEY_WL_EGL_BUFFER (unsigned long)(&wl_egl_buffer_key)
/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
-#define CLIENT_QUEUE_SIZE 3
-#define BUFFER_ARRAY_SIZE (CLIENT_QUEUE_SIZE * 2)
+#define BUFFER_ARRAY_SIZE 9
typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
typedef struct _tpl_wl_egl_buffer tpl_wl_egl_buffer_t;
+typedef struct _surface_vblank tpl_surface_vblank_t;
struct _tpl_wl_egl_display {
tpl_gsource *disp_source;
struct wayland_tbm_client *wl_tbm_client;
int last_error; /* errno of the last wl_display error*/
- tpl_bool_t wl_initialized;
- tpl_bool_t tdm_initialized;
-
- tdm_client *tdm_client;
- tpl_gsource *tdm_source;
- int tdm_display_fd;
+ tpl_bool_t wl_initialized;
tpl_bool_t use_wait_vblank;
tpl_bool_t use_explicit_sync;
+ tpl_bool_t use_tss;
tpl_bool_t prepared;
-
+ /* To make sure that tpl_gsource has been successfully finalized. */
+ tpl_bool_t gsource_finalized;
+ tpl_gmutex disp_mutex;
+ tpl_gcond disp_cond;
+ struct {
+ tdm_client *tdm_client;
+ tpl_gsource *tdm_source;
+ int tdm_display_fd;
+ tpl_bool_t tdm_initialized;
+ tpl_list_t *surface_vblanks;
+
+ /* To make sure that tpl_gsource has been successfully finalized. */
+ tpl_bool_t gsource_finalized;
+ tpl_gmutex tdm_mutex;
+ tpl_gcond tdm_cond;
+ } tdm;
+
+#if TIZEN_FEATURE_ENABLE
struct tizen_surface_shm *tss; /* used for surface buffer_flush */
struct wp_presentation *presentation; /* for presentation feedback */
struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
+#endif
};
/* Messages sent to the surface thread through tpl_gsource_send_message().
 * sent_message latches the in-flight value so duplicate ACQUIRABLE
 * notifications are coalesced until the thread consumes the message. */
typedef enum surf_message {
	NONE_MESSAGE = 0, /* no message currently pending */
	INIT_SURFACE,     /* ask the thread to run _thread_wl_egl_surface_init() */
	ACQUIRABLE,       /* tbm_queue has a buffer ready to acquire */
} surf_message;
+
struct _tpl_wl_egl_surface {
tpl_gsource *surf_source;
tbm_surface_queue_h tbm_queue;
+ int num_buffers;
struct wl_egl_window *wl_egl_window;
struct wl_surface *wl_surface;
+
+#if TIZEN_FEATURE_ENABLE
struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */
+#endif
- tdm_client_vblank *vblank;
+ tpl_surface_vblank_t *vblank;
/* surface information */
int render_done_cnt;
tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE];
int buffer_cnt; /* the number of using wl_egl_buffers */
tpl_gmutex buffers_mutex;
+ tbm_surface_h last_enq_buffer;
- tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
struct {
tpl_gmutex surf_mutex;
tpl_gcond surf_cond;
+ surf_message sent_message;
+
/* for waiting draw done */
tpl_bool_t use_render_done_fence;
tpl_bool_t is_activated;
tpl_bool_t prerotation_capability;
tpl_bool_t vblank_done;
tpl_bool_t set_serial_is_used;
+ tpl_bool_t initialized_in_thread;
+
+ /* To make sure that tpl_gsource has been successfully finalized. */
+ tpl_bool_t gsource_finalized;
+};
+
+struct _surface_vblank {
+ tdm_client_vblank *tdm_vblank;
+ tpl_wl_egl_surface_t *wl_egl_surface;
+ tpl_list_t *waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
+ tpl_gmutex mutex;
};
typedef enum buffer_status {
/* for checking draw done */
tpl_bool_t draw_done;
-
+#if TIZEN_FEATURE_ENABLE
/* to get release event via zwp_linux_buffer_release_v1 */
struct zwp_linux_buffer_release_v1 *buffer_release;
-
+#endif
/* each buffers own its release_fence_fd, until it passes ownership
* to it to EGL */
int32_t release_fence_fd;
tpl_wl_egl_surface_t *wl_egl_surface;
};
+#if TIZEN_FEATURE_ENABLE
struct pst_feedback {
/* to get presentation feedback from display server */
struct wp_presentation_feedback *presentation_feedback;
tpl_wl_egl_surface_t *wl_egl_surface;
};
+#endif
+
+static const struct wl_buffer_listener wl_buffer_release_listener;
static int
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
static void
_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
+static tpl_bool_t
+_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface);
static void
__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
static tpl_wl_egl_buffer_t *
static void
_thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
tpl_wl_egl_buffer_t *wl_egl_buffer);
+static void
+__cb_surface_vblank_free(void *data);
+
+static struct tizen_private *
+tizen_private_create()
+{
+ struct tizen_private *private = NULL;
+ private = (struct tizen_private *)calloc(1, sizeof(struct tizen_private));
+ if (private) {
+ private->magic = WL_EGL_TIZEN_MAGIC;
+ private->rotation = 0;
+ private->frontbuffer_mode = 0;
+ private->transform = 0;
+ private->window_transform = 0;
+ private->serial = 0;
+
+ private->data = NULL;
+ private->rotate_callback = NULL;
+ private->get_rotation_capability = NULL;
+ private->set_window_serial_callback = NULL;
+ private->set_frontbuffer_callback = NULL;
+ private->create_commit_sync_fd = NULL;
+ private->create_presentation_sync_fd = NULL;
+ private->merge_sync_fds = NULL;
+ }
+
+ return private;
+}
static tpl_bool_t
_check_native_handle_is_wl_display(tpl_handle_t display)
return TPL_FALSE;
}
- tdm_err = tdm_client_handle_events(wl_egl_display->tdm_client);
+ tdm_err = tdm_client_handle_events(wl_egl_display->tdm.tdm_client);
/* If an error occurs in tdm_client_handle_events, it cannot be recovered.
* When tdm_source is no longer available due to an unexpected situation,
tpl_gsource_destroy(gsource, TPL_FALSE);
- wl_egl_display->tdm_source = NULL;
+ wl_egl_display->tdm.tdm_source = NULL;
return TPL_FALSE;
}
wl_egl_display = (tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
- TPL_LOG_T("WL_EGL", "tdm_destroy| wl_egl_display(%p) tdm_client(%p)",
- wl_egl_display, wl_egl_display->tdm_client);
+ tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
+
+ TPL_INFO("[TDM_CLIENT_FINI]",
+ "wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
+ wl_egl_display, wl_egl_display->tdm.tdm_client,
+ wl_egl_display->tdm.tdm_display_fd);
+
+ if (wl_egl_display->tdm.tdm_client) {
+
+ if (wl_egl_display->tdm.surface_vblanks) {
+ __tpl_list_free(wl_egl_display->tdm.surface_vblanks,
+ __cb_surface_vblank_free);
+ wl_egl_display->tdm.surface_vblanks = NULL;
+ }
- if (wl_egl_display->tdm_client) {
- tdm_client_destroy(wl_egl_display->tdm_client);
- wl_egl_display->tdm_client = NULL;
- wl_egl_display->tdm_display_fd = -1;
+ tdm_client_destroy(wl_egl_display->tdm.tdm_client);
+ wl_egl_display->tdm.tdm_client = NULL;
+ wl_egl_display->tdm.tdm_display_fd = -1;
+ wl_egl_display->tdm.tdm_source = NULL;
}
- wl_egl_display->tdm_initialized = TPL_FALSE;
+ wl_egl_display->use_wait_vblank = TPL_FALSE;
+ wl_egl_display->tdm.tdm_initialized = TPL_FALSE;
+ wl_egl_display->tdm.gsource_finalized = TPL_TRUE;
+
+ tpl_gcond_signal(&wl_egl_display->tdm.tdm_cond);
+ tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
}
static tpl_gsource_functions tdm_funcs = {
return TPL_ERROR_INVALID_OPERATION;
}
- wl_egl_display->tdm_display_fd = tdm_display_fd;
- wl_egl_display->tdm_client = tdm_client;
- wl_egl_display->tdm_source = NULL;
- wl_egl_display->tdm_initialized = TPL_TRUE;
+ wl_egl_display->tdm.tdm_display_fd = tdm_display_fd;
+ wl_egl_display->tdm.tdm_client = tdm_client;
+ wl_egl_display->tdm.tdm_source = NULL;
+ wl_egl_display->tdm.tdm_initialized = TPL_TRUE;
+ wl_egl_display->tdm.surface_vblanks = __tpl_list_alloc();
TPL_INFO("[TDM_CLIENT_INIT]",
"wl_egl_display(%p) tdm_client(%p) tdm_display_fd(%d)",
#define IMPL_TIZEN_SURFACE_SHM_VERSION 2
+
static void
__cb_wl_resistry_global_callback(void *data, struct wl_registry *wl_registry,
uint32_t name, const char *interface,
uint32_t version)
{
+#if TIZEN_FEATURE_ENABLE
tpl_wl_egl_display_t *wl_egl_display = (tpl_wl_egl_display_t *)data;
if (!strcmp(interface, "tizen_surface_shm")) {
&tizen_surface_shm_interface,
((version < IMPL_TIZEN_SURFACE_SHM_VERSION) ?
version : IMPL_TIZEN_SURFACE_SHM_VERSION));
+ wl_egl_display->use_tss = TPL_TRUE;
} else if (!strcmp(interface, wp_presentation_interface.name)) {
wl_egl_display->presentation =
wl_registry_bind(wl_registry,
TPL_DEBUG("bind zwp_linux_explicit_synchronization_v1_interface");
}
}
+#endif
}
static void
goto fini;
}
+#if TIZEN_FEATURE_ENABLE
/* set tizen_surface_shm's queue as client's private queue */
if (wl_egl_display->tss) {
wl_proxy_set_queue((struct wl_proxy *)wl_egl_display->tss,
TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.",
wl_egl_display->explicit_sync);
}
-
+#endif
wl_egl_display->wl_initialized = TPL_TRUE;
TPL_INFO("[WAYLAND_INIT]",
"wl_egl_display(%p) wl_display(%p) wl_tbm_client(%p) event_queue(%p)",
wl_egl_display, wl_egl_display->wl_display,
wl_egl_display->wl_tbm_client, wl_egl_display->ev_queue);
+#if TIZEN_FEATURE_ENABLE
TPL_INFO("[WAYLAND_INIT]",
"tizen_surface_shm(%p) wp_presentation(%p) explicit_sync(%p)",
wl_egl_display->tss, wl_egl_display->presentation,
wl_egl_display->explicit_sync);
-
+#endif
fini:
if (display_wrapper)
wl_proxy_wrapper_destroy(display_wrapper);
wl_egl_display->prepared = TPL_FALSE;
}
- if (wl_display_dispatch_queue_pending(wl_egl_display->wl_display,
- wl_egl_display->ev_queue) == -1) {
- _wl_display_print_err(wl_egl_display, "dispatch_queue_pending");
+ if (wl_display_roundtrip_queue(wl_egl_display->wl_display,
+ wl_egl_display->ev_queue) == -1) {
+ _wl_display_print_err(wl_egl_display, "roundtrip_queue");
}
+#if TIZEN_FEATURE_ENABLE
if (wl_egl_display->tss) {
TPL_INFO("[TIZEN_SURFACE_SHM_DESTROY]",
"wl_egl_display(%p) tizen_surface_shm(%p) fini.",
zwp_linux_explicit_synchronization_v1_destroy(wl_egl_display->explicit_sync);
wl_egl_display->explicit_sync = NULL;
}
-
+#endif
if (wl_egl_display->wl_tbm_client) {
struct wl_proxy *wl_tbm = NULL;
wl_event_queue_destroy(wl_egl_display->ev_queue);
+ wl_egl_display->ev_queue = NULL;
wl_egl_display->wl_initialized = TPL_FALSE;
TPL_INFO("[DISPLAY_FINI]", "wl_egl_display(%p) wl_display(%p)",
wl_egl_display, wl_egl_display->wl_display);
}
- if (_thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
+ if (wl_egl_display->use_wait_vblank &&
+ _thread_tdm_init(wl_egl_display) != TPL_ERROR_NONE) {
TPL_WARN("Failed to initialize tdm-client. TPL_WAIT_VLANK:DISABLED");
}
tpl_wl_egl_display_t *wl_egl_display =
(tpl_wl_egl_display_t *)tpl_gsource_get_data(gsource);
+ tpl_gmutex_lock(&wl_egl_display->disp_mutex);
+ TPL_DEBUG("[FINALIZE] wl_egl_display(%p) tpl_gsource(%p)",
+ wl_egl_display, gsource);
+
if (wl_egl_display->wl_initialized)
_thread_wl_display_fini(wl_egl_display);
- TPL_LOG_T("WL_EGL", "finalize| wl_egl_display(%p) tpl_gsource(%p)",
- wl_egl_display, gsource);
+ wl_egl_display->gsource_finalized = TPL_TRUE;
+
+ tpl_gcond_signal(&wl_egl_display->disp_cond);
+ tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
return;
}
display->backend.data = wl_egl_display;
display->bufmgr_fd = -1;
- wl_egl_display->tdm_initialized = TPL_FALSE;
+ wl_egl_display->tdm.tdm_initialized = TPL_FALSE;
+ wl_egl_display->tdm.tdm_client = NULL;
+ wl_egl_display->tdm.tdm_display_fd = -1;
+ wl_egl_display->tdm.tdm_source = NULL;
+
wl_egl_display->wl_initialized = TPL_FALSE;
wl_egl_display->ev_queue = NULL;
wl_egl_display->wl_display = (struct wl_display *)display->native_handle;
wl_egl_display->last_error = 0;
+ wl_egl_display->use_tss = TPL_FALSE;
wl_egl_display->use_explicit_sync = TPL_FALSE; // default disabled
wl_egl_display->prepared = TPL_FALSE;
+ wl_egl_display->gsource_finalized = TPL_FALSE;
+#if TIZEN_FEATURE_ENABLE
/* Wayland Interfaces */
wl_egl_display->tss = NULL;
wl_egl_display->presentation = NULL;
wl_egl_display->explicit_sync = NULL;
+#endif
wl_egl_display->wl_tbm_client = NULL;
wl_egl_display->use_wait_vblank = TPL_TRUE; // default enabled
tpl_gmutex_init(&wl_egl_display->wl_event_mutex);
+ tpl_gmutex_init(&wl_egl_display->disp_mutex);
+ tpl_gcond_init(&wl_egl_display->disp_cond);
+
/* Create gthread */
wl_egl_display->thread = tpl_gthread_create("wl_egl_thread",
(tpl_gthread_func)_thread_init,
goto free_display;
}
- wl_egl_display->tdm_source = tpl_gsource_create(wl_egl_display->thread,
- (void *)wl_egl_display,
- wl_egl_display->tdm_display_fd,
- &tdm_funcs, SOURCE_TYPE_NORMAL);
- if (!wl_egl_display->tdm_source) {
- TPL_ERR("Failed to create tdm_gsource\n");
- goto free_display;
+ if (wl_egl_display->use_wait_vblank &&
+ wl_egl_display->tdm.tdm_initialized) {
+ tpl_gmutex_init(&wl_egl_display->tdm.tdm_mutex);
+ tpl_gcond_init(&wl_egl_display->tdm.tdm_cond);
+ wl_egl_display->tdm.tdm_source = tpl_gsource_create(wl_egl_display->thread,
+ (void *)wl_egl_display,
+ wl_egl_display->tdm.tdm_display_fd,
+ &tdm_funcs, SOURCE_TYPE_NORMAL);
+ wl_egl_display->tdm.gsource_finalized = TPL_FALSE;
+ if (!wl_egl_display->tdm.tdm_source) {
+ TPL_ERR("Failed to create tdm_gsource\n");
+ goto free_display;
+ }
}
+ wl_egl_display->use_wait_vblank = (wl_egl_display->tdm.tdm_initialized &&
+ (wl_egl_display->tdm.tdm_source != NULL));
+
TPL_INFO("[DISPLAY_INIT]",
"wl_egl_display(%p) tpl_gthread(%p) wl_display(%p)",
wl_egl_display,
TPL_INFO("[DISPLAY_INIT]",
"USE_WAIT_VBLANK(%s) TIZEN_SURFACE_SHM(%s) USE_EXPLICIT_SYNC(%s)",
wl_egl_display->use_wait_vblank ? "TRUE" : "FALSE",
- wl_egl_display->tss ? "TRUE" : "FALSE",
+ wl_egl_display->use_tss ? "TRUE" : "FALSE",
wl_egl_display->use_explicit_sync ? "TRUE" : "FALSE");
return TPL_ERROR_NONE;
free_display:
- if (wl_egl_display->thread) {
- if (wl_egl_display->tdm_source)
- tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
- if (wl_egl_display->disp_source)
- tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
+ if (wl_egl_display->tdm.tdm_source) {
+ tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
+	// Send destroy message to thread
+ tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
+ while (!wl_egl_display->tdm.gsource_finalized) {
+ tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
+ }
+ tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
+ }
+ if (wl_egl_display->disp_source) {
+ tpl_gmutex_lock(&wl_egl_display->disp_mutex);
+	// Send destroy message to thread
+ tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
+ while (!wl_egl_display->gsource_finalized) {
+ tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
+ }
+ tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
+ }
+
+ if (wl_egl_display->thread) {
tpl_gthread_destroy(wl_egl_display->thread);
}
+ tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
+ tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
+ tpl_gcond_clear(&wl_egl_display->disp_cond);
+ tpl_gmutex_clear(&wl_egl_display->disp_mutex);
+
wl_egl_display->thread = NULL;
free(wl_egl_display);
wl_egl_display->thread,
wl_egl_display->wl_display);
- if (wl_egl_display->tdm_source && wl_egl_display->tdm_initialized) {
- tpl_gsource_destroy(wl_egl_display->tdm_source, TPL_TRUE);
- wl_egl_display->tdm_source = NULL;
+ if (wl_egl_display->tdm.tdm_source && wl_egl_display->tdm.tdm_initialized) {
+ /* This is a protection to prevent problems that arise in unexpected situations
+ * that g_cond_wait cannot work normally.
+ * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
+ * caller should use tpl_gcond_wait() in the loop with checking finalized flag
+ * */
+ tpl_gmutex_lock(&wl_egl_display->tdm.tdm_mutex);
+	// Send destroy message to thread
+ tpl_gsource_destroy(wl_egl_display->tdm.tdm_source, TPL_TRUE);
+ while (!wl_egl_display->tdm.gsource_finalized) {
+ tpl_gcond_wait(&wl_egl_display->tdm.tdm_cond, &wl_egl_display->tdm.tdm_mutex);
+ }
+ wl_egl_display->tdm.tdm_source = NULL;
+ tpl_gmutex_unlock(&wl_egl_display->tdm.tdm_mutex);
}
if (wl_egl_display->disp_source) {
+ tpl_gmutex_lock(&wl_egl_display->disp_mutex);
+	// Send destroy message to thread
tpl_gsource_destroy(wl_egl_display->disp_source, TPL_TRUE);
+ /* This is a protection to prevent problems that arise in unexpected situations
+ * that g_cond_wait cannot work normally.
+ * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
+ * caller should use tpl_gcond_wait() in the loop with checking finalized flag
+ * */
+ while (!wl_egl_display->gsource_finalized) {
+ tpl_gcond_wait(&wl_egl_display->disp_cond, &wl_egl_display->disp_mutex);
+ }
wl_egl_display->disp_source = NULL;
+ tpl_gmutex_unlock(&wl_egl_display->disp_mutex);
}
if (wl_egl_display->thread) {
wl_egl_display->thread = NULL;
}
+ tpl_gcond_clear(&wl_egl_display->tdm.tdm_cond);
+ tpl_gmutex_clear(&wl_egl_display->tdm.tdm_mutex);
+ tpl_gcond_clear(&wl_egl_display->disp_cond);
+ tpl_gmutex_clear(&wl_egl_display->disp_mutex);
+
tpl_gmutex_clear(&wl_egl_display->wl_event_mutex);
free(wl_egl_display);
}
tpl_bool_t
-__tpl_display_choose_backend_wl_egl_thread2(tpl_handle_t native_dpy)
+__tpl_display_choose_backend_wl_egl_thread(tpl_handle_t native_dpy)
{
struct wl_interface *wl_egl_native_dpy = *(void **) native_dpy;
return commit_sync_fd;
}
+#if TIZEN_FEATURE_ENABLE
static int
__cb_create_presentation_sync_fd(struct wl_egl_window *wl_egl_window, void *private)
{
TPL_INFO("[BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
wl_egl_surface, wl_egl_surface->tbm_queue);
+ _print_buffer_lists(wl_egl_surface);
+
tsq_err = tbm_surface_queue_flush(wl_egl_surface->tbm_queue);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
TPL_ERR("Failed to flush tbm_queue(%p)", wl_egl_surface->tbm_queue);
tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
- wl_egl_surface, wl_egl_surface->tbm_queue);
+ TPL_INFO("[FREE_BUFFER_FLUSH]", "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
+
+ _print_buffer_lists(wl_egl_surface);
tsq_err = tbm_surface_queue_free_flush(wl_egl_surface->tbm_queue);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
__cb_tss_flusher_free_flush_callback
};
/* -- END -- tizen_surface_shm_flusher_listener */
-
+#endif
/* -- BEGIN -- tbm_surface_queue callback functions */
static void
TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-
- tpl_gsource_send_message(wl_egl_surface->surf_source, 2);
-
+ if (wl_egl_surface->sent_message == NONE_MESSAGE) {
+ wl_egl_surface->sent_message = ACQUIRABLE;
+ tpl_gsource_send_message(wl_egl_surface->surf_source,
+ wl_egl_surface->sent_message);
+ }
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
/* -- END -- tbm_surface_queue callback functions */
{
tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
-
TPL_INFO("[SURFACE_FINI]",
"wl_egl_surface(%p) wl_egl_window(%p) wl_surface(%p)",
wl_egl_surface, wl_egl_surface->wl_egl_window,
wl_egl_surface->wl_surface);
-
+#if TIZEN_FEATURE_ENABLE
tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
if (wl_egl_display->presentation && wl_egl_surface->presentation_feedbacks) {
wl_egl_surface->presentation_sync.fd = -1;
}
- if (wl_egl_surface->vblank_waiting_buffers) {
- __tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL);
- wl_egl_surface->vblank_waiting_buffers = NULL;
- }
-
tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
if (wl_egl_surface->surface_sync) {
tizen_surface_shm_flusher_destroy(wl_egl_surface->tss_flusher);
wl_egl_surface->tss_flusher = NULL;
}
-
- if (wl_egl_surface->vblank) {
- TPL_INFO("[VBLANK_DESTROY]",
- "wl_egl_surface(%p) vblank(%p)",
- wl_egl_surface, wl_egl_surface->vblank);
- tdm_client_vblank_destroy(wl_egl_surface->vblank);
- wl_egl_surface->vblank = NULL;
- }
+#endif
if (wl_egl_surface->tbm_queue) {
TPL_INFO("[TBM_QUEUE_DESTROY]",
wl_egl_surface->tbm_queue = NULL;
}
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+ __tpl_list_free(wl_egl_surface->vblank->waiting_buffers, NULL);
+ wl_egl_surface->vblank->waiting_buffers = NULL;
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+ }
+
+ if (wl_egl_surface->vblank) {
+ __tpl_list_remove_data(wl_egl_display->tdm.surface_vblanks,
+ (void *)wl_egl_surface->vblank,
+ TPL_FIRST,
+ __cb_surface_vblank_free);
+ wl_egl_surface->vblank = NULL;
+ }
}
static tpl_bool_t
wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
- /* Initialize surface */
- if (message == 1) {
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ if (message == INIT_SURFACE) { /* Initialize surface */
TPL_DEBUG("wl_egl_surface(%p) initialize message received!",
wl_egl_surface);
_thread_wl_egl_surface_init(wl_egl_surface);
+ wl_egl_surface->initialized_in_thread = TPL_TRUE;
tpl_gcond_signal(&wl_egl_surface->surf_cond);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- } else if (message == 2) {
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ } else if (message == ACQUIRABLE) { /* Acquirable */
TPL_DEBUG("wl_egl_surface(%p) acquirable message received!",
wl_egl_surface);
_thread_surface_queue_acquire(wl_egl_surface);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
+ wl_egl_surface->sent_message = NONE_MESSAGE;
+
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
return TPL_TRUE;
}
wl_egl_surface = (tpl_wl_egl_surface_t *)tpl_gsource_get_data(gsource);
TPL_CHECK_ON_NULL_RETURN(wl_egl_surface);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ TPL_DEBUG("[FINALIZE] wl_egl_surface(%p) tpl_gsource(%p)",
+ wl_egl_surface, gsource);
+
_thread_wl_egl_surface_fini(wl_egl_surface);
- TPL_DEBUG("[FINALIZE] gsource(%p) wl_egl_surface(%p)",
- gsource, wl_egl_surface);
+ wl_egl_surface->gsource_finalized = TPL_TRUE;
+
+ tpl_gcond_signal(&wl_egl_surface->surf_cond);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
static tpl_gsource_functions surf_funcs = {
wl_egl_surface->width = wl_egl_window->width;
wl_egl_surface->height = wl_egl_window->height;
wl_egl_surface->format = surface->format;
+ wl_egl_surface->num_buffers = surface->num_buffers;
wl_egl_surface->surf_source = surf_source;
wl_egl_surface->wl_egl_window = wl_egl_window;
wl_egl_surface->vblank_done = TPL_TRUE;
wl_egl_surface->use_render_done_fence = TPL_FALSE;
wl_egl_surface->set_serial_is_used = TPL_FALSE;
+ wl_egl_surface->gsource_finalized = TPL_FALSE;
+ wl_egl_surface->initialized_in_thread = TPL_FALSE;
- wl_egl_surface->latest_transform = 0;
+ wl_egl_surface->latest_transform = -1;
wl_egl_surface->render_done_cnt = 0;
wl_egl_surface->serial = 0;
wl_egl_surface->vblank = NULL;
+#if TIZEN_FEATURE_ENABLE
wl_egl_surface->tss_flusher = NULL;
wl_egl_surface->surface_sync = NULL;
+#endif
wl_egl_surface->post_interval = surface->post_interval;
wl_egl_surface->commit_sync.fd = -1;
wl_egl_surface->presentation_sync.fd = -1;
+ wl_egl_surface->sent_message = NONE_MESSAGE;
+
{
int i = 0;
for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
wl_egl_surface->buffer_cnt = 0;
}
+ wl_egl_surface->last_enq_buffer = NULL;
+
{
struct tizen_private *tizen_private = NULL;
tizen_private->set_window_serial_callback = (void *)
__cb_set_window_serial_callback;
tizen_private->create_commit_sync_fd = (void *)__cb_create_commit_sync_fd;
+#if TIZEN_FEATURE_ENABLE
tizen_private->create_presentation_sync_fd = (void *)__cb_create_presentation_sync_fd;
+#else
+ tizen_private->create_presentation_sync_fd = NULL;
+#endif
wl_egl_window->destroy_window_callback = (void *)__cb_destroy_callback;
wl_egl_window->resize_callback = (void *)__cb_resize_callback;
/* Initialize in thread */
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- tpl_gsource_send_message(wl_egl_surface->surf_source, 1);
- tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
+ wl_egl_surface->sent_message = INIT_SURFACE;
+ tpl_gsource_send_message(wl_egl_surface->surf_source,
+ wl_egl_surface->sent_message);
+ while (!wl_egl_surface->initialized_in_thread)
+ tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
TPL_ASSERT(wl_egl_surface->tbm_queue);
static tdm_client_vblank*
_thread_create_tdm_client_vblank(tdm_client *tdm_client)
{
- tdm_client_vblank *vblank = NULL;
+ tdm_client_vblank *tdm_vblank = NULL;
tdm_client_output *tdm_output = NULL;
tdm_error tdm_err = TDM_ERROR_NONE;
return NULL;
}
- vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
- if (!vblank || tdm_err != TDM_ERROR_NONE) {
- TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
+ tdm_vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
+ if (!tdm_vblank || tdm_err != TDM_ERROR_NONE) {
+ TPL_ERR("Failed to create tdm_vblank. tdm_err(%d)", tdm_err);
return NULL;
}
- tdm_client_vblank_set_enable_fake(vblank, 1);
- tdm_client_vblank_set_sync(vblank, 0);
+ tdm_client_vblank_set_enable_fake(tdm_vblank, 1);
+ tdm_client_vblank_set_sync(tdm_vblank, 0);
- return vblank;
+ return tdm_vblank;
+}
+
+static void
+__cb_surface_vblank_free(void *data)
+{
+ TPL_CHECK_ON_NULL_RETURN(data);
+
+ tpl_surface_vblank_t *vblank = (tpl_surface_vblank_t *)data;
+ tpl_wl_egl_surface_t *wl_egl_surface = vblank->wl_egl_surface;
+
+ TPL_INFO("[VBLANK_DESTROY]",
+ "wl_egl_surface(%p) surface_vblank(%p) tdm_vblank(%p)",
+ wl_egl_surface, vblank,
+ vblank->tdm_vblank);
+
+ tdm_client_vblank_destroy(vblank->tdm_vblank);
+ vblank->tdm_vblank = NULL;
+ vblank->wl_egl_surface = NULL;
+ tpl_gmutex_clear(&vblank->mutex);
+
+ free(vblank);
+
+ wl_egl_surface->vblank = NULL;
}
static void
_thread_wl_egl_surface_init(tpl_wl_egl_surface_t *wl_egl_surface)
{
tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+ tpl_surface_vblank_t *vblank = NULL;
wl_egl_surface->tbm_queue = _thread_create_tbm_queue(
wl_egl_surface,
wl_egl_display->wl_tbm_client,
- CLIENT_QUEUE_SIZE);
+ wl_egl_surface->num_buffers);
if (!wl_egl_surface->tbm_queue) {
TPL_ERR("Failed to create tbm_queue. wl_egl_surface(%p) wl_tbm_client(%p)",
wl_egl_surface, wl_egl_display->wl_tbm_client);
wl_egl_surface->tbm_queue,
wl_egl_surface->width,
wl_egl_surface->height,
- CLIENT_QUEUE_SIZE,
+ wl_egl_surface->num_buffers,
wl_egl_surface->format);
- wl_egl_surface->vblank = _thread_create_tdm_client_vblank(
- wl_egl_display->tdm_client);
- if (wl_egl_surface->vblank) {
- TPL_INFO("[VBLANK_INIT]",
- "wl_egl_surface(%p) tdm_client(%p) vblank(%p)",
- wl_egl_surface, wl_egl_display->tdm_client,
- wl_egl_surface->vblank);
+ if (wl_egl_display->use_wait_vblank) {
+ vblank = (tpl_surface_vblank_t *)calloc(1, sizeof(tpl_surface_vblank_t));
+ if (vblank) {
+ vblank->tdm_vblank = _thread_create_tdm_client_vblank(
+ wl_egl_display->tdm.tdm_client);
+ if (!vblank->tdm_vblank) {
+ TPL_ERR("Failed to create tdm_vblank from tdm_client(%p)",
+ wl_egl_display->tdm.tdm_client);
+ free(vblank);
+ vblank = NULL;
+ } else {
+ vblank->waiting_buffers = __tpl_list_alloc();
+ vblank->wl_egl_surface = wl_egl_surface;
+ tpl_gmutex_init(&vblank->mutex);
+
+ __tpl_list_push_back(wl_egl_display->tdm.surface_vblanks,
+ (void *)vblank);
+
+ TPL_INFO("[VBLANK_INIT]",
+ "wl_egl_surface(%p) tdm_client(%p) tdm_vblank(%p)",
+ wl_egl_surface, wl_egl_display->tdm.tdm_client,
+ vblank->tdm_vblank);
+ }
+ }
}
+ wl_egl_surface->vblank = vblank;
+#if TIZEN_FEATURE_ENABLE
if (wl_egl_display->tss) {
wl_egl_surface->tss_flusher =
tizen_surface_shm_get_flusher(wl_egl_display->tss,
wl_egl_display->use_explicit_sync = TPL_FALSE;
}
}
-
- wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
+#endif
wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
}
tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
tpl_bool_t need_to_release = TPL_FALSE;
tpl_bool_t need_to_cancel = TPL_FALSE;
+ buffer_status_t status = RELEASED;
int idx = 0;
while (wl_egl_surface->buffer_cnt) {
tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ status = wl_egl_buffer->status;
+
TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
idx, wl_egl_buffer,
wl_egl_buffer->tbm_surface,
- status_to_string[wl_egl_buffer->status]);
-
- /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
- /* It has been acquired but has not yet been released, so this
- * buffer must be released. */
- need_to_release = (wl_egl_buffer->status == ACQUIRED ||
- wl_egl_buffer->status == WAITING_SIGNALED ||
- wl_egl_buffer->status == WAITING_VBLANK ||
- wl_egl_buffer->status == COMMITTED);
- /* After dequeue, it has not been enqueued yet
- * so cancel_dequeue must be performed. */
- need_to_cancel = wl_egl_buffer->status == DEQUEUED;
+ status_to_string[status]);
- if (wl_egl_buffer->status >= ENQUEUED) {
- tpl_bool_t need_to_wait = TPL_FALSE;
+ if (status >= ENQUEUED) {
tpl_result_t wait_result = TPL_ERROR_NONE;
- if (!wl_egl_display->use_explicit_sync &&
- wl_egl_buffer->status < WAITING_VBLANK)
- need_to_wait = TPL_TRUE;
-
- if (wl_egl_display->use_explicit_sync &&
- wl_egl_buffer->status < COMMITTED)
- need_to_wait = TPL_TRUE;
+ while (status < COMMITTED && wait_result != TPL_ERROR_TIME_OUT) {
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ /* The lock/unlock order of buffer->mutex and display->wl_event_mutex
+ * is important. display->mutex must surround buffer->mutex */
+ wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond,
+ &wl_egl_display->wl_event_mutex,
+ 200); /* 200ms */
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ status = wl_egl_buffer->status; /* update status */
- if (need_to_wait) {
- tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
- wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
- &wl_egl_buffer->mutex,
- 16); /* 16ms */
- tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
if (wait_result == TPL_ERROR_TIME_OUT)
TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
wl_egl_buffer);
}
}
+ /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
+ /* It has been acquired but has not yet been released, so this
+ * buffer must be released. */
+ need_to_release = (status >= ACQUIRED && status <= COMMITTED);
+
+ /* After dequeue, it has not been enqueued yet
+ * so cancel_dequeue must be performed. */
+ need_to_cancel = (status == DEQUEUED);
+
if (need_to_release) {
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
wl_egl_buffer->tbm_surface);
_tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
- if (wl_egl_surface->surf_source)
+ if (wl_egl_surface->surf_source) {
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+	// Send destroy message to thread
tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
- wl_egl_surface->surf_source = NULL;
+ /* This is a protection to prevent problems that arise in unexpected situations
+ * that g_cond_wait cannot work normally.
+ * When calling tpl_gsource_destroy() with destroy_in_thread is TPL_TRUE,
+ * caller should use tpl_gcond_wait() in the loop with checking finalized flag
+ * */
+ while (!wl_egl_surface->gsource_finalized) {
+ tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
+ }
+ wl_egl_surface->surf_source = NULL;
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ }
_print_buffer_lists(wl_egl_surface);
wl_egl_surface->wl_egl_window = NULL;
}
+ wl_egl_surface->last_enq_buffer = NULL;
+
wl_egl_surface->wl_surface = NULL;
wl_egl_surface->wl_egl_display = NULL;
wl_egl_surface->tpl_surface = NULL;
wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
- TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
TPL_INFO("[SET_PREROTATION_CAPABILITY]",
"wl_egl_surface(%p) prerotation capability set to [%s]",
wl_egl_surface = (tpl_wl_egl_surface_t *)surface->backend.data;
- TPL_CHECK_ON_TRUE_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_ERROR_INVALID_PARAMETER);
TPL_INFO("[SET_POST_INTERVAL]",
"wl_egl_surface(%p) post_interval(%d -> %d)",
int i;
tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
+ buffer_status_t status;
tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
wl_egl_buffer = wl_egl_surface->buffers[i];
tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
- if (wl_egl_buffer && wl_egl_buffer->status == COMMITTED) {
- wl_egl_buffer->status = RELEASED;
+ if (wl_egl_buffer) {
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ status = wl_egl_buffer->status;
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ } else {
+ continue;
+ }
+
+ if (status > ENQUEUED && status <= COMMITTED) {
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
wl_egl_buffer->tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
"wl_egl_surface(%p) tbm_queue(%p)",
wl_egl_surface, wl_egl_surface->tbm_queue);
+ _print_buffer_lists(wl_egl_surface);
+
return TPL_ERROR_NONE;
}
wl_egl_buffer->draw_done = TPL_FALSE;
wl_egl_buffer->need_to_commit = TPL_TRUE;
+#if TIZEN_FEATURE_ENABLE
wl_egl_buffer->buffer_release = NULL;
+#endif
wl_egl_buffer->transform = tizen_private->transform;
if (wl_egl_buffer->w_transform != tizen_private->window_transform) {
wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
+ wl_egl_buffer->w_transform = -1;
+
tpl_gmutex_init(&wl_egl_buffer->mutex);
tpl_gcond_init(&wl_egl_buffer->cond);
for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
if (wl_egl_surface->buffers[i] == NULL) break;
+	/* Reaching this point means the buffer array is full,
+	 * which may indicate a critical memory leak problem. */
+ if (i == BUFFER_ARRAY_SIZE) {
+ tpl_wl_egl_buffer_t *evicted_buffer = NULL;
+ int evicted_idx = 0; /* evict the frontmost buffer */
+
+ evicted_buffer = wl_egl_surface->buffers[evicted_idx];
+
+ TPL_WARN("wl_egl_surface(%p) buffers array is full. evict one.",
+ wl_egl_surface);
+ TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
+ evicted_buffer, evicted_buffer->tbm_surface,
+ status_to_string[evicted_buffer->status]);
+
+ /* [TODO] need to think about whether there will be
+ * better modifications */
+ wl_egl_surface->buffer_cnt--;
+ wl_egl_surface->buffers[evicted_idx] = NULL;
+
+ i = evicted_idx;
+ }
+
wl_egl_surface->buffer_cnt++;
wl_egl_surface->buffers[i] = wl_egl_buffer;
wl_egl_buffer->idx = i;
tbm_surface_h tbm_surface = NULL;
TPL_OBJECT_UNLOCK(surface);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ if (wl_egl_surface->reset == TPL_TRUE) {
+ if (_check_buffer_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer) &&
+ tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) {
+ tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer;
+ tpl_wl_egl_buffer_t *enqueued_buffer =
+ _get_wl_egl_buffer(last_enq_buffer);
+
+ if (enqueued_buffer) {
+ tbm_surface_internal_ref(last_enq_buffer);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ tpl_gmutex_lock(&enqueued_buffer->mutex);
+ while (enqueued_buffer->status >= ENQUEUED &&
+ enqueued_buffer->status < COMMITTED) {
+ tpl_result_t wait_result;
+ TPL_INFO("[DEQ_AFTER_RESET]",
+ "waiting for previous wl_egl_buffer(%p) commit",
+ enqueued_buffer);
+
+ wait_result = tpl_gcond_timed_wait(&enqueued_buffer->cond,
+ &enqueued_buffer->mutex,
+ 200); /* 200ms */
+ if (wait_result == TPL_ERROR_TIME_OUT) {
+ TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
+ enqueued_buffer);
+ break;
+ }
+ }
+ tpl_gmutex_unlock(&enqueued_buffer->mutex);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ tbm_surface_internal_unref(last_enq_buffer);
+ }
+ }
+
+ wl_egl_surface->last_enq_buffer = NULL;
+ }
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
TPL_OBJECT_LOCK(surface);
TPL_LOG_T("WL_EGL",
"[DEQ][F] surface->frontbuffer(%p) BO_NAME(%d)",
surface->frontbuffer, bo_name);
- TRACE_ASYNC_BEGIN((int)surface->frontbuffer,
+ TRACE_ASYNC_BEGIN((intptr_t)surface->frontbuffer,
"[DEQ]~[ENQ] BO_NAME:%d",
bo_name);
tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
* the tbm_surface can be used immediately.
* If not, user(EGL) have to wait until signaled. */
if (release_fence) {
- if (wl_egl_surface->surface_sync) {
+#if TIZEN_FEATURE_ENABLE
+ if (wl_egl_display->use_explicit_sync) {
*release_fence = wl_egl_buffer->release_fence_fd;
TPL_DEBUG("wl_egl_surface(%p) wl_egl_buffer(%p) release_fence_fd(%d)",
wl_egl_surface, wl_egl_buffer, *release_fence);
- } else {
+
+ wl_egl_buffer->release_fence_fd = -1;
+ } else
+#endif
+ {
*release_fence = -1;
}
}
wl_egl_surface->reset = TPL_FALSE;
TRACE_MARK("[DEQ][NEW]BO_NAME:%d", wl_egl_buffer->bo_name);
- TRACE_ASYNC_BEGIN((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d",
+ TRACE_ASYNC_BEGIN((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d",
wl_egl_buffer->bo_name);
TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
wl_egl_buffer, tbm_surface, wl_egl_buffer->bo_name,
if (!tbm_surface_internal_is_valid(tbm_surface)) {
TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
tbm_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+ if (!wl_egl_buffer) {
+ TPL_ERR("Failed to get wl_egl_buffer from tbm_surface(%p)", tbm_surface);
return TPL_ERROR_INVALID_PARAMETER;
}
TRACE_MARK("[ENQ] BO_NAME:%d", bo_name);
- wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
-
tpl_gmutex_lock(&wl_egl_buffer->mutex);
/* If there are received region information, save it to wl_egl_buffer */
!wl_egl_buffer->need_to_commit) {
TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
return TPL_ERROR_NONE;
}
tbm_surface_internal_unref(tbm_surface);
TPL_ERR("Failed to enqueue tbm_surface(%p). wl_egl_surface(%p) tsq_err=%d",
tbm_surface, wl_egl_surface, tsq_err);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
return TPL_ERROR_INVALID_OPERATION;
}
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ wl_egl_surface->last_enq_buffer = tbm_surface;
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
tbm_surface_internal_unref(tbm_surface);
- TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ TRACE_ASYNC_END((intptr_t)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
return TPL_ERROR_NONE;
}
tpl_wl_egl_buffer_t *wl_egl_buffer =
(tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
tbm_surface_h tbm_surface = wl_egl_buffer->tbm_surface;
wl_egl_surface->render_done_cnt++;
wl_egl_buffer, tbm_surface);
tpl_gmutex_lock(&wl_egl_buffer->mutex);
- tpl_gcond_signal(&wl_egl_buffer->cond);
wl_egl_buffer->status = WAITING_VBLANK;
+
+ TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
+ wl_egl_buffer, wl_egl_buffer->waiting_source,
+ wl_egl_buffer->acquire_fence_fd);
+
+ close(wl_egl_buffer->acquire_fence_fd);
+ wl_egl_buffer->acquire_fence_fd = -1;
+ wl_egl_buffer->waiting_source = NULL;
+
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done)
+ if (wl_egl_surface->vblank == NULL || wl_egl_surface->vblank_done)
_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
- else
- __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers,
+ else {
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+ __tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers,
wl_egl_buffer);
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+ }
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
static void
__thread_func_waiting_source_finalize(tpl_gsource *gsource)
{
-	tpl_wl_egl_buffer_t *wl_egl_buffer =
-		(tpl_wl_egl_buffer_t *)tpl_gsource_get_data(gsource);
-
-	TPL_DEBUG("[FINALIZE] wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
-		  wl_egl_buffer, wl_egl_buffer->waiting_source,
-		  wl_egl_buffer->acquire_fence_fd);
-
-	close(wl_egl_buffer->acquire_fence_fd);
-	wl_egl_buffer->acquire_fence_fd = -1;
-	wl_egl_buffer->waiting_source = NULL;
+	/* Fence cleanup (closing acquire_fence_fd and clearing waiting_source)
+	 * was moved into the waiting-source dispatch callback, where it runs
+	 * under wl_egl_buffer->mutex. Nothing is left to finalize here. */
+	TPL_IGNORE(gsource);
}
static tpl_gsource_functions buffer_funcs = {
wl_egl_buffer, tbm_surface,
_get_tbm_surface_bo_name(tbm_surface));
- if (wl_egl_buffer->wl_buffer == NULL) {
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
- wl_egl_buffer->wl_buffer =
- (struct wl_proxy *)wayland_tbm_client_create_buffer(
- wl_egl_display->wl_tbm_client, tbm_surface);
-
- if (!wl_egl_buffer->wl_buffer) {
- TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
- wl_egl_display->wl_tbm_client, tbm_surface);
- }
- }
-
if (wl_egl_buffer->acquire_fence_fd != -1) {
- if (wl_egl_surface->surface_sync)
+#if TIZEN_FEATURE_ENABLE
+ if (wl_egl_display->use_explicit_sync)
ready_to_commit = TPL_TRUE;
- else {
+ else
+#endif
+ {
if (wl_egl_buffer->waiting_source) {
tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
wl_egl_buffer->waiting_source = NULL;
ready_to_commit = TPL_FALSE;
}
+ } else {
+ ready_to_commit = TPL_TRUE;
}
if (ready_to_commit) {
- if (!wl_egl_display->use_wait_vblank || wl_egl_surface->vblank_done)
+ if (wl_egl_surface->vblank == NULL || wl_egl_surface->vblank_done)
ready_to_commit = TPL_TRUE;
else {
wl_egl_buffer->status = WAITING_VBLANK;
- __tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, wl_egl_buffer);
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+ __tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers, wl_egl_buffer);
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
ready_to_commit = TPL_FALSE;
}
}
tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)user_data;
tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
- TRACE_ASYNC_END((int)wl_egl_surface, "WAIT_VBLANK");
+ TRACE_ASYNC_END((intptr_t)wl_egl_surface, "WAIT_VBLANK");
TPL_DEBUG("[VBLANK] wl_egl_surface(%p)", wl_egl_surface);
if (error == TDM_ERROR_TIMEOUT)
wl_egl_surface->vblank_done = TPL_TRUE;
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(
- wl_egl_surface->vblank_waiting_buffers,
- NULL);
- if (wl_egl_buffer)
- _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
+ if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
+ tpl_bool_t is_empty = TPL_TRUE;
+ do {
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+ wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(
+ wl_egl_surface->vblank->waiting_buffers,
+ NULL);
+ is_empty = __tpl_list_is_empty(wl_egl_surface->vblank->waiting_buffers);
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+
+ if (!wl_egl_buffer) break;
+
+ _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
+
+			/* If a tdm error such as TIMEOUT occurred,
+			 * flush all vblank waiting buffers of this wl_egl_surface.
+			 * Otherwise, only one wl_egl_buffer is committed per vblank event.
+			 */
+ if (error == TDM_ERROR_NONE) break;
+ } while (!is_empty);
+ }
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
/* -- END -- tdm_client vblank callback function */
+#if TIZEN_FEATURE_ENABLE
static void
__cb_buffer_fenced_release(void *data,
struct zwp_linux_buffer_release_v1 *release, int32_t fence)
tbm_surface = wl_egl_buffer->tbm_surface;
if (tbm_surface_internal_is_valid(tbm_surface)) {
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
tpl_gmutex_lock(&wl_egl_buffer->mutex);
if (wl_egl_buffer->status == COMMITTED) {
tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
- tbm_surface_queue_error_e tsq_err;
zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
wl_egl_buffer->buffer_release = NULL;
TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
_get_tbm_surface_bo_name(tbm_surface),
fence);
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
_get_tbm_surface_bo_name(tbm_surface));
TPL_LOG_T("WL_EGL",
- "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
- wl_egl_buffer->wl_buffer, tbm_surface,
+ "[FENCED_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+ wl_egl_buffer, tbm_surface,
_get_tbm_surface_bo_name(tbm_surface),
fence);
tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
-
- tbm_surface_internal_unref(tbm_surface);
}
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
+ tbm_surface_internal_unref(tbm_surface);
+
} else {
TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
}
tbm_surface = wl_egl_buffer->tbm_surface;
if (tbm_surface_internal_is_valid(tbm_surface)) {
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
tpl_gmutex_lock(&wl_egl_buffer->mutex);
if (wl_egl_buffer->status == COMMITTED) {
tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
- tbm_surface_queue_error_e tsq_err;
zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
wl_egl_buffer->buffer_release = NULL;
TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
_get_tbm_surface_bo_name(tbm_surface));
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
_get_tbm_surface_bo_name(tbm_surface));
TPL_LOG_T("WL_EGL",
- "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)",
- wl_egl_buffer->wl_buffer, tbm_surface,
+ "[IMMEDIATE_RELEASE] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
+ wl_egl_buffer, tbm_surface,
_get_tbm_surface_bo_name(tbm_surface));
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
-
- tbm_surface_internal_unref(tbm_surface);
}
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
+ tbm_surface_internal_unref(tbm_surface);
+
} else {
TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
}
__cb_buffer_fenced_release,
__cb_buffer_immediate_release,
};
+#endif
static void
__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
wl_egl_buffer->status = RELEASED;
TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
_get_tbm_surface_bo_name(tbm_surface));
TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
static const struct wl_buffer_listener wl_buffer_release_listener = {
(void *)__cb_wl_buffer_release,
};
-
+#if TIZEN_FEATURE_ENABLE
static void
__cb_presentation_feedback_sync_output(void *data,
struct wp_presentation_feedback *presentation_feedback,
__cb_presentation_feedback_presented,
__cb_presentation_feedback_discarded
};
+#endif
static tpl_result_t
_thread_surface_vblank_wait(tpl_wl_egl_surface_t *wl_egl_surface)
{
tdm_error tdm_err = TDM_ERROR_NONE;
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
-
- if (wl_egl_surface->vblank == NULL) {
- wl_egl_surface->vblank =
- _thread_create_tdm_client_vblank(wl_egl_display->tdm_client);
- if (!wl_egl_surface->vblank) {
- TPL_WARN("Failed to create vblank. wl_egl_surface(%p)",
- wl_egl_surface);
- return TPL_ERROR_OUT_OF_MEMORY;
- }
- }
+ tpl_surface_vblank_t *vblank = wl_egl_surface->vblank;
- tdm_err = tdm_client_vblank_wait(wl_egl_surface->vblank,
+ tdm_err = tdm_client_vblank_wait(vblank->tdm_vblank,
wl_egl_surface->post_interval,
__cb_tdm_client_vblank,
(void *)wl_egl_surface);
if (tdm_err == TDM_ERROR_NONE) {
wl_egl_surface->vblank_done = TPL_FALSE;
- TRACE_ASYNC_BEGIN((int)wl_egl_surface, "WAIT_VBLANK");
+ TRACE_ASYNC_BEGIN((intptr_t)wl_egl_surface, "WAIT_VBLANK");
} else {
TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
return TPL_ERROR_INVALID_OPERATION;
(struct wl_proxy *)wayland_tbm_client_create_buffer(
wl_egl_display->wl_tbm_client,
wl_egl_buffer->tbm_surface);
- }
- TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
- "[FATAL] Failed to create wl_buffer");
- wl_buffer_add_listener((void *)wl_egl_buffer->wl_buffer,
- &wl_buffer_release_listener, wl_egl_buffer);
+ TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer->wl_buffer != NULL,
+ "[FATAL] Failed to create wl_buffer");
+
+ TPL_INFO("[WL_BUFFER_CREATE]",
+ "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
+ wl_egl_buffer, wl_egl_buffer->wl_buffer,
+ wl_egl_buffer->tbm_surface);
+
+#if TIZEN_FEATURE_ENABLE
+ if (!wl_egl_display->use_explicit_sync ||
+ wl_egl_buffer->acquire_fence_fd == -1)
+#endif
+ {
+ wl_buffer_add_listener((struct wl_buffer *)wl_egl_buffer->wl_buffer,
+ &wl_buffer_release_listener,
+ wl_egl_buffer);
+ }
+ }
version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
+#if TIZEN_FEATURE_ENABLE
/* create presentation feedback and add listener */
tpl_gmutex_lock(&wl_egl_surface->presentation_sync.mutex);
if (wl_egl_display->presentation && wl_egl_buffer->presentation_sync_fd != -1) {
}
}
tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
+#endif
if (wl_egl_buffer->w_rotated == TPL_TRUE) {
- wayland_tbm_client_set_buffer_transform(
- wl_egl_display->wl_tbm_client,
- (void *)wl_egl_buffer->wl_buffer,
- wl_egl_buffer->w_transform);
+ if (version > 1) {
+ wayland_tbm_client_set_buffer_transform(
+ wl_egl_display->wl_tbm_client,
+ (void *)wl_egl_buffer->wl_buffer,
+ wl_egl_buffer->w_transform);
+ TPL_INFO("[W_TRANSFORM]",
+ "wl_egl_surface(%p) wl_egl_buffer(%p) w_transform(%d)",
+ wl_egl_surface, wl_egl_buffer, wl_egl_buffer->w_transform);
+ }
wl_egl_buffer->w_rotated = TPL_FALSE;
}
if (wl_egl_surface->latest_transform != wl_egl_buffer->transform) {
+ if (version > 1) {
+ wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
+ TPL_INFO("[TRANSFORM]",
+ "wl_egl_surface(%p) wl_egl_buffer(%p) transform(%d -> %d)",
+ wl_egl_surface, wl_egl_buffer,
+ wl_egl_surface->latest_transform, wl_egl_buffer->transform);
+ }
wl_egl_surface->latest_transform = wl_egl_buffer->transform;
- wl_surface_set_buffer_transform(wl_surface, wl_egl_buffer->transform);
}
if (wl_egl_window) {
wayland_tbm_client_set_buffer_serial(wl_egl_display->wl_tbm_client,
(void *)wl_egl_buffer->wl_buffer,
wl_egl_buffer->serial);
-
+#if TIZEN_FEATURE_ENABLE
if (wl_egl_display->use_explicit_sync &&
- wl_egl_surface->surface_sync) {
+ wl_egl_buffer->acquire_fence_fd != -1) {
zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_egl_surface->surface_sync,
wl_egl_buffer->acquire_fence_fd);
TPL_DEBUG("add explicit_sync_release_listener.");
}
}
+#endif
wl_surface_commit(wl_surface);
wl_display_flush(wl_egl_display->wl_display);
- TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+ TRACE_ASYNC_BEGIN((intptr_t)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
wl_egl_buffer->bo_name);
tpl_gmutex_lock(&wl_egl_buffer->mutex);
wl_egl_buffer->need_to_commit = TPL_FALSE;
wl_egl_buffer->status = COMMITTED;
+ if (wl_egl_surface->last_enq_buffer == wl_egl_buffer->tbm_surface)
+ wl_egl_surface->last_enq_buffer = NULL;
tpl_gcond_signal(&wl_egl_buffer->cond);
wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,
wl_egl_buffer->bo_name);
- if (wl_egl_display->use_wait_vblank &&
+ if (wl_egl_surface->vblank != NULL &&
_thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
TPL_ERR("Failed to set wait vblank.");
}
tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
- wl_display_flush(wl_egl_display->wl_display);
+ if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+ __tpl_list_remove_data(wl_egl_surface->vblank->waiting_buffers,
+ (void *)wl_egl_buffer,
+ TPL_FIRST,
+ NULL);
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+ }
+
+ if (wl_egl_display) {
+ if (wl_egl_buffer->wl_buffer) {
+ wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
+ (void *)wl_egl_buffer->wl_buffer);
+ wl_egl_buffer->wl_buffer = NULL;
+ }
- if (wl_egl_buffer->wl_buffer) {
- wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
- (void *)wl_egl_buffer->wl_buffer);
- wl_egl_buffer->wl_buffer = NULL;
+ wl_display_flush(wl_egl_display->wl_display);
}
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+#if TIZEN_FEATURE_ENABLE
if (wl_egl_buffer->buffer_release) {
zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
wl_egl_buffer->buffer_release = NULL;
}
+ if (wl_egl_buffer->release_fence_fd != -1) {
+ close(wl_egl_buffer->release_fence_fd);
+ wl_egl_buffer->release_fence_fd = -1;
+ }
+#endif
+
if (wl_egl_buffer->waiting_source) {
tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
wl_egl_buffer->waiting_source = NULL;
wl_egl_buffer->tbm_surface = NULL;
wl_egl_buffer->bo_name = -1;
+ wl_egl_buffer->status = RELEASED;
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ tpl_gmutex_clear(&wl_egl_buffer->mutex);
+ tpl_gcond_clear(&wl_egl_buffer->cond);
free(wl_egl_buffer);
}
}
tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
}
+
+/* Check whether the given tbm_surface is currently tracked in
+ * wl_egl_surface->buffers[].
+ *
+ * Returns TPL_TRUE when the tbm_surface belongs to one of the
+ * wl_egl_buffers owned by this wl_egl_surface, otherwise TPL_FALSE
+ * (with an error log). NULL parameters return TPL_FALSE silently. */
+static tpl_bool_t
+_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface)
+{
+	int idx = 0;
+	tpl_bool_t ret = TPL_FALSE;
+
+	/* silent return */
+	if (!wl_egl_surface || !tbm_surface)
+		return ret;
+
+	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+	for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
+		tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
+		if (wl_egl_buffer && wl_egl_buffer->tbm_surface == tbm_surface) {
+			ret = TPL_TRUE;
+			break;
+		}
+	}
+
+	/* ret == TPL_FALSE already implies the loop ran to completion
+	 * (idx == BUFFER_ARRAY_SIZE), so checking ret alone is sufficient. */
+	if (ret == TPL_FALSE) {
+		TPL_ERR("tbm_surface(%p) is not owned by wl_egl_surface(%p)",
+				tbm_surface, wl_egl_surface);
+	}
+	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
+	return ret;
+}