struct wl_proxy *wl_proxy; /* wl_buffer proxy */
};
+/* Global list of wl_buffer proxies that have been committed via
+ * wl_surface_commit and whose release event has not yet been processed
+ * (pushed in the commit path, removed in the buffer-release callback). */
+static tpl_list_t *committed_wl_buffers = NULL;
+/* Protects committed_wl_buffers.
+ * NOTE(review): this mutex is initialized with pthread_mutex_init() only
+ * inside the display-init path, and only when committed_wl_buffers was NULL,
+ * yet it is locked unconditionally elsewhere — consider the static
+ * PTHREAD_MUTEX_INITIALIZER instead; confirm init ordering against callers. */
+static pthread_mutex_t g_list_mutex;
+
static const struct wl_buffer_listener buffer_release_listener;
static int tpl_wayland_egl_buffer_key;
goto free_wl_display;
}
+ if (!committed_wl_buffers) {
+ committed_wl_buffers = __tpl_list_alloc();
+ if (!committed_wl_buffers)
+ TPL_ERR("Failed to allocate committed_wl_buffers list.");
+ if (pthread_mutex_init(&g_list_mutex, NULL) != 0)
+ TPL_ERR("g_list_mutex init failed.");
+ }
+
wayland_egl_display->wl_dpy = wl_dpy;
__tpl_wayland_egl_display_buffer_flusher_init(wayland_egl_display);
free(wayland_egl_display);
}
+ if (pthread_mutex_lock(&g_list_mutex) == 0) {
+ if (committed_wl_buffers)
+ __tpl_list_free(committed_wl_buffers, NULL);
+ committed_wl_buffers = NULL;
+ pthread_mutex_unlock(&g_list_mutex);
+ }
+
+ if (pthread_mutex_destroy(&g_list_mutex) != 0)
+ TPL_ERR("Failed to destroy g_list_mutex");
+
display->backend.data = NULL;
}
tbm_surface_queue_destroy(wayland_egl_surface->tbm_queue);
wayland_egl_surface->tbm_queue = NULL;
queue_create_fail:
- __tpl_list_free(wayland_egl_surface->attached_buffers, NULL);
-alloc_dequeue_buffers_fail:
__tpl_list_free(wayland_egl_surface->dequeued_buffers, NULL);
+alloc_dequeue_buffers_fail:
+ __tpl_list_free(wayland_egl_surface->attached_buffers, NULL);
alloc_attached_buffers_fail:
__tpl_object_fini(&wayland_egl_surface->base);
tpl_object_init_fail:
/* When surface is destroyed, unreference tbm_surface which tracked by
* the list of attached_buffers in order to free the created resources.
* (tpl_wayland_egl_buffer_t or wl_buffer) */
+ TPL_OBJECT_LOCK(&wayland_egl_surface->base);
if (wayland_egl_surface->attached_buffers) {
- TPL_OBJECT_LOCK(&wayland_egl_surface->base);
while (!__tpl_list_is_empty(wayland_egl_surface->attached_buffers)) {
tbm_surface_queue_error_e tsq_err;
tbm_surface_h tbm_surface =
__tpl_list_free(wayland_egl_surface->attached_buffers, NULL);
wayland_egl_surface->attached_buffers = NULL;
- TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
}
+ TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
if (lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->wl_event_mutex);
wayland_egl_buffer->wl_proxy,
wayland_egl_buffer->width, wayland_egl_buffer->height);
+ TPL_OBJECT_LOCK(&wayland_egl_surface->base);
if (wayland_egl_surface->attached_buffers) {
- TPL_OBJECT_LOCK(&wayland_egl_surface->base);
/* Start tracking of this tbm_surface until release_cb called. */
__tpl_list_push_back(wayland_egl_surface->attached_buffers,
(void *)tbm_surface);
- TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
+ }
+ TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
+
+ if (pthread_mutex_lock(&g_list_mutex) == 0) {
+ if (committed_wl_buffers) {
+ /* Start tracking of wl_buffer which is committed by this wayland_egl_surface */
+ __tpl_list_push_back(committed_wl_buffers,
+ (void *)wayland_egl_buffer->wl_proxy);
+ }
+ pthread_mutex_unlock(&g_list_mutex);
}
/* TPL_WAIT_VBLANK = 1 */
tbm_surface_h tbm_surface = NULL;
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- TPL_ASSERT(data);
+ if (proxy && (pthread_mutex_lock(&g_list_mutex) == 0)) {
+ if (committed_wl_buffers) {
+ /* Look for the given wl_proxy in the global list(committed_wl_buffers),
+ * whether its release event has not been processed since wl_surface_commit
+ * with this wl_proxy */
+ tpl_list_node_t *node =
+ __tpl_list_find_node(committed_wl_buffers, (void *)proxy,
+ TPL_FIRST, NULL);
+
+ /* If the proxy can not be found in the committed_wl_buffers list,
+ * it has not been committed or has already been released.
+ * In this case, it is not an error, but the log will be printed. */
+ if (!node) {
+ TPL_ERR("wl_buffer(%p) already has been released.", proxy);
+ pthread_mutex_unlock(&g_list_mutex);
+ return;
+ }
+ }
+ pthread_mutex_unlock(&g_list_mutex);
+ }
tbm_surface = (tbm_surface_h) data;
if (wayland_egl_buffer->need_to_release) {
wayland_egl_surface = wayland_egl_buffer->wayland_egl_surface;
+ TPL_OBJECT_LOCK(&wayland_egl_surface->base);
if (wayland_egl_surface->attached_buffers) {
- TPL_OBJECT_LOCK(&wayland_egl_surface->base);
/* Stop tracking of this released tbm_surface. */
__tpl_list_remove_data(wayland_egl_surface->attached_buffers,
(void *)tbm_surface, TPL_FIRST, NULL);
- TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
}
+ TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
tsq_err = tbm_surface_queue_release(wayland_egl_surface->tbm_queue,
tbm_surface);
wayland_egl_buffer->need_to_release = TPL_FALSE;
tbm_surface_internal_unref(tbm_surface);
+
+ if (pthread_mutex_lock(&g_list_mutex) == 0) {
+ /* This wl_buffer should be removed from committed_wl_buffers list. */
+ __tpl_list_remove_data(committed_wl_buffers, (void *)proxy,
+ TPL_FIRST, NULL);
+ pthread_mutex_unlock(&g_list_mutex);
+ }
+
} else {
TPL_WARN("No need to release buffer | wl_buffer(%p) tbm_surface(%p) bo(%d)",
proxy, tbm_surface,
tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
}
}
+ } else {
+ TPL_ERR("Failed to process release_event. Invalid tbm_surface(%p)", tbm_surface);
}
}
* Then, client does not need to wait for release_callback to unreference
* attached buffer.
*/
+
+ TPL_OBJECT_LOCK(&wayland_egl_surface->base);
if (wayland_egl_surface->attached_buffers) {
- TPL_OBJECT_LOCK(&wayland_egl_surface->base);
while (!__tpl_list_is_empty(wayland_egl_surface->attached_buffers)) {
tbm_surface_queue_error_e tsq_err;
tbm_surface_h tbm_surface =
TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
tbm_surface, tsq_err);
}
- TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
+ }
+ TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
+
+ if (pthread_mutex_lock(&g_list_mutex) == 0) {
+ __tpl_list_fini(committed_wl_buffers, NULL);
+ pthread_mutex_unlock(&g_list_mutex);
}
if (lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->wl_event_mutex);