}
}
+/* Listener callback for the wl_tbm_queue "free_flush" event.
+ *
+ * data         - listener user data: a struct wayland_tbm_surface_queue *
+ * wl_tbm_queue - protocol object that emitted the event (unused here)
+ *
+ * Releases the free (unallocated-to-client) surfaces of the client-side
+ * tbm_surface_queue.  The flush is skipped while the queue is active and
+ * an active flush is set, since freeing buffers then would be unsafe.
+ */
+static void
+handle_tbm_queue_free_flush(void *data,
+ struct wl_tbm_queue *wl_tbm_queue)
+{
+ struct wayland_tbm_surface_queue *queue_info =
+ (struct wayland_tbm_surface_queue *)data;
+ tbm_surface_queue_h tbm_queue = NULL;
+
+ /* NOTE(review): queue_info->tbm_queue is read here before taking the
+  * lock — presumably the pointer is immutable after creation; confirm. */
+ WL_TBM_LOG("free flush surface_queue:%p", queue_info->tbm_queue);
+
+ pthread_mutex_lock(&queue_info->lock);
+
+ if (queue_info->is_active && queue_info->active_flush) {
+ WL_TBM_LOG_E("warning: Cannot flush the tbm_surface_queue. The queue is active.");
+ pthread_mutex_unlock(&queue_info->lock);
+ return;
+ }
+
+ /* snapshot the handle under the lock, flush outside it to avoid
+  * holding the mutex across the tbm call */
+ tbm_queue = queue_info->tbm_queue;
+
+ pthread_mutex_unlock(&queue_info->lock);
+
+ /* flush the allocated surfaces at the client */
+ tbm_surface_queue_free_flush(tbm_queue);
+}
+
static void
handle_tbm_queue_flush(void *data,
struct wl_tbm_queue *wl_tbm_queue)
handle_tbm_queue_active,
handle_tbm_queue_deactive,
handle_tbm_queue_flush,
+ handle_tbm_queue_free_flush,
handle_tbm_queue_buffer_usable,
};
wl_tbm_queue_send_flush(cqueue->wl_tbm_queue);
}
+/* Request a client to free-flush its tbm_surface_queue.
+ *
+ * cqueue - client queue handle; must be non-NULL and hold a valid
+ *          wl_tbm_queue resource (both guarded below).
+ *
+ * Sends the "free_flush" protocol event to the client, which releases the
+ * free buffers of its surface queue on the client side.
+ */
+void
+wayland_tbm_server_client_queue_free_flush(struct wayland_tbm_client_queue *cqueue)
+{
+ WL_TBM_RETURN_IF_FAIL(cqueue != NULL);
+ WL_TBM_RETURN_IF_FAIL(cqueue->wl_tbm_queue != NULL);
+
+#ifdef DEBUG_TRACE
+ WL_TBM_TRACE(" pid:%d", cqueue->pid);
+#endif
+ WL_TBM_LOG("send free flush queue pid:%d", cqueue->pid);
+
+ wl_tbm_queue_send_free_flush(cqueue->wl_tbm_queue);
+}
+
void
wayland_tbm_server_increase_buffer_sync_timeline(struct wayland_tbm_server *tbm_srv,
struct wl_resource *wl_buffer, unsigned int count)
void
wayland_tbm_server_client_queue_flush(struct wayland_tbm_client_queue *cqueue);
+void
+wayland_tbm_server_client_queue_free_flush(struct wayland_tbm_client_queue *cqueue);
+
void
wayland_tbm_server_increase_buffer_sync_timeline(struct wayland_tbm_server *tbm_srv,
struct wl_resource *wl_buffer, unsigned int count);