wl_tbm_queue: add free_flush event 59/231759/2
authorChangyeon Lee <cyeon.lee@samsung.com>
Fri, 24 Apr 2020 05:52:16 +0000 (14:52 +0900)
committerJunkyeong Kim <jk0430.kim@samsung.com>
Mon, 27 Apr 2020 09:05:29 +0000 (09:05 +0000)
Change-Id: I3d20f0d1805f755a5b3585f63a482a393a0904a9

protocol/wayland-tbm.xml
src/wayland-tbm-client.c
src/wayland-tbm-server.c
src/wayland-tbm-server.h

index 64e426b1e24f988224e98f380734c14ec0ad4c49..87ffd4d150e4332cceef46df1a8eb76f1ab25d71 100644 (file)
 
     <event name="flush"/>
 
+    <event name="free_flush"/>
+
     <event name="buffer_usable">
       <arg name="id" type="object" interface="wl_buffer"/>
     </event>
index 031e160a51b3fba7aa575d769a3d7cdd938b6c06..4af8bf3bdc6ed216bb92b7487ea6874a64e7dae7 100644 (file)
@@ -1095,6 +1095,32 @@ handle_tbm_queue_deactive(void *data,
        }
 }
 
+/* Client-side handler for the wl_tbm_queue "free_flush" event.
+ *
+ * Releases the surfaces currently held by the client's tbm_surface_queue
+ * via tbm_surface_queue_free_flush(). The request is ignored (with a
+ * warning) while the queue is active with an active flush in progress.
+ *
+ * data:         the wayland_tbm_surface_queue bound when the listener
+ *               was registered
+ * wl_tbm_queue: protocol object that emitted the event (unused here)
+ */
+static void
+handle_tbm_queue_free_flush(void *data,
+                      struct wl_tbm_queue *wl_tbm_queue)
+{
+       struct wayland_tbm_surface_queue *queue_info =
+                               (struct wayland_tbm_surface_queue *)data;
+       tbm_surface_queue_h tbm_queue = NULL;
+
+       /* NOTE(review): tbm_queue is read here before taking the lock;
+        * harmless for logging but racy if the field can change — confirm. */
+       WL_TBM_LOG("free flush surface_queue:%p", queue_info->tbm_queue);
+
+       pthread_mutex_lock(&queue_info->lock);
+
+       if (queue_info->is_active && queue_info->active_flush) {
+               /* fixed typos in the warning: "queueu" -> "queue",
+                * "is activate" -> "is active" */
+               WL_TBM_LOG_E("warning: Cannot flush the tbm_surface_queue. The queue is active.");
+               pthread_mutex_unlock(&queue_info->lock);
+               return;
+       }
+
+       tbm_queue = queue_info->tbm_queue;
+
+       pthread_mutex_unlock(&queue_info->lock);
+
+       /* flush the allocated surfaces at the client */
+       tbm_surface_queue_free_flush(tbm_queue);
+}
+
 static void
 handle_tbm_queue_flush(void *data,
                       struct wl_tbm_queue *wl_tbm_queue)
@@ -1159,6 +1185,7 @@ const struct wl_tbm_queue_listener wl_tbm_queue_listener = {
        handle_tbm_queue_active,
        handle_tbm_queue_deactive,
        handle_tbm_queue_flush,
+       handle_tbm_queue_free_flush,
        handle_tbm_queue_buffer_usable,
 };
 
index c41f3484091889652cc02ef9b683a96982c9737a..3c1c59e6a9de5e7c8e7c28cb8c6b3c4d655cf833 100644 (file)
@@ -1160,6 +1160,20 @@ wayland_tbm_server_client_queue_flush(struct wayland_tbm_client_queue *cqueue)
        wl_tbm_queue_send_flush(cqueue->wl_tbm_queue);
 }
 
+/* Server-side API: ask the client owning @cqueue to free the surfaces
+ * allocated in its tbm_surface_queue by sending the wl_tbm_queue
+ * "free_flush" event (handled client-side by tbm_surface_queue_free_flush).
+ *
+ * Returns immediately when cqueue or its wl_tbm_queue resource is NULL
+ * (WL_TBM_RETURN_IF_FAIL — macro defined elsewhere; presumably logs the
+ * failed condition — verify).
+ */
+void
+wayland_tbm_server_client_queue_free_flush(struct wayland_tbm_client_queue *cqueue)
+{
+       WL_TBM_RETURN_IF_FAIL(cqueue != NULL);
+       WL_TBM_RETURN_IF_FAIL(cqueue->wl_tbm_queue != NULL);
+
+#ifdef DEBUG_TRACE
+       WL_TBM_TRACE("    pid:%d", cqueue->pid);
+#endif
+       WL_TBM_LOG("send free flush queue pid:%d", cqueue->pid);
+
+       /* emits the protocol event; the client listener performs the flush */
+       wl_tbm_queue_send_free_flush(cqueue->wl_tbm_queue);
+}
+
 void
 wayland_tbm_server_increase_buffer_sync_timeline(struct wayland_tbm_server *tbm_srv,
                               struct wl_resource *wl_buffer, unsigned int count)
index 6deea204454c553061201b85c28272f069975cce..df3cc25b7d88fa5d88f5e6fed472f521ce080868 100644 (file)
@@ -110,6 +110,9 @@ wayland_tbm_server_client_queue_export_buffer2(struct wayland_tbm_client_queue *
 void
 wayland_tbm_server_client_queue_flush(struct wayland_tbm_client_queue *cqueue);
 
+void
+wayland_tbm_server_client_queue_free_flush(struct wayland_tbm_client_queue *cqueue);
+
 void
 wayland_tbm_server_increase_buffer_sync_timeline(struct wayland_tbm_server *tbm_srv,
                               struct wl_resource *wl_buffer, unsigned int count);