tpl_wayland_egl_thread: Added an internal function to wait dequeueable until given timeout 70/161970/2
author: joonbum.ko <joonbum.ko@samsung.com>
Thu, 19 Oct 2017 06:54:45 +0000 (15:54 +0900)
committer: joonbum.ko <joonbum.ko@samsung.com>
Tue, 28 Nov 2017 10:47:59 +0000 (19:47 +0900)
 - Added TPL_ERROR_TIME_OUT to tpl_result_t.
 - Added free_queue_mutex and free_queue_cond to surf_source.

Change-Id: I8205d888570c58a22987b2d8ab9a36c1fdd10c6b
Signed-off-by: joonbum.ko <joonbum.ko@samsung.com>
src/tpl.h
src/tpl_wayland_egl_thread.c
src/tpl_wayland_egl_thread.h

index b8277ba..ff20ff4 100644 (file)
--- a/src/tpl.h
+++ b/src/tpl.h
@@ -207,7 +207,8 @@ typedef enum {
        TPL_ERROR_NONE = 0, /* Successfull */
        TPL_ERROR_INVALID_PARAMETER, /* Invalid parmeter */
        TPL_ERROR_INVALID_OPERATION, /* Invalid operation */
-       TPL_ERROR_OUT_OF_MEMORY /* Out of memory */
+       TPL_ERROR_OUT_OF_MEMORY, /* Out of memory */
+       TPL_ERROR_TIME_OUT /* Time out error */
 } tpl_result_t;
 
 /**
index 283f720..347c9ba 100644 (file)
@@ -112,6 +112,9 @@ struct _twe_wl_surf_source {
        tbm_surface_queue_h tbm_queue;
        twe_wl_disp_source *disp_source;
        twe_del_source *surf_del_source;
+
+       GMutex free_queue_mutex;
+       GCond free_queue_cond;
 };
 
 struct _twe_wl_buffer_info {
@@ -1285,6 +1288,8 @@ __cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer)
                        twe_wl_surf_source *surf_source = buf_info->surf_source;
                        tbm_surface_queue_error_e tsq_err;
 
+                       g_mutex_lock(&surf_source->free_queue_mutex);
+
                        tsq_err = tbm_surface_queue_release(surf_source->tbm_queue,
                                                                                                tbm_surface);
                        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
@@ -1303,6 +1308,9 @@ __cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer)
                                          tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
                        tbm_surface_internal_unref(tbm_surface);
 
+                       g_cond_signal(&surf_source->free_queue_cond);
+                       g_mutex_unlock(&surf_source->free_queue_mutex);
+
                        return;
                }
        } else {
@@ -2104,7 +2112,9 @@ _twe_thread_wl_surf_source_destroy(void *source)
                surf_source->committed_buffers = NULL;
        }
 
-       _twe_surface_buffer_flusher_fini(surf_source);
+       if (!disp_source->is_vulkan_dpy) {
+               _twe_surface_buffer_flusher_fini(surf_source);
+       }
 
        if (surf_source->tbm_queue) {
                tbm_surface_queue_destroy(surf_source->tbm_queue);
@@ -2133,6 +2143,9 @@ _twe_thread_wl_surf_source_destroy(void *source)
 
        TPL_OBJECT_UNLOCK(&disp_source->obj);
 
+       g_cond_clear(&surf_source->free_queue_cond);
+       g_mutex_clear(&surf_source->free_queue_mutex);
+
        g_source_remove_unix_fd(&surf_source->gsource, surf_source->tag);
        g_source_destroy(&surf_source->gsource);
        g_source_unref(&surf_source->gsource);
@@ -2239,6 +2252,9 @@ twe_surface_add(twe_thread* thread,
 
        g_source_attach(&source->gsource, g_main_loop_get_context(ctx->twe_loop));
 
+       g_mutex_init(&source->free_queue_mutex);
+       g_cond_init(&source->free_queue_cond);
+
        TPL_LOG_T("WL_EGL",
                          "gsource(%p) native_handle(%p) wl_surface(%p) event_fd(%d)",
                          source, native_handle, source->surf, source->event_fd);
@@ -2595,6 +2611,47 @@ twe_surface_set_sync_fd(tbm_surface_h tbm_surface, tbm_fd sync_fd)
 }
 
 tpl_result_t
+twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns)
+{
+       twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+       gint64 end_time;
+
+       if (!surf_source || g_source_is_destroyed(&surf_source->gsource)) {
+               TPL_ERR("Invalid parameter. surf_source(%p)", surf_source);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       /* wait until dequeueable */
+       g_mutex_lock(&surf_source->free_queue_mutex);
+
+       if (timeout_ns != UINT64_MAX)
+               end_time = g_get_monotonic_time() + (timeout_ns / 1000);
+
+       while (!tbm_surface_queue_can_dequeue(surf_source->tbm_queue, 0)) {
+               gboolean ret = FALSE;
+
+               if (timeout_ns != UINT64_MAX) {
+                       ret = g_cond_wait_until(&surf_source->free_queue_cond,
+                                                                       &surf_source->free_queue_mutex,
+                                                                       end_time);
+                       if (ret == FALSE) {
+                               TPL_WARN("time out to wait dequeueable.");
+                               g_mutex_unlock(&surf_source->free_queue_mutex);
+                               return TPL_ERROR_TIME_OUT;
+                       }
+               } else {
+                       g_cond_wait(&surf_source->free_queue_cond,
+                                               &surf_source->free_queue_mutex);
+               }
+       }
+
+       g_mutex_unlock(&surf_source->free_queue_mutex);
+
+       return TPL_ERROR_NONE;
+}
+
+
+tpl_result_t
 twe_check_native_handle_is_wl_display(tpl_handle_t display)
 {
        struct wl_interface *wl_egl_native_dpy = *(void **) display;
index 631ac8c..e21e6bd 100644 (file)
@@ -87,6 +87,9 @@ tpl_result_t
 twe_surface_set_sync_fd(tbm_surface_h tbm_surface, tbm_fd wait_fd);
 
 tpl_result_t
+twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns);
+
+tpl_result_t
 twe_check_native_handle_is_wl_display(tpl_handle_t display);
 
 tpl_result_t