Added an internal API to get release fence from buf_info.
author Joonbum Ko <joonbum.ko@samsung.com>
Mon, 1 Jun 2020 11:11:29 +0000 (20:11 +0900)
committer SooChan Lim <sc1.lim@samsung.com>
Mon, 10 Aug 2020 08:15:11 +0000 (17:15 +0900)
Change-Id: I56e536cea0f56368e8519d79082327fd8f052242
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_wayland_egl_thread.c
src/tpl_wayland_egl_thread.h
src/tpl_wl_egl_thread.c

index 3b4fdce..291afa0 100755 (executable)
@@ -1897,6 +1897,7 @@ _twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source,
        buf_info->sync_timeline = -1;
        buf_info->is_vk_image = surf_source->disp_source->is_vulkan_dpy;
        buf_info->commit_sync_ts_backup = ++surf_source->commit_sync_timestamp;
+       buf_info->release_fence_fd = -1;
 
        if (buf_info->is_vk_image) {
                buf_info->sync_timeline = tbm_sync_timeline_create();
@@ -3730,6 +3731,27 @@ twe_surface_create_sync_fd(tbm_surface_h tbm_surface)
        return sync_fd;
 }
 
+tbm_fd
+twe_surface_get_buffer_release_fence_fd(twe_surface_h twe_surface,
+                                                                               tbm_surface_h tbm_surface)
+{
+       twe_wl_buffer_info *buf_info = NULL;
+       twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+       tbm_fd release_fence_fd = -1;
+
+       tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+                                                                          (void **)&buf_info);
+       if (surf_source->use_surface_sync &&
+               surf_source->disp_source->use_explicit_sync &&
+               buf_info) {
+               release_fence_fd = buf_info->release_fence_fd;
+               TPL_DEBUG("surf_source(%p) buf_info(%p) release_fence_fd(%d)",
+                                 surf_source, buf_info, release_fence_fd);
+       }
+
+       return release_fence_fd;
+}
+
 tpl_result_t
 twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns)
 {
index 9252fa9..208e243 100755 (executable)
@@ -91,6 +91,10 @@ twe_surface_set_sync_fd(twe_surface_h twe_surface,
 tbm_fd
 twe_surface_create_sync_fd(tbm_surface_h tbm_surface);
 
+tbm_fd
+twe_surface_get_buffer_release_fence_fd(twe_surface_h twe_surface,
+                                                                               tbm_surface_h tbm_surface);
+
 tpl_result_t
 twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns);
 
index 1915fc9..67ed2eb 100755 (executable)
@@ -670,7 +670,7 @@ __tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
 
 static tbm_surface_h
 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
-                                                                                tbm_fd *sync_fence)
+                                                                       tbm_fd *sync_fence)
 {
        TPL_ASSERT(surface);
        TPL_ASSERT(surface->backend.data);
@@ -688,9 +688,6 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
        int bo_name = 0;
        tpl_result_t lock_ret = TPL_FALSE;
 
-       if (sync_fence)
-               *sync_fence = -1;
-
        TPL_OBJECT_UNLOCK(surface);
        tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
                                wayland_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
@@ -777,6 +774,15 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
        }
 
        tbm_surface_internal_ref(tbm_surface);
+
+       /* If twe_surface_get_buffer_release_fence_fd returns -1,
+        * the tbm_surface can be used immediately.
+        * If not, the user (EGL) has to wait until it is signaled. */
+       if (sync_fence) {
+               *sync_fence = twe_surface_get_buffer_release_fence_fd(
+                                                       wayland_egl_surface->twe_surface, tbm_surface);
+       }
+
        bo_name = tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
 
        if (surface->is_frontbuffer_mode && is_activated)