From 9f547b050595c438840fde344664fb67cb11486f Mon Sep 17 00:00:00 2001
From: "deasung.kim"
Date: Wed, 24 Aug 2016 17:14:20 +0900
Subject: [PATCH] libtpl-egl: change the usage of libtbm_sync api

Remove the tbm_sync objects and use fds instead, following the
tbm_sync-related API change.

Reference: https://review.tizen.org/gerrit/#/c/85169/ (submitted)

Change-Id: Ia291424c127487bed3af99759b6f37e9bd76c27a
---
 src/tpl.h                |  4 +--
 src/tpl_gbm.c            | 11 +++++---
 src/tpl_internal.h       |  4 +--
 src/tpl_surface.c        |  8 +++---
 src/tpl_tbm.c            | 11 +++++---
 src/tpl_wayland_egl.c    | 11 +++++---
 src/tpl_wayland_vk_wsi.c | 66 ++++++++++++++++++++----------------
 7 files changed, 59 insertions(+), 56 deletions(-)

diff --git a/src/tpl.h b/src/tpl.h
index 1196e26..585165d 100644
--- a/src/tpl.h
+++ b/src/tpl.h
@@ -477,7 +477,7 @@ tpl_surface_dequeue_buffer(tpl_surface_t *surface);
  */
 tbm_surface_h
 tpl_surface_dequeue_buffer_with_sync(tpl_surface_t *surface, uint64_t timeout_ns,
-				     tbm_sync_fence_h *sync_fence);
+				     tbm_fd *sync_fence);
 
 /**
  * Post a given tbm_surface.
@@ -553,7 +553,7 @@ tpl_result_t
 tpl_surface_enqueue_buffer_with_damage_and_sync(tpl_surface_t *surface,
 						tbm_surface_h tbm_surface,
 						int num_rects, const int *rects,
-						tbm_sync_fence_h sync_fence);
+						tbm_fd sync_fence);
 
 /**
  * Set frame interval of the given TPL surface.
  *
diff --git a/src/tpl_gbm.c b/src/tpl_gbm.c
index 7465f5d..02242cb 100644
--- a/src/tpl_gbm.c
+++ b/src/tpl_gbm.c
@@ -335,7 +335,7 @@ __tpl_gbm_surface_fini(tpl_surface_t *surface)
 static tpl_result_t
 __tpl_gbm_surface_enqueue_buffer(tpl_surface_t *surface,
 				 tbm_surface_h tbm_surface, int num_rects,
-				 const int *rects, tbm_sync_fence_h sync_fence)
+				 const int *rects, tbm_fd sync_fence)
 {
 	tbm_bo bo;
 
@@ -373,6 +373,11 @@ __tpl_gbm_surface_enqueue_buffer(tpl_surface_t *surface,
 		return TPL_ERROR_INVALID_PARAMETER;
 	}
 
+	if (sync_fence != -1) {
+		tbm_sync_fence_wait(sync_fence, -1);
+		close(sync_fence);
+	}
+
 	if (tbm_surface_queue_enqueue(gbm_surface->tbm_queue, tbm_surface)
 	    != TBM_SURFACE_QUEUE_ERROR_NONE) {
 		TPL_ERR("tbm_surface_queue_enqueue failed. tbm_surface_queue(%p) tbm_surface(%p)",
@@ -397,7 +402,7 @@ __tpl_gbm_surface_validate(tpl_surface_t *surface)
 
 static tbm_surface_h
 __tpl_gbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
-				 tbm_sync_fence_h *sync_fence)
+				 tbm_fd *sync_fence)
 {
 	tbm_bo bo;
 	tbm_surface_h tbm_surface = NULL;
@@ -412,7 +417,7 @@ __tpl_gbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
 	TPL_ASSERT(surface->display->native_handle);
 
 	if (sync_fence)
-		*sync_fence = NULL;
+		*sync_fence = -1;
 
 	gbm_surface = (tpl_gbm_surface_t *)surface->backend.data;
diff --git a/src/tpl_internal.h b/src/tpl_internal.h
index adcb48c..f9d0f71 100644
--- a/src/tpl_internal.h
+++ b/src/tpl_internal.h
@@ -60,11 +60,11 @@ struct _tpl_surface_backend {
 	void (*fini)(tpl_surface_t *surface);
 	tpl_bool_t (*validate)(tpl_surface_t *surface);
 	tbm_surface_h (*dequeue_buffer)(tpl_surface_t *surface, uint64_t timeout_ns,
-					tbm_sync_fence_h *sync_fence);
+					tbm_fd *sync_fence);
 	tpl_result_t (*enqueue_buffer)(tpl_surface_t *surface,
 				       tbm_surface_h tbm_surface,
 				       int num_rects, const int *rects,
-				       tbm_sync_fence_h sync_fence);
+				       tbm_fd sync_fence);
 	tpl_result_t (*get_swapchain_buffers)(tpl_surface_t *surface,
 					      tbm_surface_h **buffers,
 					      int *buffer_count);
diff --git a/src/tpl_surface.c b/src/tpl_surface.c
index a5d6c7d..c9b2c88 100644
--- a/src/tpl_surface.c
+++ b/src/tpl_surface.c
@@ -186,7 +186,7 @@ tpl_surface_dequeue_buffer(tpl_surface_t *surface)
 tbm_surface_h
 tpl_surface_dequeue_buffer_with_sync(tpl_surface_t *surface,
 				     uint64_t timeout_ns,
-				     tbm_sync_fence_h *sync_fence)
+				     tbm_fd *sync_fence)
 {
 	TPL_ASSERT(surface);
 
@@ -230,7 +230,7 @@ tpl_result_t
 tpl_surface_enqueue_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface)
 {
 	return tpl_surface_enqueue_buffer_with_damage_and_sync(surface, tbm_surface,
-							       0, NULL, NULL);
+							       0, NULL, -1);
 }
 
 tpl_result_t
@@ -239,14 +239,14 @@ tpl_surface_enqueue_buffer_with_damage(tpl_surface_t *surface,
 					int num_rects, const int *rects)
 {
 	return tpl_surface_enqueue_buffer_with_damage_and_sync(surface, tbm_surface,
-							       num_rects, rects, NULL);
+							       num_rects, rects, -1);
 }
 
 tpl_result_t
 tpl_surface_enqueue_buffer_with_damage_and_sync(tpl_surface_t *surface,
 						tbm_surface_h tbm_surface,
 						int num_rects, const int *rects,
-						tbm_sync_fence_h sync_fence)
+						tbm_fd sync_fence)
 {
 	tpl_result_t ret = TPL_ERROR_NONE;
diff --git a/src/tpl_tbm.c b/src/tpl_tbm.c
index 58e55cf..1dd2207 100644
--- a/src/tpl_tbm.c
+++ b/src/tpl_tbm.c
@@ -235,7 +235,7 @@ __tpl_tbm_surface_fini(tpl_surface_t *surface)
 static tpl_result_t
 __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface,
 				 tbm_surface_h tbm_surface, int num_rects,
-				 const int *rects, tbm_sync_fence_h sync_fence)
+				 const int *rects, tbm_fd sync_fence)
 {
 	TPL_ASSERT(surface);
 	TPL_ASSERT(surface->display);
@@ -264,6 +264,11 @@ __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface,
 		return TPL_ERROR_INVALID_PARAMETER;
 	}
 
+	if (sync_fence != -1) {
+		tbm_sync_fence_wait(sync_fence, -1);
+		close(sync_fence);
+	}
+
 	if (tbm_surface_queue_enqueue(tbm_queue, tbm_surface)
 	    != TBM_SURFACE_QUEUE_ERROR_NONE) {
 		TPL_ERR("tbm_surface_queue_enqueue failed. tbm_queue(%p) tbm_surface(%p)",
@@ -284,7 +289,7 @@ __tpl_tbm_surface_validate(tpl_surface_t *surface)
 
 static tbm_surface_h
 __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
-				 tbm_sync_fence_h *sync_fence)
+				 tbm_fd *sync_fence)
 {
 	tbm_surface_h tbm_surface = NULL;
 	tbm_surface_queue_h tbm_queue = NULL;
@@ -296,7 +301,7 @@ __tpl_tbm_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
 	TPL_ASSERT(surface->display->native_handle);
 
 	if (sync_fence)
-		*sync_fence = NULL;
+		*sync_fence = -1;
 
 	tbm_queue = (tbm_surface_queue_h)surface->native_handle;
diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c
index 3dd502e..25b72e7 100644
--- a/src/tpl_wayland_egl.c
+++ b/src/tpl_wayland_egl.c
@@ -664,7 +664,7 @@ __tpl_wayland_egl_surface_commit(tpl_surface_t *surface,
 static tpl_result_t
 __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface,
 					 tbm_surface_h tbm_surface,
-					 int num_rects, const int *rects, tbm_sync_fence_h sync_fence)
+					 int num_rects, const int *rects, tbm_fd sync_fence)
 {
 	TPL_ASSERT(surface);
 	TPL_ASSERT(surface->display);
@@ -695,6 +695,11 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface,
 		__tpl_wayland_egl_surface_wait_vblank(surface);
 	}
 
+	if (sync_fence != -1) {
+		tbm_sync_fence_wait(sync_fence, -1);
+		close(sync_fence);
+	}
+
 	/* Stop tracking of this render_done tbm_surface. */
 	__tpl_list_remove_data(wayland_egl_surface->dequeued_buffers,
 			       (void *)tbm_surface, TPL_FIRST, NULL);
@@ -870,7 +875,7 @@ __tpl_wayland_egl_surface_wait_dequeuable(tpl_surface_t *surface)
 
 static tbm_surface_h
 __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
-					 tbm_sync_fence_h *sync_fence)
+					 tbm_fd *sync_fence)
 {
 	TPL_ASSERT(surface);
 	TPL_ASSERT(surface->backend.data);
@@ -887,7 +892,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou
 	tbm_surface_queue_error_e tsq_err = 0;
 
 	if (sync_fence)
-		*sync_fence = NULL;
+		*sync_fence = -1;
 
 	/* Check whether the surface was resized by wayland_egl */
 	if (wayland_egl_surface->resized == TPL_TRUE) {
diff --git a/src/tpl_wayland_vk_wsi.c b/src/tpl_wayland_vk_wsi.c
index 1a61cdd..fd38861 100644
--- a/src/tpl_wayland_vk_wsi.c
+++ b/src/tpl_wayland_vk_wsi.c
@@ -59,11 +59,11 @@ struct _tpl_wayland_vk_wsi_buffer {
 	tpl_display_t *display;
 	tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface;
 	struct wl_proxy *wl_proxy;
-	tbm_sync_timeline_h sync_timeline;
+	tbm_fd sync_timeline;
 	unsigned int sync_timestamp;
 
 #if USE_WORKER_THREAD == 1
-	tbm_sync_fence_h wait_sync;
+	tbm_fd wait_sync;
 #endif
 };
@@ -368,7 +368,7 @@ static tpl_result_t
 __tpl_wayland_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
 					    tbm_surface_h tbm_surface,
 					    int num_rects, const int *rects,
-					    tbm_sync_fence_h sync_fence)
+					    tbm_fd sync_fence)
 {
 	TPL_ASSERT(surface);
 
@@ -414,13 +414,12 @@ __tpl_wayland_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
 	}
 
 #if USE_WORKER_THREAD == 0
-	if (sync_fence != NULL) {
+	if (sync_fence != -1) {
 		/* non worker thread mode */
-		tbm_sync_error_e sync_err;
-		sync_err = tbm_sync_fence_wait(sync_fence, -1);
-		if (sync_err != TBM_SYNC_ERROR_NONE)
-			TPL_ERR("Failed to wait sync. | error: %d", errno);
-		tbm_sync_fence_destroy(sync_fence);
+		/* TODO: set max wait time */
+		if (tbm_sync_fence_wait(sync_fence, -1) != 1)
+			TPL_ERR("Failed to wait sync. | error: %d(%s)", errno, strerror(errno));
+		close(sync_fence);
 	}
 
 	tsq_err = tbm_surface_queue_acquire(wayland_vk_wsi_surface->tbm_queue,
@@ -464,7 +463,7 @@ __tpl_wayland_vk_wsi_surface_validate(tpl_surface_t *surface)
 
 static tbm_surface_h
 __tpl_wayland_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
-					    uint64_t timeout_ns, tbm_sync_fence_h *sync_fence)
+					    uint64_t timeout_ns, tbm_fd *sync_fence)
 {
 	TPL_ASSERT(surface);
 	TPL_ASSERT(surface->backend.data);
@@ -478,10 +477,9 @@ __tpl_wayland_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
 		(tpl_wayland_vk_wsi_display_t *)surface->display->backend.data;
 	struct wl_proxy *wl_proxy = NULL;
 	tbm_surface_queue_error_e tsq_err = 0;
-	tbm_sync_error_e sync_err;
 
 	if (sync_fence)
-		*sync_fence = NULL;
+		*sync_fence = -1;
 
 #if USE_WORKER_THREAD == 0
 	TPL_OBJECT_UNLOCK(surface);
@@ -553,14 +551,11 @@ __tpl_wayland_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
 					 tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
 			*sync_fence = tbm_sync_fence_create(wayland_vk_wsi_buffer->sync_timeline,
 							    name,
-							    wayland_vk_wsi_buffer->sync_timestamp,
-							    &sync_err);
-			if (*sync_fence == NULL || sync_err != TBM_SYNC_ERROR_NONE) {
-				TPL_ERR("Failed to create TBM sync fence!");
-				/* ??? destroy and return NULL */
-			}
+							    wayland_vk_wsi_buffer->sync_timestamp);
+			if (*sync_fence == -1)
+				TPL_ERR("Failed to create TBM sync fence: %d(%s)", errno, strerror(errno));
 		} else {
-			*sync_fence = NULL;
+			*sync_fence = -1;
 		}
 	}
 	return tbm_surface;
@@ -585,10 +580,10 @@ __tpl_wayland_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
 		/* can change signaled sync */
 		if (sync_fence)
-			*sync_fence = NULL;
-		wayland_vk_wsi_buffer->sync_timeline = tbm_sync_timeline_create(&sync_err);
-		if (wayland_vk_wsi_buffer->sync_timeline == NULL || sync_err != TBM_SYNC_ERROR_NONE) {
-			TPL_ERR("Failed to create TBM sync timeline!");
+			*sync_fence = -1;
+		wayland_vk_wsi_buffer->sync_timeline = tbm_sync_timeline_create();
+		if (wayland_vk_wsi_buffer->sync_timeline == -1) {
+			TPL_ERR("Failed to create TBM sync timeline: %d(%s)", errno, strerror(errno));
 			wl_proxy_destroy(wl_proxy);
 			tbm_surface_internal_unref(tbm_surface);
 			free(wayland_vk_wsi_buffer);
@@ -788,8 +783,8 @@ __tpl_wayland_vk_wsi_buffer_free(tpl_wayland_vk_wsi_buffer_t
 		wayland_tbm_client_destroy_buffer(wayland_vk_wsi_display->wl_tbm_client,
 						  (void *)wayland_vk_wsi_buffer->wl_proxy);
 
-	if (wayland_vk_wsi_buffer->sync_timeline != NULL)
-		tbm_sync_timeline_destroy(wayland_vk_wsi_buffer->sync_timeline);
+	if (wayland_vk_wsi_buffer->sync_timeline != -1)
+		close(wayland_vk_wsi_buffer->sync_timeline);
 
 	free(wayland_vk_wsi_buffer);
 }
@@ -878,7 +873,6 @@ static const struct wl_callback_listener frame_listener = {
 static void
 __cb_client_buffer_release_callback(void *data, struct wl_proxy *proxy)
 {
-	tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
 	tpl_wayland_vk_wsi_buffer_t *wayland_vk_wsi_buffer = NULL;
 	tbm_surface_h tbm_surface = NULL;
 
@@ -889,11 +883,8 @@ __cb_client_buffer_release_callback(void *data, struct wl_proxy *proxy)
 	wayland_vk_wsi_buffer =
 		__tpl_wayland_vk_wsi_get_wayland_buffer_from_tbm_surface(tbm_surface);
 
-	if (wayland_vk_wsi_buffer) {
-		wayland_vk_wsi_surface = wayland_vk_wsi_buffer->wayland_vk_wsi_surface;
-
+	if (wayland_vk_wsi_buffer)
 		tbm_surface_internal_unref(tbm_surface);
-	}
 }
 
 static const struct wl_buffer_listener buffer_release_listener = {
@@ -934,9 +925,8 @@ __tpl_wayland_vk_wsi_worker_thread_loop(void *arg)
 			ret = pthread_cond_timedwait(&wayland_vk_wsi_surface->dirty_queue_cond,
 						     &wayland_vk_wsi_surface->dirty_queue_mutex,
 						     &abs_time);
-			if (ret == ETIMEDOUT) {
+			if (ret == ETIMEDOUT)
 				timeout = TPL_TRUE;
-			}
 		}
 		if (timeout) {
 			pthread_mutex_unlock(&wayland_vk_wsi_surface->dirty_queue_mutex);
@@ -953,13 +943,11 @@ __tpl_wayland_vk_wsi_worker_thread_loop(void *arg)
 		wayland_vk_wsi_buffer =
 			__tpl_wayland_vk_wsi_get_wayland_buffer_from_tbm_surface(tbm_surface);
 		TPL_ASSERT(wayland_vk_wsi_buffer);
-		if (wayland_vk_wsi_buffer->wait_sync != NULL) {
-			tbm_sync_error_e sync_err;
-			sync_err = tbm_sync_fence_wait(wayland_vk_wsi_buffer->wait_sync, -1);
-			if (sync_err != TBM_SYNC_ERROR_NONE)
-				TPL_ERR("Failed to wait sync. | error: %d", errno);
-			tbm_sync_fence_destroy(wayland_vk_wsi_buffer->wait_sync);
-			wayland_vk_wsi_buffer->wait_sync = NULL;
+		if (wayland_vk_wsi_buffer->wait_sync != -1) {
+			if (tbm_sync_fence_wait(wayland_vk_wsi_buffer->wait_sync, -1) != 1)
+				TPL_ERR("Failed to wait sync. | error: %d(%s)", errno, strerror(errno));
+			close(wayland_vk_wsi_buffer->wait_sync);
+			wayland_vk_wsi_buffer->wait_sync = -1;
 		}
 
 		__tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface);
-- 
2.7.4
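
Note (not part of the patch itself): a minimal caller-side sketch of the fd-based
sync usage after this change. The header names and the surface setup below are
assumptions, and error handling is abbreviated; the point is that a fence is now a
plain tbm_fd, -1 means "no fence", and the owner waits on it with
tbm_sync_fence_wait() and releases it with close() instead of
tbm_sync_fence_destroy().

#include <stdint.h>     /* UINT64_MAX */
#include <unistd.h>     /* close() */
#include <tbm_sync.h>   /* tbm_sync_fence_wait(); assumed header name */
#include <tpl.h>

static void
draw_one_frame(tpl_surface_t *surface)
{
	tbm_fd sync_fence = -1;   /* -1 replaces the old NULL fence handle */
	tbm_surface_h buffer;

	/* The fence now comes back as an fd instead of a tbm_sync_fence_h. */
	buffer = tpl_surface_dequeue_buffer_with_sync(surface, UINT64_MAX,
						      &sync_fence);
	if (buffer == NULL)
		return;

	if (sync_fence != -1) {
		/* The caller owns the fd: wait on it, then close() it. */
		tbm_sync_fence_wait(sync_fence, -1);
		close(sync_fence);
		sync_fence = -1;
	}

	/* ... render into buffer ... */

	/* Passing -1 means "no fence", matching the updated
	 * tpl_surface_enqueue_buffer() convention. */
	tpl_surface_enqueue_buffer_with_damage_and_sync(surface, buffer,
							0, NULL, -1);
}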