From: Joonbum Ko
Date: Mon, 1 Jun 2020 09:55:36 +0000 (+0900)
Subject: Implemented buffer release method with zwp_linux_buffer_release_v1
X-Git-Tag: submit/tizen/20200810.080631~9
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7e059c8c32729fc7a13e774a8f6923fe28907cd9;p=platform%2Fcore%2Fuifw%2Flibtpl-egl.git

Implemented buffer release method with zwp_linux_buffer_release_v1

zwp_linux_buffer_release_v1

This object is instantiated in response to a
zwp_linux_surface_synchronization_v1.get_release request.

It provides an alternative to wl_buffer.release events, providing a
unique release from a single wl_surface.commit request. The release
event also supports explicit synchronization, providing a fence FD for
the client to synchronize against.

Exactly one event, either a fenced_release or an immediate_release,
will be emitted for the wl_surface.commit request. The compositor can
choose release by release which event it uses.

This event does not replace wl_buffer.release events; servers are
still required to send those events.

Once a buffer release object has delivered a 'fenced_release' or an
'immediate_release' event it is automatically destroyed.

Change-Id: I092fa619679d9d38bcedb7d2de324ddacceb5a2b
Signed-off-by: Joonbum Ko
---

diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c
index 884ad77..3b4fdce 100755
--- a/src/tpl_wayland_egl_thread.c
+++ b/src/tpl_wayland_egl_thread.c
@@ -197,6 +197,13 @@ struct _twe_wl_buffer_info {
 
 	/* for wayland_tbm_client_set_buffer_serial */
 	unsigned int serial;
+
+	/* to get release event via zwp_linux_buffer_release_v1 */
+	struct zwp_linux_buffer_release_v1 *buffer_release;
+
+	/* each buffer owns its release_fence_fd until it passes ownership
+	 * to EGL (see wait_for_buffer_release_fence) */
+	int release_fence_fd;
 };
 
 struct _twe_fence_wait_source {
@@ -1632,6 +1639,117 @@ static const struct wl_buffer_listener wl_buffer_release_listener = {
 	(void *)__cb_buffer_release_callback,
 };
 
+static void
+__cb_buffer_fenced_release(void *data,
+			   struct zwp_linux_buffer_release_v1 *release, int32_t fence)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	tbm_surface_h tbm_surface = (tbm_surface_h)data;
+
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+						   (void **)&buf_info);
+
+		if (buf_info && buf_info->need_to_release) {
+			twe_wl_surf_source *surf_source = buf_info->surf_source;
+			tbm_surface_queue_error_e tsq_err;
+
+			if (surf_source->committed_buffers) {
+				g_mutex_lock(&surf_source->surf_mutex);
+				__tpl_list_remove_data(surf_source->committed_buffers,
+						       (void *)tbm_surface,
+						       TPL_FIRST, NULL);
+				g_mutex_unlock(&surf_source->surf_mutex);
+			}
+
+			buf_info->need_to_release = TPL_FALSE;
+
+			zwp_linux_buffer_release_v1_destroy(buf_info->buffer_release);
+			buf_info->buffer_release = NULL;
+			buf_info->release_fence_fd = fence;
+
+			TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
+				   tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)),
+				   fence);
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ FENCED_RELEASE] BO(%d)",
+					tbm_bo_export(tbm_surface_internal_get_bo(
+						tbm_surface, 0)));
+
+			TPL_LOG_T(BACKEND,
+				  "[FENCED_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+				  buf_info->wl_buffer, tbm_surface,
+				  tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)),
+				  fence);
+
+			tsq_err = tbm_surface_queue_release(surf_source->tbm_queue,
+							    tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+			tbm_surface_internal_unref(tbm_surface);
+		}
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+	}
+}
+
+static void
+__cb_buffer_immediate_release(void *data,
+			      struct zwp_linux_buffer_release_v1 *release)
+{
+	twe_wl_buffer_info *buf_info = NULL;
+	tbm_surface_h tbm_surface = (tbm_surface_h)data;
+
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO,
+						   (void **)&buf_info);
+
+		if (buf_info && buf_info->need_to_release) {
+			twe_wl_surf_source *surf_source = buf_info->surf_source;
+			tbm_surface_queue_error_e tsq_err;
+
+			if (surf_source->committed_buffers) {
+				g_mutex_lock(&surf_source->surf_mutex);
+				__tpl_list_remove_data(surf_source->committed_buffers,
+						       (void *)tbm_surface,
+						       TPL_FIRST, NULL);
+				g_mutex_unlock(&surf_source->surf_mutex);
+			}
+
+			buf_info->need_to_release = TPL_FALSE;
+
+			zwp_linux_buffer_release_v1_destroy(buf_info->buffer_release);
+			buf_info->buffer_release = NULL;
+			buf_info->release_fence_fd = -1;
+
+			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
+				   tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
+			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ IMMEDIATE_RELEASE] BO(%d)",
+					tbm_bo_export(tbm_surface_internal_get_bo(
+						tbm_surface, 0)));
+
+			TPL_LOG_T(BACKEND,
+				  "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+				  buf_info->wl_buffer, tbm_surface,
+				  tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
+
+			tsq_err = tbm_surface_queue_release(surf_source->tbm_queue,
+							    tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+			tbm_surface_internal_unref(tbm_surface);
+		}
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+	}
+}
+
+static const struct zwp_linux_buffer_release_v1_listener explicit_sync_release_listner = {
+	__cb_buffer_fenced_release,
+	__cb_buffer_immediate_release,
+};
+
 static void
 _twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source,
 				tbm_surface_h tbm_surface)
@@ -1693,6 +1811,14 @@ _twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source,
 		g_mutex_unlock(&surf_source->surf_mutex);
 	}
 
+	if (surf_source->disp_source->use_explicit_sync &&
+		surf_source->use_surface_sync) {
+		buf_info->buffer_release =
+			zwp_linux_surface_synchronization_v1_get_release(surf_source->surface_sync);
+		zwp_linux_buffer_release_v1_add_listener(
+			buf_info->buffer_release, &explicit_sync_release_listner, tbm_surface);
+	}
+
 	TRACE_MARK("[SET_BUFFER_INFO] BO(%d)",
 		   tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
 
@@ -1785,8 +1911,16 @@ _twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source,
 				   buf_info->sync_timeline);
 	}
 
-	wl_buffer_add_listener((void *)buf_info->wl_buffer,
-			       &wl_buffer_release_listener, tbm_surface);
+	if (surf_source->disp_source->use_explicit_sync &&
+		surf_source->use_surface_sync) {
+		buf_info->buffer_release =
+			zwp_linux_surface_synchronization_v1_get_release(surf_source->surface_sync);
+		zwp_linux_buffer_release_v1_add_listener(
+			buf_info->buffer_release, &explicit_sync_release_listner, tbm_surface);
+	} else {
+		wl_buffer_add_listener((void *)buf_info->wl_buffer,
+				       &wl_buffer_release_listener, tbm_surface);
+	}
 
 	tbm_surface_internal_add_user_data(tbm_surface, KEY_BUFFER_INFO,
 					   (tbm_data_free)__cb_twe_buffer_free_callback);
@@ -2785,6 +2919,14 @@ _twe_thread_wl_surf_source_destroy(void *source)
 		surf_source->in_use_buffers = NULL;
 	}
 
+	if (surf_source->surface_sync) {
+		TPL_LOG_T(BACKEND,
+			  "[SURFACE_SYNC FINI] twe_wl_surf_source(%p) surface_sync(%p)",
+			  surf_source, surf_source->surface_sync);
+		zwp_linux_surface_synchronization_v1_destroy(surf_source->surface_sync);
+		surf_source->surface_sync = NULL;
+	}
+
 	if (surf_source->committed_buffers) {
 		while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
 			tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
@@ -2853,14 +2995,6 @@ _twe_thread_wl_surf_source_destroy(void *source)
 		surf_source->vblank = NULL;
 	}
 
-	if (surf_source->surface_sync) {
-		TPL_LOG_T(BACKEND,
-			  "[SURFACE_SYNC FINI] twe_wl_surf_source(%p) surface_sync(%p)",
-			  surf_source, surf_source->surface_sync);
-		zwp_linux_surface_synchronization_v1_destroy(surf_source->surface_sync);
-		surf_source->surface_sync = NULL;
-	}
-
 	surf_source->cb_data = NULL;
 	surf_source->rotate_cb = NULL;
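
For reference, the release flow wired up above follows the usual
zwp_linux_buffer_release_v1 client pattern: request a release object before
each wl_surface.commit, then handle exactly one fenced_release or
immediate_release event for that commit and destroy the client-side proxy.
The minimal sketch below shows that pattern for a generic Wayland client; it
is illustrative only and not part of this change. The struct buffer
bookkeeping and the generated header name
"linux-explicit-synchronization-unstable-v1-client-protocol.h" are
assumptions of the sketch, not taken from libtpl-egl.

/* Reference sketch only -- not part of this patch.
 * Assumes a wayland-scanner generated protocol header and an already-bound
 * zwp_linux_surface_synchronization_v1 object for the wl_surface. */
#include <wayland-client.h>
#include "linux-explicit-synchronization-unstable-v1-client-protocol.h"

struct buffer {				/* hypothetical per-buffer state */
	struct wl_buffer *wl_buffer;
	struct zwp_linux_buffer_release_v1 *release;
	int release_fence_fd;		/* -1: reusable without waiting */
	int busy;
};

static void
handle_fenced_release(void *data, struct zwp_linux_buffer_release_v1 *release,
		      int32_t fence)
{
	struct buffer *buf = data;

	/* The client owns the fence fd: wait on it (or import it into
	 * EGL/Vulkan) before reusing the buffer, then close() it. */
	buf->release_fence_fd = fence;
	buf->busy = 0;
	zwp_linux_buffer_release_v1_destroy(release);
	buf->release = NULL;
}

static void
handle_immediate_release(void *data, struct zwp_linux_buffer_release_v1 *release)
{
	struct buffer *buf = data;

	/* No fence was delivered: the buffer may be reused right away. */
	buf->release_fence_fd = -1;
	buf->busy = 0;
	zwp_linux_buffer_release_v1_destroy(release);
	buf->release = NULL;
}

static const struct zwp_linux_buffer_release_v1_listener release_listener = {
	handle_fenced_release,
	handle_immediate_release,
};

/* Request a release object for every commit: exactly one fenced_release or
 * immediate_release event will follow for this wl_surface.commit. */
static void
commit_with_release(struct zwp_linux_surface_synchronization_v1 *surface_sync,
		    struct wl_surface *surface, struct buffer *buf)
{
	wl_surface_attach(surface, buf->wl_buffer, 0, 0);
	buf->release =
		zwp_linux_surface_synchronization_v1_get_release(surface_sync);
	zwp_linux_buffer_release_v1_add_listener(buf->release,
						 &release_listener, buf);
	buf->busy = 1;
	wl_surface_commit(surface);
}

In the patch itself the same pattern is driven through the tbm_surface user
data (twe_wl_buffer_info) rather than a dedicated struct buffer, and the
received fence is stored in release_fence_fd so its ownership can later be
handed to EGL instead of being waited on directly.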