From 257c88f515a54cc190f32accd2c78c580a192ed6 Mon Sep 17 00:00:00 2001
From: "joonbum.ko"
Date: Mon, 26 Sep 2016 10:49:06 +0900
Subject: [PATCH 01/16] tpl_wayland_egl: Fixed some bugs related to
 wl_display_dispatch_queue_pending.

Temporary fix: change how tbm_surface's reset state is updated.
- Currently, tbm_surface's reset state is changed by tpl_surface_validate()
  and by the tbm_surface_queue event handler. Once there is a proper
  solution for changing tbm_surface's reset state, the wayland_egl backend
  of tpl_surface_validate() should stop changing it.
- Fixed some bugs in the handling of the ACTIVE/DEACTIVE (queue_flush)
  events and finally applied wl_display_dispatch_queue_pending().
- Related to commit 019e7ff6cade7d660524cb23f7b7c442011a07dd
  "tpl_wayland_egl: dispatching before checking whether can dequeue or not."
- Event processing can be delayed when there are idle buffers in the free
  queue. So the dequeueable path has to call
  wl_display_dispatch_queue_pending() before checking whether the
  tbm_surface_queue can dequeue. This lets the events related to
  'queue flush' be processed sooner.
- In a future commit, tpl_surface_validate() will be reworked around the
  reset flag.

Change-Id: I0584211aed14ef2dd22531074c3b50e8463dd95f
Signed-off-by: joonbum.ko
---
 src/tpl_wayland_egl.c | 42 ++++++++++++++++++++++++------------------
 1 file changed, 24 insertions(+), 18 deletions(-)

diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c
index 8b969f2..37c71fd 100644
--- a/src/tpl_wayland_egl.c
+++ b/src/tpl_wayland_egl.c
@@ -849,17 +849,30 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface,
 static tpl_bool_t
 __tpl_wayland_egl_surface_validate(tpl_surface_t *surface)
 {
+	tpl_bool_t retval = TPL_TRUE;
+
 	TPL_ASSERT(surface);
 	TPL_ASSERT(surface->backend.data);
 
 	tpl_wayland_egl_surface_t *wayland_egl_surface =
 		(tpl_wayland_egl_surface_t *)surface->backend.data;
 
-	if (wayland_egl_surface->resized ||
-	    wayland_egl_surface->reset)
-		return TPL_FALSE;
+	retval = !(wayland_egl_surface->reset || wayland_egl_surface->resized);
+
+	/* TODO */
+	/* Be planning to revise below line in future commits.
+	   - It is under development so that EGL can realize tbm_surface_queue_reset
+	     immediately.
+	 */
+
+	/* The tbm_surface_queue_flush (which is occured by ACTIVE, DEACTIVE events)
+	 * only occured in __tpl_wayland_egl_surface_wait_dequeable.
+	 * After tpl_surface_dequeue_buffer(), tpl_surface has to inform to frontend
+	 * surface was reset. (retval)
+	 * The reset flag will be set to TPL_FALSE only here after inform it.
*/ + wayland_egl_surface->reset = TPL_FALSE; - return TPL_TRUE; + return retval; } static tpl_result_t @@ -873,6 +886,9 @@ __tpl_wayland_egl_surface_wait_dequeuable(tpl_surface_t *surface) surface->display->backend.data; wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; + wl_display_dispatch_queue_pending(wayland_egl_display->wl_dpy, + wayland_egl_display->wl_tbm_event_queue); + if (tbm_surface_queue_can_dequeue(wayland_egl_surface->tbm_queue, 0)) return TPL_ERROR_NONE; @@ -937,12 +953,12 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, format = tbm_surface_queue_get_format(wayland_egl_surface->tbm_queue); tbm_surface_queue_reset(wayland_egl_surface->tbm_queue, width, height, format); - wayland_egl_surface->resized = TPL_FALSE; surface->width = width; surface->height = height; - } - wayland_egl_surface->reset = TPL_FALSE; + wayland_egl_surface->resized = TPL_FALSE; + wayland_egl_surface->reset = TPL_FALSE; + } if (__tpl_wayland_egl_surface_wait_dequeuable(surface)) { TPL_ERR("Failed to wait dequeeable buffer"); @@ -1013,18 +1029,8 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, /* reset flag is to check whether it is the buffer before tbm_surface_queue is reset or not. */ wayland_egl_buffer->reset = TPL_FALSE; - wayland_egl_surface->current_buffer = tbm_surface; - /* - * Only when the tbm_surface which it dequeued after tbm_surface_queue_dequeue - * was called is not reused one, the following flag 'reset' has to - * initialize to TPL_FALSE. - * - * If this flag initialized before tbm_surface_queue_dequeue, it cause - * the problem that return TPL_FALSE in tpl_surface_validate() in spite of - * EGL already has valid buffer. - */ - wayland_egl_surface->reset = TPL_FALSE; + wayland_egl_surface->current_buffer = tbm_surface; __tpl_wayland_egl_set_wayland_buffer_to_tbm_surface(tbm_surface, wayland_egl_buffer); -- 2.7.4 From 295d42d2ef720b03fc47b86bba76ec1a17df90ce Mon Sep 17 00:00:00 2001 From: Sangjin Lee Date: Fri, 23 Sep 2016 16:04:07 +0900 Subject: [PATCH 02/16] tpl_wayland_egl: Use the tizen_surface_shm protocol for flushing of buffer If the client's state changes the deiconified state, the tizen display server sends tizen_surface_shm_flusher event to client. So, when the client gets the tizen_surface_shm_flusher event from server, client can flush buffers of surface_queue. And the tizen_surface_shm_flush event is dispatched by client's default event queue in tpl_wayland_egl. 
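For reference, a minimal sketch of how a client could hook up the new
protocol (the interface names follow tizen-surface-client.h added by this
patch; the tbm_surface_queue handle is assumed to exist already):

    static void
    __flush_cb(void *data, struct tizen_surface_shm_flusher *flusher)
    {
        /* the server asked us to drop cached buffers: flush the queue */
        tbm_surface_queue_h tbm_queue = data;
        tbm_surface_queue_flush(tbm_queue);
    }

    static const struct tizen_surface_shm_flusher_listener flush_listener = {
        __flush_cb,
    };

    /* after binding tizen_surface_shm from the wl_registry: */
    struct tizen_surface_shm_flusher *flusher =
        tizen_surface_shm_get_flusher(tizen_surface_shm, wl_surface);
    tizen_surface_shm_flusher_add_listener(flusher, &flush_listener, tbm_queue);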
Change-Id: Icccb55e621fe264bf299d9d8c81e6f5376cee24d --- Makefile | 1 + src/protocol/tizen-surface-client.h | 103 ++++++++++++++++++++ src/protocol/tizen-surface-protocol.c | 36 +++++++ src/tpl_wayland_egl.c | 178 +++++++++++++++++++++++++++++++++- 4 files changed, 315 insertions(+), 3 deletions(-) create mode 100644 src/protocol/tizen-surface-client.h create mode 100644 src/protocol/tizen-surface-protocol.c diff --git a/Makefile b/Makefile index 23e82a8..ee75eba 100644 --- a/Makefile +++ b/Makefile @@ -73,6 +73,7 @@ ifneq ($(call is-feature-enabled,winsys_wl),) TPL_SRCS += $(SRC_DIR)/tpl_wayland_egl.c TPL_SRCS += $(SRC_DIR)/tpl_wayland_vk_wsi.c TPL_SRCS += $(SRC_DIR)/tpl_gbm.c +TPL_SRCS += $(SRC_DIR)/protocol/tizen-surface-protocol.c endif ifneq ($(call is-feature-enabled,winsys_dri2),) diff --git a/src/protocol/tizen-surface-client.h b/src/protocol/tizen-surface-client.h new file mode 100644 index 0000000..6159a82 --- /dev/null +++ b/src/protocol/tizen-surface-client.h @@ -0,0 +1,103 @@ +#ifndef TIZEN_SURFACE_CLIENT_PROTOCOL_H +#define TIZEN_SURFACE_CLIENT_PROTOCOL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include "wayland-client.h" + +struct wl_client; +struct wl_resource; + +struct tizen_surface_shm; +struct tizen_surface_shm_flusher; + +extern const struct wl_interface tizen_surface_shm_interface; +extern const struct wl_interface tizen_surface_shm_flusher_interface; + +#define TIZEN_SURFACE_SHM_GET_FLUSHER 0 + +static inline void +tizen_surface_shm_set_user_data(struct tizen_surface_shm *tizen_surface_shm, + void *user_data) +{ + wl_proxy_set_user_data((struct wl_proxy *) tizen_surface_shm, user_data); +} + +static inline void * +tizen_surface_shm_get_user_data(struct tizen_surface_shm *tizen_surface_shm) +{ + return wl_proxy_get_user_data((struct wl_proxy *) tizen_surface_shm); +} + +static inline void +tizen_surface_shm_destroy(struct tizen_surface_shm *tizen_surface_shm) +{ + wl_proxy_destroy((struct wl_proxy *) tizen_surface_shm); +} + +static inline struct tizen_surface_shm_flusher * +tizen_surface_shm_get_flusher(struct tizen_surface_shm *tizen_surface_shm, + struct wl_surface *surface) +{ + struct wl_proxy *id; + + id = wl_proxy_marshal_constructor((struct wl_proxy *) tizen_surface_shm, + TIZEN_SURFACE_SHM_GET_FLUSHER, &tizen_surface_shm_flusher_interface, NULL, + surface); + + return (struct tizen_surface_shm_flusher *) id; +} + +struct tizen_surface_shm_flusher_listener { + /** + * flush - (none) + */ + void (*flush)(void *data, + struct tizen_surface_shm_flusher *tizen_surface_shm_flusher); +}; + +static inline int +tizen_surface_shm_flusher_add_listener(struct tizen_surface_shm_flusher + *tizen_surface_shm_flusher, + const struct tizen_surface_shm_flusher_listener *listener, void *data) +{ + return wl_proxy_add_listener((struct wl_proxy *) tizen_surface_shm_flusher, + (void (* *)(void)) listener, data); +} + +#define TIZEN_SURFACE_SHM_FLUSHER_DESTROY 0 + +static inline void +tizen_surface_shm_flusher_set_user_data(struct tizen_surface_shm_flusher + *tizen_surface_shm_flusher, void *user_data) +{ + wl_proxy_set_user_data((struct wl_proxy *) tizen_surface_shm_flusher, + user_data); +} + +static inline void * +tizen_surface_shm_flusher_get_user_data(struct tizen_surface_shm_flusher + *tizen_surface_shm_flusher) +{ + return wl_proxy_get_user_data((struct wl_proxy *) tizen_surface_shm_flusher); +} + +static inline void +tizen_surface_shm_flusher_destroy(struct tizen_surface_shm_flusher + *tizen_surface_shm_flusher) +{ + wl_proxy_marshal((struct 
wl_proxy *) tizen_surface_shm_flusher, + TIZEN_SURFACE_SHM_FLUSHER_DESTROY); + + wl_proxy_destroy((struct wl_proxy *) tizen_surface_shm_flusher); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/protocol/tizen-surface-protocol.c b/src/protocol/tizen-surface-protocol.c new file mode 100644 index 0000000..8420280 --- /dev/null +++ b/src/protocol/tizen-surface-protocol.c @@ -0,0 +1,36 @@ +#include +#include +#include "wayland-util.h" + +extern const struct wl_interface tizen_surface_shm_flusher_interface; +extern const struct wl_interface wl_surface_interface; + +static const struct wl_interface *types[] = { + &tizen_surface_shm_flusher_interface, + &wl_surface_interface, +}; + +static const struct wl_message tizen_surface_shm_requests[] = { + { "get_flusher", "no", types + 0 }, +}; + +WL_EXPORT const struct wl_interface tizen_surface_shm_interface = { + "tizen_surface_shm", 1, + 1, tizen_surface_shm_requests, + 0, NULL, +}; + +static const struct wl_message tizen_surface_shm_flusher_requests[] = { + { "destroy", "", types + 0 }, +}; + +static const struct wl_message tizen_surface_shm_flusher_events[] = { + { "flush", "", types + 0 }, +}; + +WL_EXPORT const struct wl_interface tizen_surface_shm_flusher_interface = { + "tizen_surface_shm_flusher", 1, + 1, tizen_surface_shm_flusher_requests, + 1, tizen_surface_shm_flusher_events, +}; + diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c index 37c71fd..4ff75bd 100644 --- a/src/tpl_wayland_egl.c +++ b/src/tpl_wayland_egl.c @@ -23,6 +23,7 @@ #include #include #include +#include "protocol/tizen-surface-client.h" /* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */ #define CLIENT_QUEUE_SIZE 3 @@ -38,6 +39,7 @@ struct _tpl_wayland_egl_display { tdm_client *tdm_client; struct wl_display *wl_dpy; struct wl_event_queue *wl_tbm_event_queue; + struct tizen_surface_shm *tizen_surface_shm; /* used for surface buffer_flush */ }; struct _tpl_wayland_egl_surface { @@ -50,6 +52,7 @@ struct _tpl_wayland_egl_surface { tpl_bool_t vblank_done; tpl_list_t *attached_buffers; /* list for tracking [ACQ]~[REL] buffers */ tpl_list_t *dequeued_buffers; /* list for tracking [DEQ]~[ENQ] buffers */ + struct tizen_surface_shm_flusher *tizen_surface_shm_flusher; /* wl_proxy for buffer flush */ }; struct _tpl_wayland_egl_buffer { @@ -67,8 +70,16 @@ static const struct wl_buffer_listener buffer_release_listener; static int tpl_wayland_egl_buffer_key; #define KEY_tpl_wayland_egl_buffer (unsigned long)(&tpl_wayland_egl_buffer_key) -static void __tpl_wayland_egl_buffer_free(tpl_wayland_egl_buffer_t - *wayland_egl_buffer); +static void +__tpl_wayland_egl_display_buffer_flusher_init(tpl_display_t *display); +static void +__tpl_wayland_egl_display_buffer_flusher_fini(tpl_display_t *display); +static void +__tpl_wayland_egl_surface_buffer_flusher_init(tpl_surface_t *surface); +static void +__tpl_wayland_egl_surface_buffer_flusher_fini(tpl_surface_t *surface); +static void +__tpl_wayland_egl_buffer_free(tpl_wayland_egl_buffer_t *wayland_egl_buffer); static TPL_INLINE tpl_wayland_egl_buffer_t * __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface(tbm_surface_h surface) @@ -183,6 +194,7 @@ __tpl_wayland_egl_display_init(tpl_display_t *display) } wayland_egl_display->wl_dpy = wl_dpy; + __tpl_wayland_egl_display_buffer_flusher_init(display); } else { TPL_ERR("Invalid native handle for display."); @@ -241,6 +253,8 @@ __tpl_wayland_egl_display_fini(tpl_display_t *display) if (wayland_egl_display->wl_tbm_event_queue) 
wl_event_queue_destroy(wayland_egl_display->wl_tbm_event_queue); + __tpl_wayland_egl_display_buffer_flusher_fini(display); + wayland_egl_display->wl_tbm_event_queue = NULL; wayland_egl_display->wl_tbm_client = NULL; wayland_egl_display->tdm_client = NULL; @@ -535,6 +549,8 @@ __tpl_wayland_egl_surface_init(tpl_surface_t *surface) } } + __tpl_wayland_egl_surface_buffer_flusher_init(surface); + TPL_LOG_B("WL_EGL", "[INIT] tpl_surface_t(%p) tpl_wayland_egl_surface_t(%p) tbm_queue(%p)", surface, wayland_egl_surface, @@ -597,6 +613,8 @@ __tpl_wayland_egl_surface_fini(tpl_surface_t *surface) wayland_egl_surface, wl_egl_window, wayland_egl_surface->tbm_queue); tbm_surface_queue_destroy(wayland_egl_surface->tbm_queue); wayland_egl_surface->tbm_queue = NULL; + + __tpl_wayland_egl_surface_buffer_flusher_fini(surface); } /* When surface is destroyed, unreference tbm_surface which tracked by @@ -923,7 +941,8 @@ __tpl_wayland_egl_surface_wait_dequeuable(tpl_surface_t *surface) static tbm_surface_h __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, - uint64_t timeout_ns, tbm_fd *sync_fence) + uint64_t timeout_ns, + tbm_fd *sync_fence) { TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); @@ -1223,3 +1242,156 @@ __cb_client_window_resize_callback(struct wl_egl_window *wl_egl_window, || (height != tbm_surface_queue_get_height(wayland_egl_surface->tbm_queue))) wayland_egl_surface->resized = TPL_TRUE; } + +void +__cb_resistry_global_callback(void *data, struct wl_registry *wl_registry, + uint32_t name, const char *interface, + uint32_t version) +{ + tpl_wayland_egl_display_t *wayland_egl_display = data; + + if (!strcmp(interface, "tizen_surface_shm")) { + wayland_egl_display->tizen_surface_shm = + wl_registry_bind(wl_registry, + name, + &tizen_surface_shm_interface, + version); + } +} + +void +__cb_resistry_global_remove_callback(void *data, struct wl_registry *wl_registry, + uint32_t name) +{ +} + +static const struct wl_registry_listener registry_listener = { + __cb_resistry_global_callback, + __cb_resistry_global_remove_callback +}; + +static void +__tpl_wayland_egl_display_buffer_flusher_init(tpl_display_t *display) +{ + tpl_wayland_egl_display_t *wayland_egl_display = display->backend.data; + struct wl_registry *registry = NULL; + struct wl_event_queue *queue = NULL; + int ret; + + queue = wl_display_create_queue(wayland_egl_display->wl_dpy); + if (!queue) { + TPL_ERR("Failed to create wl_queue"); + goto fini; + } + + registry = wl_display_get_registry(wayland_egl_display->wl_dpy); + if (!queue) { + TPL_ERR("Failed to create wl_registry"); + goto fini; + } + + wl_proxy_set_queue((struct wl_proxy *)registry, queue); + if (wl_registry_add_listener(registry, ®istry_listener, + wayland_egl_display)) { + TPL_ERR("Failed to wl_registry_add_listener"); + goto fini; + } + + ret = wl_display_roundtrip_queue(wayland_egl_display->wl_dpy, queue); + if (ret) { + TPL_ERR("Failed to wl_display_roundtrip_queue ret:%d, err:%d", ret, errno); + goto fini; + } + + /* set tizen_surface_shm's queue as client's default queue */ + if (wayland_egl_display->tizen_surface_shm) + wl_proxy_set_queue((struct wl_proxy *)wayland_egl_display->tizen_surface_shm, + NULL); + +fini: + if (queue) + wl_event_queue_destroy(queue); + if (registry) + wl_registry_destroy(registry); +} + +static void +__tpl_wayland_egl_display_buffer_flusher_fini(tpl_display_t *display) +{ + tpl_wayland_egl_display_t *wayland_egl_display = display->backend.data; + + if (wayland_egl_display->tizen_surface_shm) { + 
tizen_surface_shm_destroy(wayland_egl_display->tizen_surface_shm); + wayland_egl_display->tizen_surface_shm = NULL; + } +} + +static void __cb_tizen_surface_shm_flusher_flush_callback(void *data, + struct tizen_surface_shm_flusher *tizen_surface_shm_flusher) +{ + tpl_surface_t *surface = data; + tpl_wayland_egl_surface_t *wayland_egl_surface; + tpl_wayland_egl_display_t *wayland_egl_display; + int ret; + + TPL_CHECK_ON_NULL_RETURN(surface); + wayland_egl_surface = surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface); + TPL_CHECK_ON_NULL_RETURN(surface->display); + wayland_egl_display = surface->display->backend.data; + TPL_CHECK_ON_NULL_RETURN(wayland_egl_display); + + TPL_CHECK_ON_NULL_RETURN(wayland_egl_display->wl_dpy); + TPL_CHECK_ON_NULL_RETURN(wayland_egl_display->wl_tbm_event_queue); + TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface->tbm_queue); + + /*Fist distach panding queue for TPL + - dispatch buffer-release + - dispatch queue flush + */ + ret = wl_display_dispatch_queue_pending(wayland_egl_display->wl_dpy, + wayland_egl_display->wl_tbm_event_queue); + if (ret) { + TPL_ERR("Failed to wl_display_dispatch_queue_pending ret:%d, err:%d", ret, + errno); + return; + } + + tbm_surface_queue_flush(wayland_egl_surface->tbm_queue); +} + +static const struct tizen_surface_shm_flusher_listener + tizen_surface_shm_flusher_listener = { + __cb_tizen_surface_shm_flusher_flush_callback +}; + +static void +__tpl_wayland_egl_surface_buffer_flusher_init(tpl_surface_t *surface) +{ + tpl_wayland_egl_display_t *wayland_egl_display = surface->display->backend.data; + tpl_wayland_egl_surface_t *wayland_egl_surface = surface->backend.data; + struct wl_egl_window *wl_egl_window = (struct wl_egl_window *) + surface->native_handle; + + if (!wayland_egl_display->tizen_surface_shm) + return; + + wayland_egl_surface->tizen_surface_shm_flusher = + tizen_surface_shm_get_flusher(wayland_egl_display->tizen_surface_shm, + wl_egl_window->surface); + tizen_surface_shm_flusher_add_listener( + wayland_egl_surface->tizen_surface_shm_flusher, + &tizen_surface_shm_flusher_listener, surface); +} + +static void +__tpl_wayland_egl_surface_buffer_flusher_fini(tpl_surface_t *surface) +{ + tpl_wayland_egl_surface_t *wayland_egl_surface = surface->backend.data; + + if (wayland_egl_surface->tizen_surface_shm_flusher) { + tizen_surface_shm_flusher_destroy( + wayland_egl_surface->tizen_surface_shm_flusher); + wayland_egl_surface->tizen_surface_shm_flusher = NULL; + } +} -- 2.7.4 From 851992fc72282446ebf374c642af4361b3652f5a Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Wed, 5 Oct 2016 17:44:52 +0900 Subject: [PATCH 03/16] tpl_wayland_egl: Fix misused return value of wl_display_roundtrip_queue() on buffer_flusher_init() - If there are errors on wl_display_roundtrip_queue then it returns values as '-1'. 
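wl_display_roundtrip_queue() returns the number of events dispatched on
success and -1 on failure, so a plain truth test also fires on a successful
roundtrip that dispatched events. A minimal sketch of the corrected check:

    int ret = wl_display_roundtrip_queue(wl_dpy, queue);
    if (ret == -1) {
        /* only -1 signals failure; any value >= 0 is the number of
         * dispatched events and must not be treated as an error */
        TPL_ERR("Failed to wl_display_roundtrip_queue ret:%d, err:%d",
                ret, errno);
        goto fini;
    }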
Change-Id: I754df1768b46f0d815af84255255997429eefa7b
Signed-off-by: joonbum.ko
---
 src/tpl_wayland_egl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c
index 4ff75bd..7119bf2 100644
--- a/src/tpl_wayland_egl.c
+++ b/src/tpl_wayland_egl.c
@@ -1298,7 +1298,7 @@ __tpl_wayland_egl_display_buffer_flusher_init(tpl_display_t *display)
 	}
 
 	ret = wl_display_roundtrip_queue(wayland_egl_display->wl_dpy, queue);
-	if (ret) {
+	if (ret == -1) {
 		TPL_ERR("Failed to wl_display_roundtrip_queue ret:%d, err:%d", ret, errno);
 		goto fini;
 	}
-- 
2.7.4


From 6e4b86832ee5726001be04da57da8ce614c87fb4 Mon Sep 17 00:00:00 2001
From: "joonbum.ko"
Date: Tue, 27 Sep 2016 18:41:29 +0900
Subject: [PATCH 04/16] tpl: Added frontend API "tpl_surface_set_reset_cb"

- This API registers a reset_cb function on a tpl_surface.
- When a tbm_surface_queue_reset occurs and a reset_cb has been registered
  by the frontend, tpl_surface calls that reset_cb with reset_data.
- tpl_surface_validate() can still report the surface's "reset/resized"
  state to the frontend as before. But if the frontend wants to know about
  a surface reset without a render call, it is better to register a reset
  callback using this API.

Change-Id: Ib762856ad1db306c435013218c1a4be2b24ef1c9
Signed-off-by: joonbum.ko
---
 src/tpl.h             | 28 ++++++++++++++++++++++++++++
 src/tpl_internal.h    |  4 ++++
 src/tpl_surface.c     | 17 +++++++++++++++++
 src/tpl_wayland_egl.c | 39 +++++++++++++++++++--------------------
 4 files changed, 68 insertions(+), 20 deletions(-)

diff --git a/src/tpl.h b/src/tpl.h
index cb48da9..76a3ad4 100644
--- a/src/tpl.h
+++ b/src/tpl.h
@@ -124,6 +124,11 @@ typedef struct _tpl_surface tpl_surface_t;
 typedef void (*tpl_free_func_t)(void *data);
 
 /**
+ * Function type used for registering callback function.
+ */
+typedef void (*tpl_surface_cb_func_t)(void *data);
+
+/**
  * Object types.
  *
  * @see tpl_object_get_type()
@@ -695,6 +700,29 @@ tbm_surface_h
 tpl_display_get_buffer_from_native_pixmap(tpl_display_t *display,
 		tpl_handle_t pixmap);
 
+/**
+ * Set frontbuffer mode to render to only frontbuffer.
+ *
+ * @param surface surface to set mode.
+ * @param set TPL_TRUE if user want to set tpl_surface to frontbuffer mode.
+ * @return TPL_ERROR_NONE if tpl_surface is valid.
+ */
 tpl_result_t
 tpl_surface_set_frontbuffer_mode(tpl_surface_t *surface, tpl_bool_t set);
+
+/**
+ * Set callback function to tpl_surface for receiving reset information.
+ *
+ * This function should be called when after tpl_surface_create() if
+ * frontend surface want to know that if surface reset or not.
+ *
+ * @param surface surface to set callback. It has to be not NULL.
+ * @param data data to be delivered to callback function.
+ * @param reset_cb pointer value of callback funtion.
+ *        - It can be NULL if frontend wants to delete callback function.
+ * @return TPL_ERROR_NONE if tpl_surface is valid.
+ */ +tpl_result_t +tpl_surface_set_reset_cb(tpl_surface_t *surface, + void* data, tpl_surface_cb_func_t reset_cb); #endif /* TPL_H */ diff --git a/src/tpl_internal.h b/src/tpl_internal.h index 6231c87..41da0a4 100644 --- a/src/tpl_internal.h +++ b/src/tpl_internal.h @@ -132,6 +132,10 @@ struct _tpl_surface { /*For frontbuffer extension*/ tpl_bool_t is_frontbuffer_mode; tbm_surface_h frontbuffer; + + /* Surface reset callback */ + tpl_surface_cb_func_t reset_cb; + void* reset_data; }; /******************************************************************************* diff --git a/src/tpl_surface.c b/src/tpl_surface.c index d6ad51d..762a597 100644 --- a/src/tpl_surface.c +++ b/src/tpl_surface.c @@ -405,3 +405,20 @@ tpl_surface_set_frontbuffer_mode(tpl_surface_t *surface, tpl_bool_t set) surface, set ? "ACTIVATED" : "DEACTIVATED"); return ret; } + +tpl_result_t +tpl_surface_set_reset_cb(tpl_surface_t *surface, void *data, tpl_surface_cb_func_t reset_cb) +{ + tpl_result_t ret = TPL_ERROR_NONE; + + if (!surface) + { + TPL_ERR("Invalid surface!"); + return TPL_ERROR_INVALID_PARAMETER; + } + + surface->reset_data = data; + surface->reset_cb = reset_cb; + + return ret; +} diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c index 7119bf2..35c8556 100644 --- a/src/tpl_wayland_egl.c +++ b/src/tpl_wayland_egl.c @@ -47,7 +47,7 @@ struct _tpl_wayland_egl_surface { tbm_surface_queue_h tbm_queue; tbm_surface_h current_buffer; tpl_bool_t resized; - tpl_bool_t reset; /* TRUE if queue reseted by external */ + tpl_bool_t reset; /* TRUE if queue reseted by external */ tdm_client_vblank *tdm_vblank; /* vblank object for each wl_surface */ tpl_bool_t vblank_done; tpl_list_t *attached_buffers; /* list for tracking [ACQ]~[REL] buffers */ @@ -400,8 +400,14 @@ static void __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, void *data) { - tpl_wayland_egl_surface_t *wayland_egl_surface = - (tpl_wayland_egl_surface_t *)data; + tpl_surface_t *surface = NULL; + tpl_wayland_egl_surface_t *wayland_egl_surface = NULL; + + surface = (tpl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(surface); + + wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface); if (!wayland_egl_surface) return; @@ -409,10 +415,10 @@ __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, "[QUEUE_RESET_CB] tpl_wayland_egl_surface_t(%p) surface_queue(%p)", data, surface_queue); - wayland_egl_surface->reset = TPL_TRUE; - TPL_OBJECT_LOCK(&wayland_egl_surface->base); + wayland_egl_surface->reset = TPL_TRUE; + /* Set the reset flag of the buffers which attached but not released to TPL_TRUE. 
*/ __tpl_wayland_egl_buffer_set_reset_flag(wayland_egl_surface->attached_buffers); @@ -420,6 +426,9 @@ __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, __tpl_wayland_egl_buffer_set_reset_flag(wayland_egl_surface->dequeued_buffers); TPL_OBJECT_UNLOCK(&wayland_egl_surface->base); + + if (surface->reset_cb) + surface->reset_cb(surface->reset_data); } static tpl_result_t @@ -487,6 +496,7 @@ __tpl_wayland_egl_surface_init(tpl_surface_t *surface) surface->backend.data = (void *)wayland_egl_surface; wayland_egl_surface->tbm_queue = NULL; wayland_egl_surface->resized = TPL_FALSE; + wayland_egl_surface->reset = TPL_FALSE; wayland_egl_surface->vblank_done = TPL_TRUE; wayland_egl_surface->current_buffer = NULL; @@ -526,7 +536,7 @@ __tpl_wayland_egl_surface_init(tpl_surface_t *surface) /* Set reset_callback to tbm_queue */ tbm_surface_queue_add_reset_cb(wayland_egl_surface->tbm_queue, __cb_tbm_surface_queue_reset_callback, - (void *)wayland_egl_surface); + (void *)surface); surface->width = wl_egl_window->width; @@ -875,20 +885,7 @@ __tpl_wayland_egl_surface_validate(tpl_surface_t *surface) tpl_wayland_egl_surface_t *wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data; - retval = !(wayland_egl_surface->reset || wayland_egl_surface->resized); - - /* TODO */ - /* Be planning to revise below line in future commits. - - It is under development so that EGL can realize tbm_surface_queue_reset - immediately. - */ - - /* The tbm_surface_queue_flush (which is occured by ACTIVE, DEACTIVE events) - * only occured in __tpl_wayland_egl_surface_wait_dequeable. - * After tpl_surface_dequeue_buffer(), tpl_surface has to inform to frontend - * surface was reset. (retval) - * The reset flag will be set to TPL_FALSE only here after inform it. 
*/
-	wayland_egl_surface->reset = TPL_FALSE;
+	retval = !(wayland_egl_surface->resized || wayland_egl_surface->reset);
 
 	return retval;
 }
@@ -1006,6 +1003,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface,
 			  tbm_surface, tbm_bo_export(wayland_egl_buffer->bo));
 
 		wayland_egl_buffer->reset = TPL_FALSE;
+		wayland_egl_surface->reset = TPL_FALSE;
 
 		if (wayland_egl_surface->dequeued_buffers) {
 			TPL_OBJECT_LOCK(&wayland_egl_surface->base);
@@ -1050,6 +1048,7 @@
 	wayland_egl_buffer->reset = TPL_FALSE;
 
 	wayland_egl_surface->current_buffer = tbm_surface;
+	wayland_egl_surface->reset = TPL_FALSE;
 
 	__tpl_wayland_egl_set_wayland_buffer_to_tbm_surface(tbm_surface,
 			wayland_egl_buffer);
-- 
2.7.4


From 05708df85f03caa8dc8bee57d7e44d656bf2ea5d Mon Sep 17 00:00:00 2001
From: Sangjin Lee
Date: Thu, 6 Oct 2016 10:56:33 +0900
Subject: [PATCH 05/16] tpl_wayland_egl: Fix error check for dispatch queue

Change-Id: I8b2f684a160358b632e1edf2d1f28a8843b409ed
---
 src/tpl_wayland_egl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c
index 35c8556..bc5507f 100644
--- a/src/tpl_wayland_egl.c
+++ b/src/tpl_wayland_egl.c
@@ -1350,7 +1350,7 @@ static void __cb_tizen_surface_shm_flusher_flush_callback(void *data,
 	 */
 	ret = wl_display_dispatch_queue_pending(wayland_egl_display->wl_dpy,
 						wayland_egl_display->wl_tbm_event_queue);
-	if (ret) {
+	if (ret == -1) {
 		TPL_ERR("Failed to wl_display_dispatch_queue_pending ret:%d, err:%d", ret,
 			errno);
 		return;
-- 
2.7.4


From 805176455d44a88fc5bad0d6bf0066e1111b426f Mon Sep 17 00:00:00 2001
From: "Mun, Gwan-gyeong"
Date: Thu, 6 Oct 2016 18:14:18 +0900
Subject: [PATCH 06/16] tpl: Add TPL_ERROR_OUT_OF_MEMORY to the enum values of
 tpl_result_t.

- It provides an error case for OOM (out of memory) to users of libtpl-egl.
  previous: tpl returned TPL_ERROR_INVALID_OPERATION or NULL when it failed
  to allocate memory.
  current : tpl returns TPL_ERROR_OUT_OF_MEMORY or NULL when it fails to
  allocate memory.
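A minimal sketch of how a libtpl-egl user can now tell an allocation
failure apart from other failures (the surface and its size are assumed
to be set up elsewhere):

    tpl_result_t res = tpl_surface_create_swapchain(surface,
                                                    TBM_FORMAT_ARGB8888,
                                                    width, height, 3);
    switch (res) {
    case TPL_ERROR_NONE:
        break;                       /* swapchain is ready */
    case TPL_ERROR_OUT_OF_MEMORY:
        /* allocation failed: free resources, then retry or give up */
        break;
    default:
        /* invalid parameter or invalid operation */
        break;
    }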
Change-Id: I9859b71be94422b2f36250d62a7e68029f355072 Signed-off-by: Mun, Gwan-gyeong --- src/tpl.c | 2 +- src/tpl.h | 3 ++- src/tpl_gbm.c | 4 ++-- src/tpl_tbm.c | 12 ++++++------ src/tpl_utils.h | 2 +- src/tpl_utils_hlist.c | 2 +- src/tpl_wayland_egl.c | 4 ++-- src/tpl_wayland_vk_wsi.c | 13 ++++++++----- 8 files changed, 23 insertions(+), 19 deletions(-) diff --git a/src/tpl.c b/src/tpl.c index 4f2ab38..ed4c8a1 100644 --- a/src/tpl.c +++ b/src/tpl.c @@ -18,7 +18,7 @@ __tpl_runtime_init() runtime = (tpl_runtime_t *) calloc(1, sizeof(tpl_runtime_t)); if (runtime == NULL) { TPL_ERR("Failed to allocate new tpl_runtime_t."); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } } diff --git a/src/tpl.h b/src/tpl.h index 76a3ad4..b4a9fbe 100644 --- a/src/tpl.h +++ b/src/tpl.h @@ -204,7 +204,8 @@ typedef enum { typedef enum { TPL_ERROR_NONE = 0, /* Successfull */ TPL_ERROR_INVALID_PARAMETER, /* Invalid parmeter */ - TPL_ERROR_INVALID_OPERATION /* Invalid operation */ + TPL_ERROR_INVALID_OPERATION, /* Invalid operation */ + TPL_ERROR_OUT_OF_MEMORY /* Out of memory */ } tpl_result_t; /** diff --git a/src/tpl_gbm.c b/src/tpl_gbm.c index d5d43ce..6875db0 100644 --- a/src/tpl_gbm.c +++ b/src/tpl_gbm.c @@ -102,7 +102,7 @@ __tpl_gbm_display_init(tpl_display_t *display) } gbm_display = (tpl_gbm_display_t *) calloc(1, sizeof(tpl_gbm_display_t)); - if (!gbm_display) return TPL_ERROR_INVALID_PARAMETER; + if (!gbm_display) return TPL_ERROR_OUT_OF_MEMORY; display->bufmgr_fd = dup(gbm_device_get_fd(display->native_handle)); gbm_display->bufmgr = tbm_bufmgr_init(display->bufmgr_fd); @@ -263,7 +263,7 @@ __tpl_gbm_surface_init(tpl_surface_t *surface) tpl_gbm_surface = (tpl_gbm_surface_t *) calloc(1, sizeof(tpl_gbm_surface_t)); if (!tpl_gbm_surface) { TPL_ERR("Failed to allocate new gbm backend surface."); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } surface->backend.data = (void *)tpl_gbm_surface; diff --git a/src/tpl_tbm.c b/src/tpl_tbm.c index a375008..b656306 100644 --- a/src/tpl_tbm.c +++ b/src/tpl_tbm.c @@ -30,16 +30,16 @@ __tpl_tbm_display_init(tpl_display_t *display) tbm_display = (tpl_tbm_display_t *) calloc(1, sizeof(tpl_tbm_display_t)); + if (!tbm_display) { + TPL_ERR("Failed to allocate memory for new tpl_tbm_display_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + if (!display->native_handle) { display->native_handle = tbm_bufmgr_init(-1); tbm_display->need_dpy_deinit = 1; } - if (!tbm_display) { - TPL_ERR("Failed to allocate memory for new tpl_tbm_display_t."); - return TPL_ERROR_INVALID_OPERATION; - } - display->backend.data = tbm_display; display->bufmgr_fd = -1; @@ -174,7 +174,7 @@ __tpl_tbm_surface_init(tpl_surface_t *surface) tpl_tbm_surface = (tpl_tbm_surface_t *) calloc(1, sizeof(tpl_tbm_surface_t)); if (!tpl_tbm_surface) { TPL_ERR("Failed to allocate memory for new tpl_tbm_surface_t"); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } surface->backend.data = (void *)tpl_tbm_surface; diff --git a/src/tpl_utils.h b/src/tpl_utils.h index 7c1696e..2b8580b 100644 --- a/src/tpl_utils.h +++ b/src/tpl_utils.h @@ -518,7 +518,7 @@ __tpl_list_insert(tpl_list_node_t *pos, void *data) tpl_list_node_t *node = (tpl_list_node_t *)malloc(sizeof(tpl_list_node_t)); if (!node) { TPL_ERR("Failed to allocate new tpl_list_node_t."); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } node->data = data; diff --git a/src/tpl_utils_hlist.c b/src/tpl_utils_hlist.c index 4997732..283dced 100644 --- a/src/tpl_utils_hlist.c +++ 
b/src/tpl_utils_hlist.c @@ -211,7 +211,7 @@ __tpl_hashlist_insert(tpl_hlist_t *list, size_t key, void *data) new_node = (tpl_hlist_node_t *) malloc(sizeof(tpl_hlist_node_t)); if (!new_node) { TPL_ERR("Failed to allocate new tpl_hlist_node_t."); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } hash = CALC_HASH(key); diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c index bc5507f..99b957f 100644 --- a/src/tpl_wayland_egl.c +++ b/src/tpl_wayland_egl.c @@ -144,7 +144,7 @@ __tpl_wayland_egl_display_init(tpl_display_t *display) sizeof(tpl_wayland_egl_display_t)); if (!wayland_egl_display) { TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_display_t."); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } display->backend.data = wayland_egl_display; @@ -483,7 +483,7 @@ __tpl_wayland_egl_surface_init(tpl_surface_t *surface) sizeof(tpl_wayland_egl_surface_t)); if (!wayland_egl_surface) { TPL_ERR("Failed to allocate memory for new tpl_wayland_egl_surface_t."); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } if (__tpl_object_init(&wayland_egl_surface->base, TPL_OBJECT_SURFACE, diff --git a/src/tpl_wayland_vk_wsi.c b/src/tpl_wayland_vk_wsi.c index 69e1fbf..febd7bd 100644 --- a/src/tpl_wayland_vk_wsi.c +++ b/src/tpl_wayland_vk_wsi.c @@ -167,7 +167,7 @@ __tpl_wayland_vk_wsi_display_init(tpl_display_t *display) sizeof(tpl_wayland_vk_wsi_display_t)); if (!wayland_vk_wsi_display) { TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_display_t."); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } wayland_vk_wsi_display->surface_capabilities.min_buffer = 2; @@ -288,7 +288,7 @@ __tpl_wayland_vk_wsi_surface_init(tpl_surface_t *surface) sizeof(tpl_wayland_vk_wsi_surface_t)); if (!wayland_vk_wsi_surface) { TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_surface_t."); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } surface->backend.data = (void *)wayland_vk_wsi_surface; @@ -624,6 +624,7 @@ __tpl_wayland_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; tbm_surface_queue_error_e tsq_err; int i, dequeue_count; + tpl_result_t ret = TPL_ERROR_NONE; TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); @@ -636,7 +637,7 @@ __tpl_wayland_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, wayland_vk_wsi_surface->buffer_count, sizeof(tbm_surface_h)); if (!swapchain_buffers) { TPL_ERR("Failed to allocate memory for buffers."); - return TPL_ERROR_INVALID_OPERATION; + return TPL_ERROR_OUT_OF_MEMORY; } for (i = 0 ; i < wayland_vk_wsi_surface->buffer_count ; i++) { @@ -645,6 +646,7 @@ __tpl_wayland_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d", tsq_err); dequeue_count = i; + ret = TPL_ERROR_OUT_OF_MEMORY; goto get_buffer_fail; } swapchain_buffers[i] = buffer; @@ -655,6 +657,7 @@ __tpl_wayland_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, swapchain_buffers[i]); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("Failed to release tbm_surface. 
| tsq_err = %d", tsq_err);
+			ret = TPL_ERROR_INVALID_OPERATION;
 			goto release_buffer_fail;
 		}
 	}
@@ -675,7 +678,7 @@ get_buffer_fail:
 release_buffer_fail:
 	free(swapchain_buffers);
 
-	return TPL_ERROR_INVALID_OPERATION;
+	return ret;
 }
 
 #if USE_WORKER_THREAD == 1
@@ -716,7 +719,7 @@ __tpl_wayland_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
 
 	if (!wayland_vk_wsi_surface->tbm_queue) {
 		TPL_ERR("TBM surface queue creation failed!");
-		return TPL_ERROR_INVALID_OPERATION;
+		return TPL_ERROR_OUT_OF_MEMORY;
 	}
 
 	wayland_vk_wsi_surface->buffer_count = buffer_count;
-- 
2.7.4


From 29f03f4bf4ff4220a0c58270fe08ef14f5503ca7 Mon Sep 17 00:00:00 2001
From: "joonbum.ko"
Date: Thu, 6 Oct 2016 14:45:16 +0900
Subject: [PATCH 07/16] tpl_wayland_egl: Unreference the attached buffers when
 a buffer flush occurs.

It uses the list of attached_buffers.

Change-Id: Iaf880e86807af33e75ad7ee41e682628db4216e1
Signed-off-by: joonbum.ko
---
 src/tpl_wayland_egl.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c
index 99b957f..0323dfc 100644
--- a/src/tpl_wayland_egl.c
+++ b/src/tpl_wayland_egl.c
@@ -1357,6 +1357,23 @@ static void __cb_tizen_surface_shm_flusher_flush_callback(void *data,
 	}
 
 	tbm_surface_queue_flush(wayland_egl_surface->tbm_queue);
+
+	/* Only when client call tpl_surface_dequeue_buffer(), client can do
+	 * unreference tbm_surface although there are release events in the event queue,
+	 * After tbm_surface_queue_flush, queue has no tbm_surface, client can do
+	 * unreference attached buffers using the list of attached_buffers.
+	 * Then, client does not need to wait for release_callback to unreference
+	 * attached buffer.
+	 */
+	if (wayland_egl_surface->attached_buffers) {
+		TPL_OBJECT_LOCK(&wayland_egl_surface->base);
+		while (!__tpl_list_is_empty(wayland_egl_surface->attached_buffers)) {
+			tbm_surface_h tbm_surface =
+				__tpl_list_pop_front(wayland_egl_surface->attached_buffers, NULL);
+			tbm_surface_internal_unref(tbm_surface);
+		}
+		TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
+	}
 }
 
 static const struct tizen_surface_shm_flusher_listener
-- 
2.7.4


From 3017e1311f70fc5088c44056a9f8b9a64558fa96 Mon Sep 17 00:00:00 2001
From: "deasung.kim"
Date: Sat, 24 Sep 2016 15:53:03 +0900
Subject: [PATCH 08/16] tpl_worker_thread: added worker_thread files

- Change the thread model from one thread per surface to a single
  per-process worker thread that handles events.
- Backends set the "draw done" / "fd get" functions on
  tpl_worker_thread_surface.
- More event functions will be added.

Change-Id: Ic6a316c75f3a0fa40e6fb3e8830f488e92d5cfa5
---
 Makefile                 |   2 +
 src/tpl_wayland_vk_wsi.c | 158 +++++------
 src/tpl_worker_thread.c  | 327 +++++++++++++++++++++++++++++++++++++++++++++++
 src/tpl_worker_thread.h  |  25 ++++
 4 files changed, 411 insertions(+), 101 deletions(-)
 create mode 100644 src/tpl_worker_thread.c
 create mode 100644 src/tpl_worker_thread.h

diff --git a/Makefile b/Makefile
index ee75eba..a755695 100644
--- a/Makefile
+++ b/Makefile
@@ -70,10 +70,12 @@ TPL_SRCS += $(SRC_DIR)/tpl_utils_hlist.c
 TPL_SRCS += $(SRC_DIR)/tpl_utils_map.c
 
 ifneq ($(call is-feature-enabled,winsys_wl),)
+TPL_HEADERS += $(SRC_DIR)/tpl_worker_thread.h
 TPL_SRCS += $(SRC_DIR)/tpl_wayland_egl.c
 TPL_SRCS += $(SRC_DIR)/tpl_wayland_vk_wsi.c
 TPL_SRCS += $(SRC_DIR)/tpl_gbm.c
 TPL_SRCS += $(SRC_DIR)/protocol/tizen-surface-protocol.c
+TPL_SRCS += $(SRC_DIR)/tpl_worker_thread.c
 endif
 
 ifneq ($(call is-feature-enabled,winsys_dri2),)
diff --git a/src/tpl_wayland_vk_wsi.c b/src/tpl_wayland_vk_wsi.c
index febd7bd..6f01e1d
100644 --- a/src/tpl_wayland_vk_wsi.c +++ b/src/tpl_wayland_vk_wsi.c @@ -17,6 +17,7 @@ #ifndef USE_WORKER_THREAD #define USE_WORKER_THREAD 0 #else +#include "tpl_worker_thread.h" #include #include #undef USE_WORKER_THREAD @@ -40,24 +41,19 @@ struct _tpl_wayland_vk_wsi_surface { int buffer_count; #if USE_WORKER_THREAD == 1 - /* TODO: move thread per display or process */ - pthread_t worker_id; - tpl_bool_t worker_running; - /* * TODO: it can move to libtbm * libtbm already has free queue's pthread_cond and pthread_mutex */ - pthread_mutex_t dirty_queue_mutex; - pthread_cond_t dirty_queue_cond; pthread_mutex_t free_queue_mutex; pthread_cond_t free_queue_cond; + + tpl_worker_surface_t worker_surface; #endif }; struct _tpl_wayland_vk_wsi_buffer { tpl_display_t *display; - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface; struct wl_proxy *wl_proxy; tbm_fd sync_timeline; unsigned int sync_timestamp; @@ -396,15 +392,11 @@ __tpl_wayland_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, #if USE_WORKER_THREAD == 1 wayland_vk_wsi_buffer->wait_sync = sync_fence; - pthread_mutex_lock(&wayland_vk_wsi_surface->dirty_queue_mutex); #endif tsq_err = tbm_surface_queue_enqueue(wayland_vk_wsi_surface->tbm_queue, tbm_surface); if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("Failed to enqeueue tbm_surface. | tsq_err = %d", tsq_err); -#if USE_WORKER_THREAD == 1 - pthread_mutex_unlock(&wayland_vk_wsi_surface->dirty_queue_mutex); -#endif return TPL_ERROR_INVALID_OPERATION; } @@ -428,13 +420,10 @@ __tpl_wayland_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, } __tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface); + #else - /* - * TODO: it can move to libtbm - * libtbm already has dirty queue's pthread_cond and pthread_mutex - */ - pthread_mutex_unlock(&wayland_vk_wsi_surface->dirty_queue_mutex); - pthread_cond_signal(&wayland_vk_wsi_surface->dirty_queue_cond); + __tpl_worker_new_buffer_notify(&wayland_vk_wsi_surface->worker_surface); + #endif /* @@ -606,7 +595,6 @@ __tpl_wayland_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, wayland_vk_wsi_buffer->display = surface->display; wayland_vk_wsi_buffer->wl_proxy = wl_proxy; - wayland_vk_wsi_buffer->wayland_vk_wsi_surface = wayland_vk_wsi_surface; __tpl_wayland_vk_wsi_set_wayland_buffer_to_tbm_surface(tbm_surface, wayland_vk_wsi_buffer); @@ -682,8 +670,45 @@ release_buffer_fail: } #if USE_WORKER_THREAD == 1 -static void * -__tpl_wayland_vk_wsi_worker_thread_loop(void *arg); +static void +__tpl_wayland_vk_wsi_process_draw_done(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + tpl_result_t result) +{ + tpl_wayland_vk_wsi_buffer_t *wayland_vk_wsi_buffer = NULL; + + /*TPL_ASSERT(surface);*/ + TPL_ASSERT(tbm_surface); + + wayland_vk_wsi_buffer = + __tpl_wayland_vk_wsi_get_wayland_buffer_from_tbm_surface(tbm_surface); + + TPL_ASSERT(wayland_vk_wsi_buffer); + + close(wayland_vk_wsi_buffer->wait_sync); + wayland_vk_wsi_buffer->wait_sync = -1; + + /* TODO: check present mode and prepare vblank */ + __tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface); +} + +static int +__tpl_wayland_vk_wsi_draw_wait_fd_get(tpl_surface_t *surface, + tbm_surface_h tbm_surface) +{ + tpl_wayland_vk_wsi_buffer_t *wayland_vk_wsi_buffer = NULL; + + /*TPL_ASSERT(surface);*/ + TPL_ASSERT(tbm_surface); + + wayland_vk_wsi_buffer = + __tpl_wayland_vk_wsi_get_wayland_buffer_from_tbm_surface(tbm_surface); + + TPL_ASSERT(wayland_vk_wsi_buffer); + + return wayland_vk_wsi_buffer->wait_sync; +} + #endif static tpl_result_t @@ -728,15 +753,19 @@ 
__tpl_wayland_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, surface->height = height; #if USE_WORKER_THREAD == 1 - pthread_mutex_init(&wayland_vk_wsi_surface->dirty_queue_mutex, NULL); pthread_mutex_init(&wayland_vk_wsi_surface->free_queue_mutex, NULL); - pthread_cond_init(&wayland_vk_wsi_surface->dirty_queue_cond, NULL); pthread_cond_init(&wayland_vk_wsi_surface->free_queue_cond, NULL); - wayland_vk_wsi_surface->worker_running = 1; - pthread_create(&wayland_vk_wsi_surface->worker_id, NULL, - __tpl_wayland_vk_wsi_worker_thread_loop, surface); + wayland_vk_wsi_surface->worker_surface.surface = surface; + wayland_vk_wsi_surface->worker_surface.tbm_queue = + wayland_vk_wsi_surface->tbm_queue; + + wayland_vk_wsi_surface->worker_surface.draw_done = + __tpl_wayland_vk_wsi_process_draw_done; + wayland_vk_wsi_surface->worker_surface.draw_wait_fd_get = + __tpl_wayland_vk_wsi_draw_wait_fd_get; + __tpl_worker_surface_list_insert(&wayland_vk_wsi_surface->worker_surface); #endif return TPL_ERROR_NONE; } @@ -753,6 +782,9 @@ __tpl_wayland_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; TPL_ASSERT(wayland_vk_wsi_surface); +#if USE_WORKER_THREAD == 1 + __tpl_worker_surface_list_remove(&wayland_vk_wsi_surface->worker_surface); +#endif if (surface->type == TPL_SURFACE_TYPE_WINDOW) { wl_display_flush(surface->display->native_handle); @@ -763,13 +795,8 @@ __tpl_wayland_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) } #if USE_WORKER_THREAD == 1 - wayland_vk_wsi_surface->worker_running = 0; - pthread_join(wayland_vk_wsi_surface->worker_id, NULL); - pthread_cond_destroy(&wayland_vk_wsi_surface->free_queue_cond); - pthread_cond_destroy(&wayland_vk_wsi_surface->dirty_queue_cond); pthread_mutex_destroy(&wayland_vk_wsi_surface->free_queue_mutex); - pthread_mutex_destroy(&wayland_vk_wsi_surface->dirty_queue_mutex); #endif return TPL_ERROR_NONE; } @@ -898,74 +925,3 @@ __cb_client_buffer_release_callback(void *data, struct wl_proxy *proxy) static const struct wl_buffer_listener buffer_release_listener = { (void *)__cb_client_buffer_release_callback, }; - -#if USE_WORKER_THREAD == 1 -static void * -__tpl_wayland_vk_wsi_worker_thread_loop(void *arg) -{ - tpl_surface_t *surface = arg; - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; - - /* - * TODO: it can change when thread per display or process model - * then need poll all surface's buffers wait sync - */ - while (wayland_vk_wsi_surface->worker_running) { - tbm_surface_queue_error_e tsq_err; - tbm_surface_h tbm_surface; - tpl_wayland_vk_wsi_buffer_t *wayland_vk_wsi_buffer; - - /* - * TODO: it can move to libtbm - * libtbm already has dirty queue's pthread_cond and pthread_mutex - * or with wait vblank in poll - */ - struct timespec abs_time; - tpl_bool_t timeout = TPL_FALSE; - - clock_gettime(CLOCK_REALTIME, &abs_time); - abs_time.tv_sec += 1; - pthread_mutex_lock(&wayland_vk_wsi_surface->dirty_queue_mutex); - while ((tbm_surface_queue_can_acquire(wayland_vk_wsi_surface->tbm_queue, - 0) == 0) && - (timeout == TPL_FALSE)) { - int ret; - ret = pthread_cond_timedwait(&wayland_vk_wsi_surface->dirty_queue_cond, - &wayland_vk_wsi_surface->dirty_queue_mutex, - &abs_time); - if (ret == ETIMEDOUT) - timeout = TPL_TRUE; - } - if (timeout) { - pthread_mutex_unlock(&wayland_vk_wsi_surface->dirty_queue_mutex); - continue; - } - - tsq_err = tbm_surface_queue_acquire(wayland_vk_wsi_surface->tbm_queue, - 
&tbm_surface); - pthread_mutex_unlock(&wayland_vk_wsi_surface->dirty_queue_mutex); - if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { - TPL_ERR("Failed to acquire tbm_surface. | tsq_err = %d", tsq_err); - continue; - } - - wayland_vk_wsi_buffer = - __tpl_wayland_vk_wsi_get_wayland_buffer_from_tbm_surface(tbm_surface); - TPL_ASSERT(wayland_vk_wsi_buffer); - if (wayland_vk_wsi_buffer->wait_sync != -1) { - if (tbm_sync_fence_wait(wayland_vk_wsi_buffer->wait_sync, -1) != 1) { - char buf[1024]; - strerror_r(errno, buf, sizeof(buf)); - TPL_ERR("Failed to wait sync. | error: %d(%s)", errno, buf); - } - close(wayland_vk_wsi_buffer->wait_sync); - wayland_vk_wsi_buffer->wait_sync = -1; - } - - __tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface); - } - - return NULL; -} -#endif diff --git a/src/tpl_worker_thread.c b/src/tpl_worker_thread.c new file mode 100644 index 0000000..784bbe6 --- /dev/null +++ b/src/tpl_worker_thread.c @@ -0,0 +1,327 @@ +#include "tpl_worker_thread.h" +#include "tpl_internal.h" + +#include +/*#define __USE_GNU*/ +#include +#include +#include + +#define TPL_ERR_ERRNO(f, x...) \ + do { int err = errno; char buf[256] = {0,}; \ + strerror_r(err, buf, 255); \ + TPL_ERR(f " | error: %d(%s)", ##x, err, buf); \ + } while (0); + +#define TPL_WARN_ERRNO(f, x...) \ + do { int err = errno; char buf[256] = {0,}; \ + strerror_r(err, buf, 255); \ + TPL_WARN(f " | error: %d(%s)", ##x, err, buf); \ + } while (0); + +static struct { + int running; + int epoll_fd; + int event_fd; + + pthread_t worker_id; + tpl_list_t surface_list; + pthread_mutex_t surface_mutex; +} tpl_worker_thread; + +void +__tpl_worker_surface_list_insert(tpl_worker_surface_t *surface) +{ + TPL_ASSERT(surface->surface); + TPL_ASSERT(surface->tbm_queue); + + if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) { + TPL_ERR_ERRNO("surface list mutex lock failed"); + return; + } + + surface->draw_wait_buffer = NULL; + __tpl_list_push_back(&tpl_worker_thread.surface_list, surface); + + pthread_mutex_unlock(&tpl_worker_thread.surface_mutex); +} + +void +__tpl_worker_surface_list_remove(tpl_worker_surface_t *surface) +{ + if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) { + TPL_ERR_ERRNO("surface list mutex lock failed"); + return; + } + + __tpl_list_remove_data(&tpl_worker_thread.surface_list, surface, + TPL_FIRST, NULL); + + pthread_mutex_unlock(&tpl_worker_thread.surface_mutex); +} + +static void +__tpl_worker_event_send() +{ + int len; + uint64_t dummy_event = 1; + + if (tpl_worker_thread.event_fd == -1) { + TPL_ERR("worker thread not working"); + return; + } + + len = write(tpl_worker_thread.event_fd, + &dummy_event, + sizeof(dummy_event)); + if (len < 0) + TPL_WARN_ERRNO("event fd(%d) write failed.", + tpl_worker_thread.event_fd); +} + +static void +__tpl_worker_prepare_draw_wait_buffer(int epoll_fd, + tpl_worker_surface_t *surface) +{ + if (surface->draw_wait_buffer) + return; + + while (tbm_surface_queue_can_acquire(surface->tbm_queue, 0)) { + tbm_surface_h tbm_surface = NULL; + tbm_surface_queue_error_e tsq_err; + int wait_fd = -1; + + tsq_err = tbm_surface_queue_acquire(surface->tbm_queue, &tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE || tbm_surface == NULL) { + TPL_ERR("Failed to acquire tbm_surface. 
| tsq_err = %d", tsq_err); + return; + } + + if (surface->draw_wait_fd_get) + wait_fd = surface->draw_wait_fd_get(surface->surface, tbm_surface); + + if (wait_fd != -1) { + struct epoll_event wait_fence_event; + int epoll_err; + + wait_fence_event.events = EPOLLIN; + wait_fence_event.data.ptr = surface; + epoll_err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, + wait_fd, + &wait_fence_event); + if (epoll_err == 0) { + surface->draw_wait_buffer = tbm_surface; + return; + } + } /* else can't(or not need) wait fence in poll */ + + if (surface->draw_done) + surface->draw_done(surface->surface, tbm_surface, + TPL_ERROR_INVALID_OPERATION); + } +} + +void +__tpl_worker_new_buffer_notify(tpl_worker_surface_t *surface) +{ + TPL_ASSERT(surface->surface); + + if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) { + TPL_ERR_ERRNO("surface list mutex lock failed"); + return; + } + + __tpl_worker_prepare_draw_wait_buffer(tpl_worker_thread.epoll_fd, surface); + pthread_mutex_unlock(&tpl_worker_thread.surface_mutex); +} + +static int +__tpl_worker_prepare_event_fd(int epoll_fd) +{ + int event_fd; + struct epoll_event event; + event.events = EPOLLIN; + event.data.ptr = &tpl_worker_thread; + + event_fd = eventfd(0, EFD_CLOEXEC); + if (event_fd == -1) { + TPL_ERR_ERRNO("eventfd() failed"); + return -1; + } + + if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &event) != 0) { + TPL_ERR_ERRNO("eventfd epoll ctl epoll_fd: %d, event_fd: %d.", + epoll_fd, tpl_worker_thread.event_fd); + close(event_fd); + return -1; + } + return event_fd; +} + +static void * +__tpl_worker_thread_loop(void *arg) +{ +#define EPOLL_MAX_SIZE 100 + int ret, epoll_fd = epoll_create(EPOLL_MAX_SIZE); + struct epoll_event ev_list[EPOLL_MAX_SIZE]; + + if (epoll_fd == -1) { + TPL_ERR_ERRNO("epoll create failed"); + goto cleanup; + } + + /* event fd */ + tpl_worker_thread.event_fd = __tpl_worker_prepare_event_fd(epoll_fd); + if (tpl_worker_thread.event_fd == -1) + goto cleanup; + + while(tpl_worker_thread.running) { + int i; + tpl_list_node_t *trail; + + /* set buffer's sync fd and vblank list */ + if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) { + TPL_ERR_ERRNO("surface list mutex lock failed"); + goto cleanup; + } + + for (trail = __tpl_list_get_front_node(&tpl_worker_thread.surface_list); + trail != NULL; + trail = __tpl_list_node_next(trail)) { + tpl_worker_surface_t *surface = __tpl_list_node_get_data(trail); + TPL_ASSERT(surface); + + __tpl_worker_prepare_draw_wait_buffer(epoll_fd, surface); + } + pthread_mutex_unlock(&tpl_worker_thread.surface_mutex); + + /* wait events */ + ret = epoll_wait(epoll_fd, ev_list, EPOLL_MAX_SIZE, -1); + if (ret == -1) { + TPL_ERR_ERRNO("epoll fd: %d.", epoll_fd); + continue; + } + + for (i = 0; i < ret; i++) { + if (ev_list[i].data.ptr == &tpl_worker_thread) { + /* thread terminate event */ + if (ev_list[i].events & EPOLLIN) { + int len; + uint64_t read_buf; + len = read(tpl_worker_thread.event_fd, + &read_buf, sizeof(uint64_t)); + if (len < 0) { + TPL_WARN_ERRNO("event fd(%d) read failed.", + tpl_worker_thread.event_fd); + continue; + } else { + break; + } + } + } else { + /* draw done */ + tpl_worker_surface_t *surface = ev_list[i].data.ptr; + int wait_fd; + int fence_result; + + if (!(ev_list[i].events & EPOLLIN)) + continue; + + if (!surface->draw_wait_buffer) { + TPL_WARN("recieve already signaled event\n"); + continue; + } + + wait_fd = surface->draw_wait_fd_get(surface->surface, surface->draw_wait_buffer); + if (wait_fd == -1) { + if (surface->draw_done) + 
surface->draw_done(surface->surface, surface->draw_wait_buffer, + TPL_ERROR_INVALID_OPERATION); + surface->draw_wait_buffer = NULL; + continue; + } + + switch (fence_result = tbm_sync_fence_wait(wait_fd, 0)) { + case 0: + TPL_ERR_ERRNO("sync_fence_wait return error."); + case 1: + /* some time recieve event two times */ + epoll_ctl(epoll_fd, EPOLL_CTL_DEL, wait_fd, NULL); + if (surface->draw_done) + surface->draw_done(surface->surface, surface->draw_wait_buffer, + fence_result == 1 ? + TPL_ERROR_NONE : TPL_ERROR_INVALID_OPERATION); + surface->draw_wait_buffer = NULL; + break; + case -1: + TPL_WARN("sync_fence_wait return timeout."); + break; + } + + /* prepare next buffer in loop start time */ + } + } + } + +cleanup: + /* thread cleanup */ + close(epoll_fd); + close(tpl_worker_thread.event_fd); + tpl_worker_thread.event_fd = -1; + + return NULL; +} + +static void __attribute__((constructor)) +__tpl_worker_init(void) +{ + /* + * It can be move to display or surface create function + * with pthread_once + */ + tpl_worker_thread.running = 1; + + if (pthread_mutex_init(&tpl_worker_thread.surface_mutex, NULL) != 0) { + TPL_ERR_ERRNO("surface list mutex init failed"); + goto error; + } + + __tpl_list_init(&tpl_worker_thread.surface_list); + + if (pthread_create(&tpl_worker_thread.worker_id, NULL, + __tpl_worker_thread_loop, + NULL) != 0) { + TPL_ERR_ERRNO("worker thread create failed"); + goto error_thread_create; + } + /*pthread_setname_np(tpl_worker_thread.worker_id, "tpl_worker_thread");*/ + + return; + +error_thread_create: + pthread_mutex_destroy(&tpl_worker_thread.surface_mutex); + +error: + tpl_worker_thread.running = 0; +} + +static void __attribute__((destructor)) +__tpl_worker_fini(void) +{ + if (tpl_worker_thread.running == 0) + return; + + /* deinitailize global object */ + tpl_worker_thread.running = 0; + + /* maybe EPOLLRDHUP not work with eventfd */ + /* close(tpl_worker_thread.event_fd); */ + __tpl_worker_event_send(); + + if (__tpl_list_get_count(&tpl_worker_thread.surface_list)) + TPL_WARN("called destructor, but tpl surface count: %d", + __tpl_list_get_count(&tpl_worker_thread.surface_list)); + + pthread_join(tpl_worker_thread.worker_id, NULL); + pthread_mutex_destroy(&tpl_worker_thread.surface_mutex); +} diff --git a/src/tpl_worker_thread.h b/src/tpl_worker_thread.h new file mode 100644 index 0000000..3f195d4 --- /dev/null +++ b/src/tpl_worker_thread.h @@ -0,0 +1,25 @@ +#ifndef TPL_WORKER_THREAD_H +#define TPL_WORKER_THREAD_H + +#include "tpl.h" +#include +#include +#include + +typedef struct __tpl_worker_surface tpl_worker_surface_t; + +struct __tpl_worker_surface { + tpl_surface_t *surface; + tbm_surface_queue_h tbm_queue; + + void (*draw_done)(tpl_surface_t *surface, tbm_surface_h tbm_surface, tpl_result_t result); + int (*draw_wait_fd_get)(tpl_surface_t *surface, tbm_surface_h tbm_surface); + + tbm_surface_h draw_wait_buffer; +}; + +void __tpl_worker_surface_list_insert(tpl_worker_surface_t *surface); +void __tpl_worker_surface_list_remove(tpl_worker_surface_t *surface); +void __tpl_worker_new_buffer_notify(tpl_worker_surface_t *surface); + +#endif //TPL_WORKER_THREAD_H -- 2.7.4 From ff613e3bebbfcb1ca2a9abe376a53559cec56327 Mon Sep 17 00:00:00 2001 From: "deasung.kim" Date: Sat, 24 Sep 2016 18:06:08 +0900 Subject: [PATCH 09/16] tpl_worker_thread: added vblank event added vblank event use tdm client for present mode FIFO Change-Id: I86830125cf9091e634a03375857a7723f7f9cafc --- src/tpl_worker_thread.c | 134 ++++++++++++++++++++++++++++++++++++++++++++++++ 
From ff613e3bebbfcb1ca2a9abe376a53559cec56327 Mon Sep 17 00:00:00 2001
From: "deasung.kim"
Date: Sat, 24 Sep 2016 18:06:08 +0900
Subject: [PATCH 09/16] tpl_worker_thread: added vblank event

Added a vblank event using the tdm client, for FIFO present mode support.

Change-Id: I86830125cf9091e634a03375857a7723f7f9cafc
---
 src/tpl_worker_thread.c | 134 ++++++++++++++++++++++++++++++++++++++++++++++++
 src/tpl_worker_thread.h |   3 ++
 2 files changed, 137 insertions(+)

diff --git a/src/tpl_worker_thread.c b/src/tpl_worker_thread.c
index 784bbe6..1c8af05 100644
--- a/src/tpl_worker_thread.c
+++ b/src/tpl_worker_thread.c
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include <tdm_client.h>
 
 #define TPL_ERR_ERRNO(f, x...) \
	do { int err = errno; char buf[256] = {0,}; \
@@ -27,8 +28,15 @@ static struct {
	pthread_t worker_id;
	tpl_list_t surface_list;
	pthread_mutex_t surface_mutex;
+	tpl_bool_t support_vblank;
 } tpl_worker_thread;
 
+tpl_bool_t
+__tpl_worker_support_vblank()
+{
+	return tpl_worker_thread.support_vblank;
+}
+
 void
 __tpl_worker_surface_list_insert(tpl_worker_surface_t *surface)
 {
@@ -135,6 +143,51 @@ __tpl_worker_new_buffer_notify(tpl_worker_surface_t *surface)
	pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
 }
 
+static tpl_bool_t
+__tpl_worker_regist_vblank_handler(tdm_client_vblank *tdm_vblank);
+
+static void
+__tpl_worker_cb_vblank(tdm_client_vblank *tdm_vblank, tdm_error error,
+		       unsigned int sequence, unsigned int tv_sec,
+		       unsigned int tv_usec, void *user_data)
+{
+	tpl_list_node_t *trail;
+
+	if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) {
+		TPL_ERR_ERRNO("surface list mutex lock failed");
+		return;
+	}
+
+	for (trail = __tpl_list_get_front_node(&tpl_worker_thread.surface_list);
+	     trail != NULL;
+	     trail = __tpl_list_node_next(trail)) {
+		tpl_worker_surface_t *surface;
+
+		surface = __tpl_list_node_get_data(trail);
+		if (surface->vblank)
+			surface->vblank(surface->surface, sequence, tv_sec, tv_usec);
+	}
+	pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
+
+	__tpl_worker_regist_vblank_handler(tdm_vblank);
+}
+
+static tpl_bool_t
+__tpl_worker_regist_vblank_handler(tdm_client_vblank *tdm_vblank)
+{
+	tdm_error tdm_err;
+
+	tdm_err = tdm_client_vblank_wait(tdm_vblank,
+					 1, /* interval */
+					 __tpl_worker_cb_vblank, /* handler */
+					 NULL);
+	if (tdm_err != TDM_ERROR_NONE) {
+		TPL_ERR("tdm_client_vblank_wait failed. error: %d", tdm_err);
+		return TPL_FALSE;
+	}
+	return TPL_TRUE;
+}
+
 static int
 __tpl_worker_prepare_event_fd(int epoll_fd)
 {
@@ -158,12 +211,79 @@ __tpl_worker_prepare_event_fd(int epoll_fd)
	return event_fd;
 }
 
+static tpl_bool_t
+__tpl_worker_prepare_vblank(int epoll_fd, tdm_client **ret_client, tdm_client_vblank **ret_vblank)
+{
+	tdm_error tdm_err;
+	tdm_client *tdm_client = NULL;
+	tdm_client_output *tdm_output = NULL;
+	tdm_client_vblank *tdm_vblank = NULL;
+	int tdm_fd, ret;
+	struct epoll_event event;
+
+	TPL_ASSERT(ret_client);
+	TPL_ASSERT(ret_vblank);
+
+	tdm_client = tdm_client_create(&tdm_err);
+	if (!tdm_client) {
+		TPL_ERR("tdm_client_create failed | tdm_err: %d\n", tdm_err);
+		goto error_cleanup;
+	}
+
+	tdm_err = tdm_client_get_fd(tdm_client, &tdm_fd);
+	if (tdm_err != TDM_ERROR_NONE || tdm_fd < 0) {
+		TPL_ERR("tdm_client_get_fd failed | tdm_err: %d\n", tdm_err);
+		goto error_cleanup;
+	}
+
+	tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
+	if (!tdm_output) {
+		TPL_ERR("Failed to get tdm client output. tdm_err(%d)", tdm_err);
+		goto error_cleanup;
+	}
+
+	tdm_vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
+	if (!tdm_vblank) {
+		TPL_ERR("Failed to create tdm client vblank. tdm_err(%d)", tdm_err);
+		goto error_cleanup;
+	}
+
+	tdm_client_vblank_set_enable_fake(tdm_vblank, 1);
+	tdm_client_vblank_set_sync(tdm_vblank, 0);
+
+	if (__tpl_worker_regist_vblank_handler(tdm_vblank) == TPL_FALSE)
+		goto error_cleanup;
+
+	event.events = EPOLLIN;
+	event.data.ptr = tdm_client;
+	ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, tdm_fd, &event);
+	if (ret != 0) {
+		TPL_ERR_ERRNO("tdm epoll ctl epoll_fd: %d, tdm_fd: %d.",
+			      epoll_fd, tdm_fd);
+		goto error_cleanup;
+	}
+
+	*ret_vblank = tdm_vblank;
+	*ret_client = tdm_client;
+
+	return TPL_TRUE;
+
+error_cleanup:
+	if (tdm_vblank)
+		tdm_client_vblank_destroy(tdm_vblank);
+	if (tdm_client)
+		tdm_client_destroy(tdm_client);
+	return TPL_FALSE;
+}
+
 static void *
 __tpl_worker_thread_loop(void *arg)
 {
 #define EPOLL_MAX_SIZE 100
	int ret, epoll_fd = epoll_create(EPOLL_MAX_SIZE);
	struct epoll_event ev_list[EPOLL_MAX_SIZE];
+	tdm_client *tdm_client = NULL;
+	tdm_client_vblank *tdm_vblank = NULL;
 
	if (epoll_fd == -1) {
		TPL_ERR_ERRNO("epoll create failed");
@@ -175,6 +295,10 @@ __tpl_worker_thread_loop(void *arg)
	if (tpl_worker_thread.event_fd == -1)
		goto cleanup;
 
+	/* vblank fd */
+	if (__tpl_worker_prepare_vblank(epoll_fd, &tdm_client, &tdm_vblank))
+		tpl_worker_thread.support_vblank = TPL_TRUE;
+
	while(tpl_worker_thread.running) {
		int i;
		tpl_list_node_t *trail;
@@ -218,6 +342,10 @@ __tpl_worker_thread_loop(void *arg)
					break;
				}
			}
+		} else if (ev_list[i].data.ptr == tdm_client) {
+			/* vblank */
+			tdm_client_handle_events(tdm_client);
+			/* processed in __tpl_worker_cb_vblank */
		} else {
			/* draw done */
			tpl_worker_surface_t *surface = ev_list[i].data.ptr;
@@ -265,6 +393,11 @@ __tpl_worker_thread_loop(void *arg)
 
 cleanup:
	/* thread cleanup */
+	if (tdm_vblank)
+		tdm_client_vblank_destroy(tdm_vblank);
+	if (tdm_client)
+		tdm_client_destroy(tdm_client);
+
	close(epoll_fd);
	close(tpl_worker_thread.event_fd);
	tpl_worker_thread.event_fd = -1;
@@ -280,6 +413,7 @@ __tpl_worker_init(void)
	 * with pthread_once.
	 */
	tpl_worker_thread.running = 1;
+	tpl_worker_thread.support_vblank = TPL_FALSE;
 
	if (pthread_mutex_init(&tpl_worker_thread.surface_mutex, NULL) != 0) {
		TPL_ERR_ERRNO("surface list mutex init failed");
diff --git a/src/tpl_worker_thread.h b/src/tpl_worker_thread.h
index 3f195d4..08b8bfc 100644
--- a/src/tpl_worker_thread.h
+++ b/src/tpl_worker_thread.h
@@ -14,10 +14,13 @@ struct __tpl_worker_surface {
 
	void (*draw_done)(tpl_surface_t *surface, tbm_surface_h tbm_surface, tpl_result_t result);
	int (*draw_wait_fd_get)(tpl_surface_t *surface, tbm_surface_h tbm_surface);
+	void (*vblank)(tpl_surface_t *surface, unsigned int sequence, unsigned int tv_sec,
+		       unsigned int tv_usec);
 
	tbm_surface_h draw_wait_buffer;
 };
 
+tpl_bool_t __tpl_worker_support_vblank();
 void __tpl_worker_surface_list_insert(tpl_worker_surface_t *surface);
 void __tpl_worker_surface_list_remove(tpl_worker_surface_t *surface);
 void __tpl_worker_new_buffer_notify(tpl_worker_surface_t *surface);
-- 
2.7.4
From 6e8adbdb572bab67283b6bcdcfed2195f093aed6 Mon Sep 17 00:00:00 2001
From: "deasung.kim"
Date: Sat, 24 Sep 2016 18:26:46 +0900
Subject: [PATCH 10/16] tpl: tpl_surface_create_swapchain() API is modified for
 present mode support

tpl_display_query_supported_present_modes_from_native_window() is added
for querying the supported present modes.

- Add the tpl_display_present_mode_t enum; its comments are adapted from
  the present mode descriptions in the Vulkan spec.

Change-Id: Ief1f322dbb083996d7b77afed7832860dc6e063a
---
 src/tpl.h                | 69 +++++++++++++++++++++++++++++++++++++++++++++++-
 src/tpl_display.c        | 14 ++++++++++
 src/tpl_internal.h       |  5 +++-
 src/tpl_surface.c        |  4 +--
 src/tpl_wayland_vk_wsi.c |  2 +-
 5 files changed, 89 insertions(+), 5 deletions(-)

diff --git a/src/tpl.h b/src/tpl.h
index b4a9fbe..4d9145f 100644
--- a/src/tpl.h
+++ b/src/tpl.h
@@ -598,14 +598,16 @@ tpl_surface_get_post_interval(tpl_surface_t *surface);
  * @param width width to the swapchain buffer.
  * @param height height to the swapchain buffer.
  * @param buffer_count buffer count to the swapchain.
+ * @param present_mode present mode for the swapchain.
  * @return TPL_ERROR_NONE if this function is supported and the tpl_surface is valid, TPL_ERROR otherwise.
  *
  * @see tpl_surface_get_swapchain_buffers()
  * @see tpl_surface_destroy_swapchain()
+ * @see tpl_display_present_mode_t
  */
 tpl_result_t
 tpl_surface_create_swapchain(tpl_surface_t *surface, tbm_format format,
-			     int width, int height, int buffer_count);
+			     int width, int height, int buffer_count, int present_mode);
 
 /**
  * Destroy a swapchain for the given TPL surface.
@@ -726,4 +728,69 @@ tpl_surface_set_frontbuffer_mode(tpl_surface_t *surface, tpl_bool_t set);
 tpl_result_t
 tpl_surface_set_reset_cb(tpl_surface_t *surface, void* data, tpl_surface_cb_func_t reset_cb);
+
+/**
+ * Present mode types.
+ *
+ * @TPL_DISPLAY_PRESENT_MODE_IMMEDIATE: The presentation engine does not wait
+ *  for a vertical blanking period to update the current image, meaning this
+ *  mode may result in visible tearing. No internal queuing of presentation
+ *  requests is needed, as the requests are applied immediately.
+ * @TPL_DISPLAY_PRESENT_MODE_MAILBOX: The presentation engine waits for the
+ *  next vertical blanking period to update the current image. Tearing cannot
+ *  be observed. An internal single-entry queue is used to hold pending
+ *  presentation requests. If the queue is full when a new presentation
+ *  request is received, the new request replaces the existing entry, and any
+ *  images associated with the prior entry become available for re-use by the
+ *  application. One request is removed from the queue and processed during
+ *  each vertical blanking period in which the queue is non-empty.
+ * @TPL_DISPLAY_PRESENT_MODE_FIFO: The presentation engine waits for the next
+ *  vertical blanking period to update the current image. Tearing cannot be
+ *  observed. An internal queue is used to hold pending presentation requests.
+ *  New requests are appended to the end of the queue, and one request is
+ *  removed from the beginning of the queue and processed during each vertical
+ *  blanking period in which the queue is non-empty. This is the only value of
+ *  presentMode that is required to be supported.
+ * @TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED: The presentation engine generally
+ *  waits for the next vertical blanking period to update the current image.
+ *  If a vertical blanking period has already passed since the last update of
+ *  the current image, then the presentation engine does not wait for another
+ *  vertical blanking period for the update, meaning this mode may result in
+ *  visible tearing in this case. This mode is useful for reducing visual
+ *  stutter with an application that will mostly present a new image before
+ *  the next vertical blanking period, but may occasionally be late, and
+ *  present a new image just after the next vertical blanking period. An
+ *  internal queue is used to hold pending presentation requests. New requests
+ *  are appended to the end of the queue, and one request is removed from the
+ *  beginning of the queue and processed during or after each vertical
+ *  blanking period in which the queue is non-empty.
+ *
+ * The default mode is TPL_DISPLAY_PRESENT_MODE_MAILBOX.
+ *
+ * @see tpl_display_query_supported_present_modes_from_native_window
+ */
+typedef enum {
+	TPL_DISPLAY_PRESENT_MODE_MAILBOX = 1,
+	TPL_DISPLAY_PRESENT_MODE_FIFO = 2,
+	TPL_DISPLAY_PRESENT_MODE_IMMEDIATE = 4,
+	TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED = 8,
+	TPL_DISPLAY_PRESENT_MODE_MAX,
+} tpl_display_present_mode_t;
+
+/**
+ * Get the present mode capability of the given native window.
+ *
+ * @param display display used for the query.
+ * @param window window used to query the present mode capability.
+ * @param modes pointer that receives the bitwise OR of the supported present modes.
+ * @return TPL_ERROR_NONE if this function is supported and the window is valid, TPL_ERROR otherwise.
+ *
+ * @see tpl_display_present_mode_t
+ */
+tpl_result_t
+tpl_display_query_supported_present_modes_from_native_window(tpl_display_t *display,
+							     tpl_handle_t window,
+							     int *modes);
 #endif /* TPL_H */
diff --git a/src/tpl_display.c b/src/tpl_display.c
index 69295b0..b58da59 100644
--- a/src/tpl_display.c
+++ b/src/tpl_display.c
@@ -190,3 +190,17 @@ tpl_display_get_buffer_from_native_pixmap(tpl_display_t *display,
 
	return display->backend.get_buffer_from_native_pixmap(pixmap);
 }
+
+tpl_result_t
+tpl_display_query_supported_present_modes_from_native_window(tpl_display_t *display,
+							     tpl_handle_t window,
+							     int *modes)
+{
+	if (!display->backend.query_window_supported_present_modes) {
+		TPL_ERR("Backend for display has not been initialized!");
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	return display->backend.query_window_supported_present_modes(display, window,
+								     modes);
+}
diff --git a/src/tpl_internal.h b/src/tpl_internal.h
index 41da0a4..2297db4 100644
--- a/src/tpl_internal.h
+++ b/src/tpl_internal.h
@@ -74,6 +74,9 @@ struct _tpl_display_backend {
	tpl_result_t (*query_window_supported_buffer_count)(tpl_display_t *display,
							     tpl_handle_t window,
							     int *min, int *max);
+	tpl_result_t (*query_window_supported_present_modes)(tpl_display_t *display,
+							      tpl_handle_t window,
+							      int *modes);
 };
 
 struct _tpl_surface_backend {
@@ -94,7 +97,7 @@ struct _tpl_surface_backend {
					 int *buffer_count);
	tpl_result_t (*create_swapchain)(tpl_surface_t *surface,
					 tbm_format format, int width,
-					 int height, int buffer_count);
+					 int height, int buffer_count, int present_mode);
	tpl_result_t (*destroy_swapchain)(tpl_surface_t *surface);
 };
diff --git a/src/tpl_surface.c b/src/tpl_surface.c
index 762a597..91aefc1 100644
--- a/src/tpl_surface.c
+++ b/src/tpl_surface.c
@@ -322,7 +322,7 @@ tpl_surface_get_swapchain_buffers(tpl_surface_t *surface,
 
 tpl_result_t
 tpl_surface_create_swapchain(tpl_surface_t *surface, tbm_format format,
-			     int width, int height, int buffer_count)
+			     int width, int height, int buffer_count, int present_mode)
 {
	tpl_result_t ret = TPL_ERROR_INVALID_OPERATION;
 
@@ -347,7 +347,7 @@ tpl_surface_create_swapchain(tpl_surface_t *surface, tbm_format format,
	TPL_OBJECT_LOCK(surface);
 
	ret = surface->backend.create_swapchain(surface, format, width, height,
-						buffer_count);
+						buffer_count, present_mode);
 
	TPL_OBJECT_UNLOCK(surface);
 
diff --git a/src/tpl_wayland_vk_wsi.c b/src/tpl_wayland_vk_wsi.c
index 6f01e1d..68ea5cc 100644
--- a/src/tpl_wayland_vk_wsi.c
+++ b/src/tpl_wayland_vk_wsi.c
@@ -714,7 +714,7 @@ __tpl_wayland_vk_wsi_draw_wait_fd_get(tpl_surface_t *surface,
 static tpl_result_t
 __tpl_wayland_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
					      tbm_format format, int width,
-					      int height, int buffer_count)
+					      int height, int buffer_count, int present_mode)
 {
	tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
	tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
-- 
2.7.4

From d678b7a9d2abb01f633239cccd29dd1c6a39ff63 Mon Sep 17 00:00:00 2001
From: "deasung.kim"
Date: Mon, 10 Oct 2016 18:44:13 +0900
Subject: [PATCH 11/16] tpl_worker_thread: change the timing of the
 epoll_ctl_add() call on the next draw-wait buffer

- before: __tpl_worker_thread_loop() iterated over all surfaces before each
  epoll_wait() call.
- after: __tpl_worker_thread_loop() calls __tpl_worker_prepare_draw_wait_buffer()
  once, after handling the current surface's draw-done event.

Change-Id: Ia8ffb537794eb01c19f4fff33acf40e3071f1184
---
 src/tpl_worker_thread.c | 114 ++++++++++++++++++++++++++++--------------------
 src/tpl_worker_thread.h |   2 +
 2 files changed, 68 insertions(+), 48 deletions(-)

diff --git a/src/tpl_worker_thread.c b/src/tpl_worker_thread.c
index 1c8af05..69cd6bd 100644
--- a/src/tpl_worker_thread.c
+++ b/src/tpl_worker_thread.c
@@ -49,6 +49,10 @@ __tpl_worker_surface_list_insert(tpl_worker_surface_t *surface)
	}
 
	surface->draw_wait_buffer = NULL;
+
+	if (pthread_mutex_init(&surface->mutex, NULL) != 0)
+		TPL_ERR_ERRNO("surface mutex init failed");
+
	__tpl_list_push_back(&tpl_worker_thread.surface_list, surface);
 
	pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
@@ -65,6 +69,23 @@ __tpl_worker_surface_list_remove(tpl_worker_surface_t *surface)
	__tpl_list_remove_data(&tpl_worker_thread.surface_list, surface,
			       TPL_FIRST, NULL);
 
+	if (pthread_mutex_lock(&surface->mutex) != 0)
+		TPL_ERR_ERRNO("surface mutex lock failed");
+
+	if (surface->draw_wait_buffer) {
+		int wait_fd;
+
+		wait_fd = surface->draw_wait_fd_get(surface->surface,
+						    surface->draw_wait_buffer);
+		if (wait_fd != -1)
+			epoll_ctl(tpl_worker_thread.epoll_fd, EPOLL_CTL_DEL, wait_fd, NULL);
+		surface->draw_wait_buffer = NULL;
+	}
+	pthread_mutex_unlock(&surface->mutex);
+
+	if (pthread_mutex_destroy(&surface->mutex) != 0)
+		TPL_ERR_ERRNO("surface mutex destroy failed");
+
	pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
 }
 
@@ -134,13 +155,12 @@ __tpl_worker_new_buffer_notify(tpl_worker_surface_t *surface)
 {
	TPL_ASSERT(surface->surface);
 
-	if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) {
+	if (pthread_mutex_lock(&surface->mutex) != 0)
		TPL_ERR_ERRNO("surface list mutex lock failed");
-		return;
-	}
 
	__tpl_worker_prepare_draw_wait_buffer(tpl_worker_thread.epoll_fd, surface);
-	pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
+
+	pthread_mutex_unlock(&surface->mutex);
 }
 
@@ -164,8 +184,14 @@ __tpl_worker_cb_vblank(tdm_client_vblank *tdm_vblank, tdm_error error,
		tpl_worker_surface_t *surface;
 
		surface = __tpl_list_node_get_data(trail);
+
+		if (pthread_mutex_lock(&surface->mutex) != 0)
+			TPL_ERR_ERRNO("surface mutex lock failed");
+
		if (surface->vblank)
			surface->vblank(surface->surface, sequence, tv_sec, tv_usec);
+
+		pthread_mutex_unlock(&surface->mutex);
	}
	pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
 
@@ -301,23 +327,6 @@ __tpl_worker_thread_loop(void *arg)
 
	while(tpl_worker_thread.running) {
		int i;
-		tpl_list_node_t *trail;
-
-		/* set buffer's sync fd and vblank list */
-		if (pthread_mutex_lock(&tpl_worker_thread.surface_mutex) != 0) {
-			TPL_ERR_ERRNO("surface list mutex lock failed");
-			goto cleanup;
-		}
-
-		for (trail = __tpl_list_get_front_node(&tpl_worker_thread.surface_list);
-		     trail != NULL;
-		     trail = __tpl_list_node_next(trail)) {
-			tpl_worker_surface_t *surface = __tpl_list_node_get_data(trail);
-			TPL_ASSERT(surface);
-
-			__tpl_worker_prepare_draw_wait_buffer(epoll_fd, surface);
-		}
-		pthread_mutex_unlock(&tpl_worker_thread.surface_mutex);
 
		/* wait events */
		ret = epoll_wait(epoll_fd, ev_list, EPOLL_MAX_SIZE, -1);
@@ -332,6 +341,7 @@ __tpl_worker_thread_loop(void *arg)
			if (ev_list[i].events & EPOLLIN) {
				int len;
				uint64_t read_buf;
+
				len = read(tpl_worker_thread.event_fd, &read_buf,
					   sizeof(uint64_t));
				if (len < 0) {
@@ -349,44 +359,52 @@ __tpl_worker_thread_loop(void *arg)
			} else {
				/* draw done */
				tpl_worker_surface_t *surface = ev_list[i].data.ptr;
-				int wait_fd;
-				int fence_result;
 
				if (!(ev_list[i].events & EPOLLIN))
					continue;
 
-				if (!surface->draw_wait_buffer) {
-					TPL_WARN("received an already signaled event\n");
-					continue;
-				}
+				if (pthread_mutex_lock(&surface->mutex) != 0)
+					TPL_ERR_ERRNO("surface mutex lock failed");
 
-				wait_fd = surface->draw_wait_fd_get(surface->surface, surface->draw_wait_buffer);
-				if (wait_fd == -1) {
-					if (surface->draw_done)
-						surface->draw_done(surface->surface, surface->draw_wait_buffer,
-								   TPL_ERROR_INVALID_OPERATION);
-					surface->draw_wait_buffer = NULL;
-					continue;
-				}
+				if (surface->draw_wait_buffer) {
+					int wait_fd;
 
-				switch (fence_result = tbm_sync_fence_wait(wait_fd, 0)) {
-				case 0:
-					TPL_ERR_ERRNO("sync_fence_wait returned an error.");
-				case 1:
-					/* sometimes the same event is received twice */
-					epoll_ctl(epoll_fd, EPOLL_CTL_DEL, wait_fd, NULL);
-					if (surface->draw_done)
-						surface->draw_done(surface->surface, surface->draw_wait_buffer,
-								   fence_result == 1 ?
-								   TPL_ERROR_NONE : TPL_ERROR_INVALID_OPERATION);
-					surface->draw_wait_buffer = NULL;
-					break;
-				case -1:
-					TPL_WARN("sync_fence_wait returned timeout.");
-					break;
+					wait_fd = surface->draw_wait_fd_get(surface->surface,
+									    surface->draw_wait_buffer);
+					if (wait_fd == -1) {
+						if (surface->draw_done)
+							surface->draw_done(surface->surface,
+									   surface->draw_wait_buffer,
+									   TPL_ERROR_INVALID_OPERATION);
+						surface->draw_wait_buffer = NULL;
+					} else {
+						int fence_result;
+
+						switch (fence_result = tbm_sync_fence_wait(wait_fd, 0)) {
+						case 0:
+							TPL_ERR_ERRNO("sync_fence_wait returned an error.");
+							/* fall through */
+						case 1:
+							/* sometimes the same event is received twice */
+							epoll_ctl(epoll_fd, EPOLL_CTL_DEL, wait_fd, NULL);
+							if (surface->draw_done)
+								surface->draw_done(surface->surface,
+										   surface->draw_wait_buffer,
+										   fence_result == 1 ?
+										   TPL_ERROR_NONE :
+										   TPL_ERROR_INVALID_OPERATION);
+							surface->draw_wait_buffer = NULL;
+							break;
+						case -1:
+							TPL_WARN("sync_fence_wait returned timeout.");
+							break;
+						}
+					}
+				} else {
+					TPL_WARN("received an already signaled event\n");
				}
 
-				/* prepare the next buffer at loop start */
+				if (surface->draw_wait_buffer == NULL)
+					__tpl_worker_prepare_draw_wait_buffer(epoll_fd, surface);
+				pthread_mutex_unlock(&surface->mutex);
			}
		}
	}
diff --git a/src/tpl_worker_thread.h b/src/tpl_worker_thread.h
index 08b8bfc..987dbb1 100644
--- a/src/tpl_worker_thread.h
+++ b/src/tpl_worker_thread.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include <pthread.h>
 
 typedef struct __tpl_worker_surface tpl_worker_surface_t;
 
@@ -18,6 +19,7 @@ struct __tpl_worker_surface {
			  unsigned int tv_usec);
 
	tbm_surface_h draw_wait_buffer;
+	pthread_mutex_t mutex;
 };
 
 tpl_bool_t __tpl_worker_support_vblank();
-- 
2.7.4
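PATCH 11 narrows the locking from the global surface-list mutex to a
per-surface mutex, so that __tpl_worker_new_buffer_notify() can register the
next fence without stalling the whole list. The body of
__tpl_worker_prepare_draw_wait_buffer() itself is not shown anywhere in this
series; the following is a hypothetical sketch of what such a helper has to
do, under the assumptions that buffers arrive through the surface's
tbm_surface_queue and that the caller already holds surface->mutex (the
function name and the queue-acquire step are illustrative, not the project's
actual code):

    static void prepare_draw_wait_buffer(int epoll_fd, tpl_worker_surface_t *surface)
    {
        tbm_surface_h next;
        struct epoll_event event;
        int wait_fd;

        if (surface->draw_wait_buffer)
            return; /* already waiting on a fence */

        if (!tbm_surface_queue_can_acquire(surface->tbm_queue, 0))
            return; /* nothing rendered yet */

        if (tbm_surface_queue_acquire(surface->tbm_queue, &next) !=
            TBM_SURFACE_QUEUE_ERROR_NONE)
            return;

        wait_fd = surface->draw_wait_fd_get(surface->surface, next);
        if (wait_fd == -1) {
            /* no fence attached: the buffer is already drawable */
            if (surface->draw_done)
                surface->draw_done(surface->surface, next, TPL_ERROR_NONE);
            return;
        }

        /* data.ptr carries the surface so the worker loop can route
         * the EPOLLIN wakeup back to it */
        event.events = EPOLLIN;
        event.data.ptr = surface;
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, wait_fd, &event) == 0)
            surface->draw_wait_buffer = next;
    }

This also explains the EPOLL_CTL_DEL in __tpl_worker_surface_list_remove():
whatever fd was registered for the pending buffer must be dropped from the
epoll set before the surface and its mutex are torn down.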
From 5747d3d3ab316fdaa23d4e1e3083d810ed11b1d1 Mon Sep 17 00:00:00 2001
From: "deasung.kim"
Date: Sat, 24 Sep 2016 19:59:45 +0900
Subject: [PATCH 12/16] tpl_wayland_vk_wsi: implement the WSI's present modes

Implement __tpl_wayland_vk_wsi_display_query_window_supported_present_modes().
The vblank event feature supports the FIFO/FIFO_RELAXED modes.

Change-Id: Id4fafee58ad3b2772449c3fb050ede2c004c52a9
---
 src/tpl_wayland_vk_wsi.c | 140 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 136 insertions(+), 4 deletions(-)

diff --git a/src/tpl_wayland_vk_wsi.c b/src/tpl_wayland_vk_wsi.c
index 68ea5cc..7350f2b 100644
--- a/src/tpl_wayland_vk_wsi.c
+++ b/src/tpl_wayland_vk_wsi.c
@@ -48,8 +48,15 @@ struct _tpl_wayland_vk_wsi_surface {
	pthread_mutex_t free_queue_mutex;
	pthread_cond_t free_queue_cond;
 
+	/* tbm_surface list */
+	tpl_list_t vblank_list;
+	pthread_mutex_t vblank_list_mutex;
+
+	tpl_bool_t vblank_done;
+
	tpl_worker_surface_t worker_surface;
 #endif
+	int present_mode;
 };
 
 struct _tpl_wayland_vk_wsi_buffer {
@@ -167,7 +174,7 @@ __tpl_wayland_vk_wsi_display_init(tpl_display_t *display)
	}
 
	wayland_vk_wsi_display->surface_capabilities.min_buffer = 2;
-	wayland_vk_wsi_display->surface_capabilities.max_buffer = CLIENT_QUEUE_SIZE;;
+	wayland_vk_wsi_display->surface_capabilities.max_buffer = CLIENT_QUEUE_SIZE;
 
	display->backend.data = wayland_vk_wsi_display;
 
@@ -270,6 +277,30 @@ __tpl_wayland_vk_wsi_display_query_window_supported_buffer_count(
	return TPL_ERROR_NONE;
 }
 
+static tpl_result_t
+__tpl_wayland_vk_wsi_display_query_window_supported_present_modes(
+	tpl_display_t *display,
+	tpl_handle_t window, int *modes)
+{
+	tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+
+	TPL_ASSERT(display);
+	TPL_ASSERT(window);
+
+	wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data;
+
+	if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION;
+
+	if (modes) {
+		*modes = TPL_DISPLAY_PRESENT_MODE_MAILBOX | TPL_DISPLAY_PRESENT_MODE_IMMEDIATE;
+#if USE_WORKER_THREAD == 1
+		if (__tpl_worker_support_vblank() == TPL_TRUE)
+			*modes |= TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED;
+#endif
+	}
+
+	return TPL_ERROR_NONE;
+}
 
 static tpl_result_t
 __tpl_wayland_vk_wsi_surface_init(tpl_surface_t *surface)
@@ -324,6 +355,7 @@ __tpl_wayland_vk_wsi_surface_commit_buffer(tpl_surface_t *surface,
	TPL_ASSERT(surface->display);
	TPL_ASSERT(surface->display->native_handle);
	TPL_ASSERT(tbm_surface);
+	TPL_ASSERT(tbm_surface_internal_is_valid(tbm_surface));
 
	struct wl_surface *wl_sfc = NULL;
	struct wl_callback *frame_callback = NULL;
@@ -675,21 +707,46 @@ __tpl_wayland_vk_wsi_process_draw_done(tpl_surface_t *surface,
				       tbm_surface_h tbm_surface,
				       tpl_result_t result)
 {
+	tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
	tpl_wayland_vk_wsi_buffer_t *wayland_vk_wsi_buffer = NULL;
 
-	/*TPL_ASSERT(surface);*/
+	TPL_ASSERT(surface);
	TPL_ASSERT(tbm_surface);
+	TPL_ASSERT(tbm_surface_internal_is_valid(tbm_surface));
 
+	wayland_vk_wsi_surface =
+		(tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
	wayland_vk_wsi_buffer =
		__tpl_wayland_vk_wsi_get_wayland_buffer_from_tbm_surface(tbm_surface);
 
+	TPL_ASSERT(wayland_vk_wsi_surface);
	TPL_ASSERT(wayland_vk_wsi_buffer);
 
+	/* TODO: send the buffer to the server immediately once the server
+	 * supports present modes */
+
	close(wayland_vk_wsi_buffer->wait_sync);
	wayland_vk_wsi_buffer->wait_sync = -1;
 
-	/* TODO: check present mode and prepare vblank */
-	__tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface);
+	if (wayland_vk_wsi_surface->present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO) {
+		pthread_mutex_lock(&wayland_vk_wsi_surface->vblank_list_mutex);
+		/* unref in the tpl list remove callback
+		   (__tpl_wayland_vk_wsi_buffer_remove_from_vblank_list) */
+		tbm_surface_internal_ref(tbm_surface);
+		__tpl_list_push_back(&wayland_vk_wsi_surface->vblank_list, tbm_surface);
+		pthread_mutex_unlock(&wayland_vk_wsi_surface->vblank_list_mutex);
+	} else if (wayland_vk_wsi_surface->present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED &&
+		   wayland_vk_wsi_surface->vblank_done == TPL_FALSE) {
+		/* if the previous vblank event can't be processed, send the buffer immediately */
+		pthread_mutex_lock(&wayland_vk_wsi_surface->vblank_list_mutex);
+		/* unref in the tpl list remove callback
+		   (__tpl_wayland_vk_wsi_buffer_remove_from_vblank_list) */
+		tbm_surface_internal_ref(tbm_surface);
+		__tpl_list_push_back(&wayland_vk_wsi_surface->vblank_list, tbm_surface);
+		wayland_vk_wsi_surface->vblank_done = TPL_TRUE;
+		pthread_mutex_unlock(&wayland_vk_wsi_surface->vblank_list_mutex);
+	} else {
+		__tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface);
+	}
 }
 
 static int
@@ -709,6 +766,43 @@ __tpl_wayland_vk_wsi_draw_wait_fd_get(tpl_surface_t *surface,
	return wayland_vk_wsi_buffer->wait_sync;
 }
 
+static void
+__tpl_wayland_vk_wsi_buffer_remove_from_vblank_list(void *data)
+{
+	tbm_surface_h tbm_surface = data;
+	tbm_surface_internal_unref(tbm_surface);
+}
+
+static void
+__tpl_wayland_vk_wsi_vblank(tpl_surface_t *surface, unsigned int sequence,
+			    unsigned int tv_sec, unsigned int tv_usec)
+{
+	tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface;
+	tbm_surface_h tbm_surface;
+
+	TPL_ASSERT(surface);
+
+	wayland_vk_wsi_surface =
+		(tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
+
+	TPL_ASSERT(wayland_vk_wsi_surface);
+
+	if ((wayland_vk_wsi_surface->present_mode &
+	     (TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED)) == 0)
+		return;
+
+	pthread_mutex_lock(&wayland_vk_wsi_surface->vblank_list_mutex);
+	tbm_surface = __tpl_list_pop_front(&wayland_vk_wsi_surface->vblank_list,
+					   __tpl_wayland_vk_wsi_buffer_remove_from_vblank_list);
+	pthread_mutex_unlock(&wayland_vk_wsi_surface->vblank_list_mutex);
+
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		__tpl_wayland_vk_wsi_surface_commit_buffer(surface, tbm_surface);
+		wayland_vk_wsi_surface->vblank_done = TPL_TRUE;
+	} else {
+		wayland_vk_wsi_surface->vblank_done = TPL_FALSE;
+	}
+}
 #endif
 
 static tpl_result_t
@@ -736,6 +830,26 @@ __tpl_wayland_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
		return TPL_ERROR_INVALID_PARAMETER;
	}
 
+	/* TODO: check the present modes supported by the server */
+	switch (present_mode) {
+#if USE_WORKER_THREAD == 1
+	case TPL_DISPLAY_PRESENT_MODE_FIFO:
+	case TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED:
+		if (__tpl_worker_support_vblank() == TPL_FALSE) {
+			TPL_ERR("Unsupported present mode: %d, the worker does not support vblank",
+				present_mode);
+			return TPL_ERROR_INVALID_PARAMETER;
+		}
+		/* fall through */
+#endif
+	case TPL_DISPLAY_PRESENT_MODE_MAILBOX:
+	case TPL_DISPLAY_PRESENT_MODE_IMMEDIATE:
+		break;
+	default:
+		TPL_ERR("Unsupported present mode: %d", present_mode);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	wayland_vk_wsi_surface->present_mode = present_mode;
+
	wayland_vk_wsi_surface->tbm_queue = tbm_surface_queue_create(buffer_count,
								     width,
								     height,
@@ -764,6 +878,13 @@ __tpl_wayland_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
	wayland_vk_wsi_surface->worker_surface.draw_done =
		__tpl_wayland_vk_wsi_process_draw_done;
	wayland_vk_wsi_surface->worker_surface.draw_wait_fd_get =
		__tpl_wayland_vk_wsi_draw_wait_fd_get;
+	if ((wayland_vk_wsi_surface->present_mode &
+	     (TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED))) {
+		wayland_vk_wsi_surface->worker_surface.vblank =
+			__tpl_wayland_vk_wsi_vblank;
+		pthread_mutex_init(&wayland_vk_wsi_surface->vblank_list_mutex, NULL);
+		__tpl_list_init(&wayland_vk_wsi_surface->vblank_list);
+	}
 
	__tpl_worker_surface_list_insert(&wayland_vk_wsi_surface->worker_surface);
 #endif
@@ -795,6 +916,15 @@ __tpl_wayland_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface)
	}
 
 #if USE_WORKER_THREAD == 1
+	if ((wayland_vk_wsi_surface->present_mode &
+	     (TPL_DISPLAY_PRESENT_MODE_FIFO | TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED))) {
+		pthread_mutex_lock(&wayland_vk_wsi_surface->vblank_list_mutex);
+		__tpl_list_fini(&wayland_vk_wsi_surface->vblank_list,
+				__tpl_wayland_vk_wsi_buffer_remove_from_vblank_list);
+		pthread_mutex_unlock(&wayland_vk_wsi_surface->vblank_list_mutex);
+		pthread_mutex_destroy(&wayland_vk_wsi_surface->vblank_list_mutex);
+	}
+
	pthread_cond_destroy(&wayland_vk_wsi_surface->free_queue_cond);
	pthread_mutex_destroy(&wayland_vk_wsi_surface->free_queue_mutex);
 #endif
@@ -849,6 +979,8 @@ __tpl_display_init_backend_wayland_vk_wsi(tpl_display_backend_t *backend)
	backend->filter_config = __tpl_wayland_vk_wsi_display_filter_config;
	backend->query_window_supported_buffer_count =
		__tpl_wayland_vk_wsi_display_query_window_supported_buffer_count;
+	backend->query_window_supported_present_modes =
+		__tpl_wayland_vk_wsi_display_query_window_supported_present_modes;
 }
 
 void
-- 
2.7.4
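With PATCH 10 and PATCH 12 combined, a Vulkan WSI layer can pick a present
mode at swapchain creation time. A sketch of the intended call sequence
follows; the display, window, and surface objects are assumed to be set up
already, and the TBM_FORMAT_ARGB8888 format, 720x1280 size, and buffer count
of 3 are illustrative values:

    int modes = 0;
    tpl_result_t ret;

    ret = tpl_display_query_supported_present_modes_from_native_window(display,
                                                                       window,
                                                                       &modes);
    if (ret != TPL_ERROR_NONE)
        modes = TPL_DISPLAY_PRESENT_MODE_MAILBOX; /* always reported by this backend */

    /* prefer FIFO (the one mode Vulkan requires) when the worker thread
     * has vblank support; otherwise fall back to MAILBOX */
    int mode = (modes & TPL_DISPLAY_PRESENT_MODE_FIFO) ?
               TPL_DISPLAY_PRESENT_MODE_FIFO : TPL_DISPLAY_PRESENT_MODE_MAILBOX;

    ret = tpl_surface_create_swapchain(surface, TBM_FORMAT_ARGB8888,
                                       720, 1280, 3, mode);

Because the modes are bit flags, the query result can be tested with a simple
bitwise AND, mirroring how the backend itself checks present_mode against the
FIFO/FIFO_RELAXED mask.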
From 43f3ba0d0ef58d2ccf2a2c69eea9691b50dbf441 Mon Sep 17 00:00:00 2001
From: "joonbum.ko"
Date: Wed, 12 Oct 2016 17:31:13 +0900
Subject: [PATCH 13/16] tpl_util: Revised to use dlog for the default tpl logs.

- If ENABLE_DLOG, the flag in the spec file, is set to 0, the default log
  falls back to fprintf.
- To distinguish the kinds of logs, several font colors were added:
  FONT_DEFAULT, FONT_RED (error), FONT_YELLOW (warning),
  FONT_GREEN (frontend), FONT_BLUE (backend), FONT_MAGENTA (debug)

Change-Id: I13f41add53e0e3f3346bc89d4d056623f9bd98d1
Signed-off-by: joonbum.ko
---
 packaging/libtpl-egl.spec |  2 +-
 src/tpl_utils.h           | 85 ++++++++++++++++++++++++++---------------------
 2 files changed, 49 insertions(+), 38 deletions(-)

diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec
index 2eb175c..eedabda 100644
--- a/packaging/libtpl-egl.spec
+++ b/packaging/libtpl-egl.spec
@@ -10,7 +10,7 @@
 
 #TPL FEATURE OPTION
 %define ENABLE_TTRACE 0
-%define ENABLE_DLOG 0
+%define ENABLE_DLOG 1
 %define ENABLE_DEFAULT_LOG 0
 %define ENABLE_DEFAULT_DUMP 0
 %define ENABLE_OBJECT_HASH_CHECK 1
diff --git a/src/tpl_utils.h b/src/tpl_utils.h
index 2b8580b..386174e 100644
--- a/src/tpl_utils.h
+++ b/src/tpl_utils.h
@@ -55,25 +55,50 @@ extern unsigned int tpl_log_lvl;
 extern unsigned int tpl_log_initialized;
 extern unsigned int tpl_dump_lvl;
 
+#define FONT_DEFAULT "\033[0m"  /* for reset to the default color */
+#define FONT_RED     "\033[31m" /* for error logs */
+#define FONT_YELLOW  "\033[33m" /* for warning logs */
+#define FONT_GREEN   "\033[32m" /* for frontend API logs */
+#define FONT_BLUE    "\033[34m" /* for backend logs */
+#define FONT_MAGENTA "\033[35m" /* for debug logs */
+
 #ifdef DLOG_DEFAULT_ENABLE
 #define LOG_TAG "TPL"
 #include
-#define TPL_LOG_F(f, x...) LOGD(f, ##x)
-#define TPL_LOG_B(b, f, x...) LOGD(f, ##x)
-#define TPL_DEBUG(f, x...) LOGD(f, ##x)
-#define TPL_ERR(f, x...) LOGE(f, ##x)
-#define TPL_WARN(f, x...) LOGW(f, ##x)
+#endif
+
+#ifdef DLOG_DEFAULT_ENABLE
+#define tpl_log_f(t, f, x...) LOGD(FONT_GREEN t FONT_DEFAULT " " f, ##x)
+#define tpl_log_b(t, f, x...) LOGD(FONT_BLUE t FONT_DEFAULT " " f, ##x)
+#define tpl_log_d(t, f, x...) LOGD(FONT_MAGENTA t FONT_DEFAULT " " f, ##x)
+#define tpl_log_e(t, f, x...) LOGE(FONT_RED t " " f FONT_DEFAULT, ##x)
+#define tpl_log_w(t, f, x...) LOGW(FONT_YELLOW t " " f FONT_DEFAULT, ##x)
 
 #else /* DLOG_DEFAULT_ENABLE */
+#define tpl_log_f(t, f, x...) \
+	fprintf(stderr, FONT_GREEN t FONT_DEFAULT "[(pid:%d)(%s)] " f "\n", \
+		getpid(), __func__, ##x)
+#define tpl_log_b(t, f, x...) \
+	fprintf(stderr, FONT_BLUE t FONT_DEFAULT "[(pid:%d)(%s)] " f "\n", \
+		getpid(), __func__, ##x)
+#define tpl_log_d(t, f, x...) \
+	fprintf(stderr, FONT_MAGENTA t FONT_DEFAULT "[(pid:%d)(%s)] " f "\n",\
+		getpid(), __func__, ##x)
+#define tpl_log_e(t, f, x...) \
+	fprintf(stderr, FONT_RED t "[(pid:%d)(%s)] " f FONT_DEFAULT "\n", \
+		getpid(), __func__, ##x)
+#define tpl_log_w(t, f, x...) \
+	fprintf(stderr, FONT_YELLOW t "[(pid:%d)(%s)] " f FONT_DEFAULT "\n",\
+		getpid(), __func__, ##x)
+#endif /* DLOG_DEFAULT_ENABLE */
+
+
+#define TPL_ERR(f, x...) tpl_log_e("[TPL_ERROR]", f, ##x)
+#define TPL_WARN(f, x...) tpl_log_w("[TPL_WARNING]", f, ##x)
+
 #ifdef LOG_DEFAULT_ENABLE
-#define TPL_LOG_F(f, x...) \
-	fprintf(stderr, "[TPL_F(%d):%s(%d)] " f "\n", \
-		getpid(), __func__, __LINE__, ##x)
-#define TPL_LOG_B(b, f, x...) \
-	fprintf(stderr, "[TPL_" b "(%d):%s(%d)] " f "\n", \
-		getpid(), __FILE__, __LINE__, ##x)
-#define TPL_DEBUG(f, x...) \
-	fprintf(stderr, "[TPL_D(%d):%s(%d)] " f "\n", \
-		getpid(), __func__, __LINE__, ##x)
+#define TPL_LOG_F(f, x...) tpl_log_f("[TPL_F]", f, ##x)
+#define TPL_LOG_B(b, f, x...) tpl_log_b("[TPL_" b "]", f, ##x)
+#define TPL_DEBUG(f, x...) tpl_log_d("[TPL_DEBUG]", f, ##x)
 #else /* LOG_DEFAULT_ENABLE */
 /*
  * TPL_LOG_LEVEL
@@ -102,45 +127,31 @@ extern unsigned int tpl_dump_lvl;
	{ \
		LOG_INIT(); \
		if (tpl_log_lvl > 0 && tpl_log_lvl < 4) \
-			fprintf(stderr, "[TPL_F(%d):%s(%d)] " f "\n",\
-				getpid(), __func__, __LINE__, ##x); \
+			tpl_log_f("[TPL_F]", f, ##x); \
	}
-#define TPL_LOG_B(b, f, x...) \
-	{ \
-		LOG_INIT(); \
-		if (tpl_log_lvl > 1 && tpl_log_lvl < 4) \
-			fprintf(stderr, "[TPL_" b "(%d):%s(%d)] " f "\n",\
-				getpid(), __FILE__, __LINE__, ##x); \
+#define TPL_LOG_B(b, f, x...) \
+	{ \
+		LOG_INIT(); \
+		if (tpl_log_lvl > 1 && tpl_log_lvl < 4) \
+			tpl_log_b("[TPL_" b "]", f, ##x); \
	}
 
 #define TPL_DEBUG(f, x...) \
	{ \
		LOG_INIT(); \
		if (tpl_log_lvl > 2) \
-			fprintf(stderr, "[TPL_D(%d):%s(%d)] " f "\n",\
-				getpid(), __func__, __LINE__, ##x); \
+			tpl_log_d("[TPL_DEBUG]", f, ##x); \
	}
-
 #endif /* LOG_DEFAULT_ENABLE */
-
-#define TPL_ERR(f, x...) \
-	fprintf(stderr, \
-		"[TPL_ERR(%d):%s(%d)] " f "\n", \
-		getpid(), __func__, __LINE__, ##x)
-
-#define TPL_WARN(f, x...) \
-	fprintf(stderr, \
-		"[TPL_WARN(%d):%s(%d)] " f "\n", \
-		getpid(), __func__, __LINE__, ##x)
-#endif /* DLOG_DEFAULT_ENABLE */
 
 #else /* NDEBUG */
 #define TPL_LOG_F(f, x...)
 #define TPL_LOG_B(b, f, x...)
 #define TPL_DEBUG(f, x...)
 #define TPL_ERR(f, x...)
 #define TPL_WARN(f, x...)
-#endif /* NDEBUG */
+#endif
+
 
 #define TPL_CHECK_ON_NULL_RETURN(exp) \
	{ \
-- 
2.7.4

From 44cd41d20691b02ec7ae65bd5099d5e7ceb13297 Mon Sep 17 00:00:00 2001
From: "deasung.kim"
Date: Thu, 13 Oct 2016 14:22:44 +0900
Subject: [PATCH 14/16] tpl_worker_thread: remove vblank handler registration.

The vblank handler causes a performance regression. Remove the vblank
handler registration and treat every present mode as MAILBOX.

Change-Id: I4a56b77aea2ff67dbe0f09dc322d3219a2c69190
---
 src/tpl_wayland_vk_wsi.c | 3 +++
 src/tpl_worker_thread.c  | 6 ++++--
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/tpl_wayland_vk_wsi.c b/src/tpl_wayland_vk_wsi.c
index 7350f2b..a305762 100644
--- a/src/tpl_wayland_vk_wsi.c
+++ b/src/tpl_wayland_vk_wsi.c
@@ -830,6 +830,9 @@ __tpl_wayland_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
		return TPL_ERROR_INVALID_PARAMETER;
	}
 
+	/* FIXME: vblank has a performance problem, so force every present mode to MAILBOX */
+	present_mode = TPL_DISPLAY_PRESENT_MODE_MAILBOX;
+
	/* TODO: check the present modes supported by the server */
	switch (present_mode) {
diff --git a/src/tpl_worker_thread.c b/src/tpl_worker_thread.c
index 69cd6bd..e50c755 100644
--- a/src/tpl_worker_thread.c
+++ b/src/tpl_worker_thread.c
@@ -322,8 +322,10 @@ __tpl_worker_thread_loop(void *arg)
		goto cleanup;
 
	/* vblank fd */
-	if (__tpl_worker_prepare_vblank(epoll_fd, &tdm_client, &tdm_vblank))
-		tpl_worker_thread.support_vblank = TPL_TRUE;
+	/* FIXME: vblank has a performance problem */
+	/*if (__tpl_worker_prepare_vblank(epoll_fd, &tdm_client, &tdm_vblank))
+		tpl_worker_thread.support_vblank = TPL_TRUE;*/
+	tpl_worker_thread.support_vblank = TPL_TRUE;
 
	while(tpl_worker_thread.running) {
		int i;
-- 
2.7.4
From 58b96d5185131e04d298ea56f54d89f54ce17d79 Mon Sep 17 00:00:00 2001
From: YoungJun Cho
Date: Mon, 24 Oct 2016 14:28:55 +0900
Subject: [PATCH 15/16] tpl_wayland_egl: clean up
 __tpl_wayland_egl_buffer_set_reset_flag()

This patch cleans up the code of __tpl_wayland_egl_buffer_set_reset_flag().
Previously there was a possibility of reusing a stale wayland_egl_buffer
pointer when tbm_surface is NULL.

Change-Id: If1b5580ecea47c435eccec44e0d0c78ae5696162
Signed-off-by: YoungJun Cho
---
 src/tpl_wayland_egl.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c
index 0323dfc..e93c66d 100644
--- a/src/tpl_wayland_egl.c
+++ b/src/tpl_wayland_egl.c
@@ -378,19 +378,19 @@ __cb_client_window_resize_callback(struct wl_egl_window *wl_egl_window,
 static TPL_INLINE void
 __tpl_wayland_egl_buffer_set_reset_flag(tpl_list_t *tracking_list)
 {
-	tpl_wayland_egl_buffer_t *wayland_egl_buffer = NULL;
-	tbm_surface_h tbm_surface = NULL;
	tpl_list_node_t *node = __tpl_list_get_front_node(tracking_list);
 
	while (node) {
-		tbm_surface = (tbm_surface_h) __tpl_list_node_get_data(node);
+		tbm_surface_h tbm_surface =
+			(tbm_surface_h)__tpl_list_node_get_data(node);
 
-		if (tbm_surface)
-			wayland_egl_buffer =
+		if (tbm_surface) {
+			tpl_wayland_egl_buffer_t *wayland_egl_buffer =
				__tpl_wayland_egl_get_wayland_buffer_from_tbm_surface(tbm_surface);
 
-		if (wayland_egl_buffer)
-			wayland_egl_buffer->reset = TPL_TRUE;
+			if (wayland_egl_buffer)
+				wayland_egl_buffer->reset = TPL_TRUE;
+		}
 
		node = __tpl_list_node_next(node);
	}
-- 
2.7.4

From 6e81fe157393c476524c93228b3f73d16fb57fdc Mon Sep 17 00:00:00 2001
From: YoungJun Cho
Date: Mon, 24 Oct 2016 15:35:14 +0900
Subject: [PATCH 16/16] tpl_wayland_egl: remove a duplicated check in
 __cb_tbm_surface_queue_reset_callback()

This patch removes a duplicated NULL check in
__cb_tbm_surface_queue_reset_callback(): TPL_CHECK_ON_NULL_RETURN() already
returns when wayland_egl_surface is NULL.

Change-Id: Ia17386495cf5fa15210e6e8847ffc7b2e8bf92cc
Signed-off-by: YoungJun Cho
---
 src/tpl_wayland_egl.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/tpl_wayland_egl.c b/src/tpl_wayland_egl.c
index e93c66d..d22f7c4 100644
--- a/src/tpl_wayland_egl.c
+++ b/src/tpl_wayland_egl.c
@@ -409,8 +409,6 @@ __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue,
	wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
	TPL_CHECK_ON_NULL_RETURN(wayland_egl_surface);
 
-	if (!wayland_egl_surface) return;
-
	TPL_LOG_B("WL_EGL",
		  "[QUEUE_RESET_CB] tpl_wayland_egl_surface_t(%p) surface_queue(%p)",
		  data, surface_queue);
-- 
2.7.4
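The cleanup in PATCH 16 relies on TPL_CHECK_ON_NULL_RETURN() performing the
early return itself. The macro's body is cut off in PATCH 13 above; a typical
definition consistent with how it is used here would be the following — an
assumption for illustration, not the verbatim tpl_utils.h code:

    #define TPL_CHECK_ON_NULL_RETURN(exp)                \
        {                                                \
            if ((exp) == NULL) {                         \
                TPL_ERR("%s is NULL", #exp);             \
                return;                                  \
            }                                            \
        }

which is why the explicit "if (!wayland_egl_surface) return;" immediately
after it was dead code and could be removed without changing behavior.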