From 5646c5705fda8957a21c745d6cbba6b93701c5c0 Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Fri, 20 Oct 2017 15:04:25 +0900 Subject: [PATCH 01/16] tpl_wl_vk_thread: Integrated wl_vk_thread backend. Change-Id: Ie4491b28750f5639da02f474a3e5fdb742c6f07b Signed-off-by: joonbum.ko --- src/Makefile.am | 1 + src/tpl.c | 6 + src/tpl.h | 1 + src/tpl_internal.h | 2 + src/tpl_wayland_egl_thread.c | 43 ++- src/tpl_wl_vk_thread.c | 616 +++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 662 insertions(+), 7 deletions(-) create mode 100644 src/tpl_wl_vk_thread.c diff --git a/src/Makefile.am b/src/Makefile.am index d29509f..af61301 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -26,6 +26,7 @@ libtpl_egl_la_SOURCES += tpl_wayland_egl.c \ tpl_wl_egl_thread.c \ tpl_wayland_egl_thread.c \ tpl_wayland_vk_wsi.c \ + tpl_wl_vk_thread.c \ tpl_worker_thread.c \ wayland-vulkan/wayland-vulkan-protocol.c endif diff --git a/src/tpl.c b/src/tpl.c index 0f75d5f..7b0a213 100644 --- a/src/tpl.c +++ b/src/tpl.c @@ -301,6 +301,9 @@ __tpl_display_init_backend(tpl_display_t *display, tpl_backend_type_t type) case TPL_BACKEND_WAYLAND_VULKAN_WSI: __tpl_display_init_backend_wayland_vk_wsi(&display->backend); break; + case TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD: + __tpl_display_init_backend_wl_vk_wsi_thread(&display->backend); + break; case TPL_BACKEND_TBM: __tpl_display_init_backend_tbm(&display->backend, type); break; @@ -343,6 +346,9 @@ __tpl_surface_init_backend(tpl_surface_t *surface, tpl_backend_type_t type) case TPL_BACKEND_WAYLAND_VULKAN_WSI: __tpl_surface_init_backend_wayland_vk_wsi(&surface->backend); break; + case TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD: + __tpl_surface_init_backend_wl_vk_wsi_thread(&surface->backend); + break; case TPL_BACKEND_TBM: __tpl_surface_init_backend_tbm(&surface->backend, type); break; diff --git a/src/tpl.h b/src/tpl.h index ff20ff4..0ebf067 100644 --- a/src/tpl.h +++ b/src/tpl.h @@ -194,6 +194,7 @@ typedef enum { TPL_BACKEND_X11_DRI3, TPL_BACKEND_TBM, TPL_BACKEND_WAYLAND_VULKAN_WSI, + TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD, TPL_BACKEND_TBM_VULKAN_WSI, TPL_BACKEND_COUNT, TPL_BACKEND_MAX diff --git a/src/tpl_internal.h b/src/tpl_internal.h index 2fa642f..56552a4 100644 --- a/src/tpl_internal.h +++ b/src/tpl_internal.h @@ -220,6 +220,7 @@ void __tpl_display_init_backend_tbm(tpl_display_backend_t *backend, void __tpl_display_init_backend_wayland_egl(tpl_display_backend_t *backend); void __tpl_display_init_backend_wl_egl_thread(tpl_display_backend_t *backend); void __tpl_display_init_backend_wayland_vk_wsi(tpl_display_backend_t *backend); +void __tpl_display_init_backend_wl_vk_wsi_thread(tpl_display_backend_t *backend); void __tpl_display_init_backend_x11_dri2(tpl_display_backend_t *backend); void __tpl_display_init_backend_x11_dri3(tpl_display_backend_t *backend); void __tpl_surface_init_backend_gbm(tpl_surface_backend_t *backend); @@ -228,6 +229,7 @@ void __tpl_surface_init_backend_tbm(tpl_surface_backend_t *backend, void __tpl_surface_init_backend_wayland_egl(tpl_surface_backend_t *backend); void __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend); void __tpl_surface_init_backend_wayland_vk_wsi(tpl_surface_backend_t *backend); +void __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend); void __tpl_surface_init_backend_x11_dri2(tpl_surface_backend_t *backend); void __tpl_surface_init_backend_x11_dri3(tpl_surface_backend_t *backend); diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index ad49932..7f12646 100644 
--- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -861,8 +861,6 @@ _twe_display_vk_init(twe_wl_disp_source *disp_source) NULL); } - TPL_LOG_T("WL_VK", "wl_vk_client(%p) init.", disp_source->wl_vk_client); - fini: if (queue) wl_event_queue_destroy(queue); @@ -950,7 +948,8 @@ twe_display_add(twe_thread* thread, source->gfd.revents = 0; __tpl_object_init(&source->obj, TPL_OBJECT_DISPLAY, NULL); - if (backend == TPL_BACKEND_WAYLAND_VULKAN_WSI) { + if (backend == TPL_BACKEND_WAYLAND_VULKAN_WSI || + backend == TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD) { source->is_vulkan_dpy = TPL_TRUE; source->surface_capabilities.min_buffer = 2; source->surface_capabilities.max_buffer = VK_CLIENT_QUEUE_SIZE; @@ -1658,6 +1657,10 @@ _twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source, buf_info->sync_timestamp++; + TPL_LOG_T("WL_VK", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", + buf_info->wl_buffer, tbm_surface, + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + if (surf_source->swapchain_properties.present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED) { if (_twe_surface_wait_vblank(surf_source) != TPL_ERROR_NONE) @@ -1801,6 +1804,9 @@ _twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source) return; } + TRACE_ASYNC_END((int)tbm_surface, "DRAWING BO(%d)", + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + tbm_surface_internal_ref(tbm_surface); tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO, @@ -1824,9 +1830,16 @@ _twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source) case TPL_DISPLAY_PRESENT_MODE_MAILBOX: if (surf_source->draw_done_buffer) { + g_mutex_lock(&surf_source->free_queue_mutex); + TPL_LOG_T("WL_VK", "[SKIP] tbm_surface(%p) bo(%d)", + tbm_surface, + tbm_bo_export(tbm_surface_internal_get_bo( + tbm_surface, 0))); tbm_surface_internal_unref(surf_source->draw_done_buffer); tbm_surface_queue_release(surf_source->tbm_queue, surf_source->draw_done_buffer); + g_cond_signal(&surf_source->free_queue_cond); + g_mutex_unlock(&surf_source->free_queue_mutex); } surf_source->draw_done_buffer = tbm_surface; @@ -1871,7 +1884,6 @@ static gboolean _twe_thread_wl_surface_dispatch(GSource *source, GSourceFunc cb, gpointer data) { twe_wl_surf_source *surf_source = (twe_wl_surf_source *)source; - twe_wl_disp_source *disp_source = surf_source->disp_source; GIOCondition cond; if (g_source_is_destroyed(source)) { @@ -1890,8 +1902,7 @@ _twe_thread_wl_surface_dispatch(GSource *source, GSourceFunc cb, gpointer data) TPL_ERR("Failed to read from event_fd(%d)", surf_source->event_fd); - if (!disp_source->is_vulkan_dpy) - _twe_thread_wl_surface_acquire_and_commit(surf_source); + _twe_thread_wl_surface_acquire_and_commit(surf_source); } return G_SOURCE_CONTINUE; @@ -2355,6 +2366,24 @@ twe_surface_create_swapchain(twe_surface_h twe_surface, return TPL_ERROR_OUT_OF_MEMORY; } + if (tbm_surface_queue_add_trace_cb(surf_source->tbm_queue, + __cb_tbm_queue_trace_callback, + (void *)surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register trace callback to tbm_surface_queue(%p)", + surf_source->tbm_queue); + tbm_surface_queue_destroy(surf_source->tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + + if (tbm_surface_queue_add_acquirable_cb(surf_source->tbm_queue, + __cb_tbm_queue_acquirable_callback, + (void *)surf_source) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)", + surf_source->tbm_queue); + 
tbm_surface_queue_destroy(surf_source->tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + surf_source->format = format; surf_source->swapchain_properties.width = width; surf_source->swapchain_properties.height = height; @@ -2365,7 +2394,7 @@ twe_surface_create_swapchain(twe_surface_h twe_surface, twe_surface, surf_source->tbm_queue); TPL_LOG_T("WL_VK", "[SWAPCHAIN_CREATE][2/2] w(%d) h(%d) f(%d) p(%d) b_cnt(%d)", - width, height, present_mode, buffer_count); + width, height, format, present_mode, buffer_count); return TPL_ERROR_NONE; } diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c new file mode 100644 index 0000000..fd518bf --- /dev/null +++ b/src/tpl_wl_vk_thread.c @@ -0,0 +1,616 @@ +#define inline __inline__ +#undef inline + +#include "tpl_internal.h" + +#include +#include +#include + +#include + +#include "tpl_wayland_egl_thread.h" + +typedef struct _tpl_wayland_vk_wsi_display tpl_wayland_vk_wsi_display_t; +typedef struct _tpl_wayland_vk_wsi_surface tpl_wayland_vk_wsi_surface_t; +typedef struct _tpl_wayland_vk_wsi_buffer tpl_wayland_vk_wsi_buffer_t; + +struct _tpl_wayland_vk_wsi_display { + twe_thread *wl_thread; + twe_display_h twe_display; +}; + +struct _tpl_wayland_vk_wsi_surface { + twe_surface_h twe_surface; + tbm_surface_queue_h tbm_queue; + int buffer_count; +}; + +static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( + tpl_surface_t *surface); + +static TPL_INLINE tpl_bool_t +__tpl_wl_vk_wsi_display_is_wl_display(tpl_handle_t native_dpy) +{ + if (!native_dpy) return TPL_FALSE; + + if (twe_check_native_handle_is_wl_display(native_dpy)) + return TPL_TRUE; + + return TPL_FALSE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_init(tpl_display_t *display) +{ + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + + TPL_ASSERT(display); + + /* Do not allow default display in wayland */ + if (!display->native_handle) { + TPL_ERR("Invalid native handle for display."); + return TPL_ERROR_INVALID_PARAMETER; + } + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) calloc(1, + sizeof(tpl_wayland_vk_wsi_display_t)); + if (!wayland_vk_wsi_display) { + TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_display_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + display->backend.data = wayland_vk_wsi_display; + + if (twe_check_native_handle_is_wl_display(display->native_handle)) { + wayland_vk_wsi_display->wl_thread = twe_thread_create(); + if (!wayland_vk_wsi_display->wl_thread) { + TPL_ERR("Failed to create twe_thread."); + goto free_display; + } + + wayland_vk_wsi_display->twe_display = + twe_display_add(wayland_vk_wsi_display->wl_thread, + display->native_handle, + display->backend.type); + if (!wayland_vk_wsi_display->twe_display) { + TPL_ERR("Failed to add native_display(%p) to thread(%p)", + display->native_handle, + wayland_vk_wsi_display->wl_thread); + goto free_display; + } + + } else { + TPL_ERR("Invalid native handle for display."); + goto free_display; + } + + TPL_LOG_T("WL_VK", + "[INIT DISPLAY] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", + wayland_vk_wsi_display, + wayland_vk_wsi_display->wl_thread, + wayland_vk_wsi_display->twe_display); + + return TPL_ERROR_NONE; + +free_display: + if (wayland_vk_wsi_display) { + if (wayland_vk_wsi_display->twe_display) + twe_display_del(wayland_vk_wsi_display->twe_display); + if (wayland_vk_wsi_display->wl_thread) + twe_thread_destroy(wayland_vk_wsi_display->wl_thread); + + wayland_vk_wsi_display->wl_thread = NULL; + wayland_vk_wsi_display->twe_display = NULL; + + 
free(wayland_vk_wsi_display); + display->backend.data = NULL; + } + + return TPL_ERROR_INVALID_OPERATION; +} + +static void +__tpl_wl_vk_wsi_display_fini(tpl_display_t *display) +{ + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display; + + TPL_ASSERT(display); + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + if (wayland_vk_wsi_display) { + + TPL_LOG_T("WL_VK", + "[FINI] wayland_vk_wsi_display(%p) twe_thread(%p) twe_display(%p)", + wayland_vk_wsi_display, + wayland_vk_wsi_display->wl_thread, + wayland_vk_wsi_display->twe_display); + + if (wayland_vk_wsi_display->twe_display) { + tpl_result_t ret = TPL_ERROR_NONE; + ret = twe_display_del(wayland_vk_wsi_display->twe_display); + if (ret != TPL_ERROR_NONE) + TPL_ERR("Failed to delete twe_display(%p) from twe_thread(%p)", + wayland_vk_wsi_display->twe_display, + wayland_vk_wsi_display->wl_thread); + wayland_vk_wsi_display->twe_display = NULL; + } + + if (wayland_vk_wsi_display->wl_thread) { + twe_thread_destroy(wayland_vk_wsi_display->wl_thread); + wayland_vk_wsi_display->wl_thread = NULL; + } + + free(wayland_vk_wsi_display); + } + display->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_query_config(tpl_display_t *display, + tpl_surface_type_t surface_type, + int red_size, int green_size, + int blue_size, int alpha_size, + int color_depth, int *native_visual_id, + tpl_bool_t *is_slow) +{ + TPL_ASSERT(display); + + if (surface_type == TPL_SURFACE_TYPE_WINDOW && red_size == 8 && + green_size == 8 && blue_size == 8 && + (color_depth == 32 || color_depth == 24)) { + + if (alpha_size == 8) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_ARGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + if (alpha_size == 0) { + if (native_visual_id) *native_visual_id = TBM_FORMAT_XRGB8888; + if (is_slow) *is_slow = TPL_FALSE; + return TPL_ERROR_NONE; + } + } + + return TPL_ERROR_INVALID_PARAMETER; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_filter_config(tpl_display_t *display, + int *visual_id, + int alpha_size) +{ + TPL_IGNORE(display); + TPL_IGNORE(visual_id); + TPL_IGNORE(alpha_size); + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_query_window_supported_buffer_count( + tpl_display_t *display, + tpl_handle_t window, int *min, int *max) +{ + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_result_t res = TPL_ERROR_NONE; + + TPL_ASSERT(display); + TPL_ASSERT(window); + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + + if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; + + res = twe_display_get_buffer_count(wayland_vk_wsi_display->twe_display, + min, max); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to query buffer count. twe_display(%p)", + wayland_vk_wsi_display->twe_display); + return res; + } + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_display_query_window_supported_present_modes( + tpl_display_t *display, + tpl_handle_t window, int *modes) +{ + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_result_t res = TPL_ERROR_NONE; + + TPL_ASSERT(display); + TPL_ASSERT(window); + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + + if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; + + if (modes) { + res = twe_display_get_present_mode(wayland_vk_wsi_display->twe_display, + modes); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to query present modes. 
twe_display(%p)", + wayland_vk_wsi_display->twe_display); + return res; + } + } + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) +{ + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + twe_surface_h twe_surface = NULL; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); + TPL_ASSERT(surface->native_handle); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) calloc(1, + sizeof(tpl_wayland_vk_wsi_surface_t)); + if (!wayland_vk_wsi_surface) { + TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_surface_t."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + wayland_vk_wsi_display = + (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + if (!wayland_vk_wsi_display) { + TPL_ERR("Invalid parameter. wayland_vk_wsi_display(%p)", + wayland_vk_wsi_display); + free(wayland_vk_wsi_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + surface->backend.data = (void *)wayland_vk_wsi_surface; + wayland_vk_wsi_surface->tbm_queue = NULL; + + twe_surface = twe_surface_add(wayland_vk_wsi_display->wl_thread, + wayland_vk_wsi_display->twe_display, + surface->native_handle, + surface->format); + if (!twe_surface) { + TPL_ERR("Failed to add native_surface(%p) to thread(%p)", + surface->native_handle, wayland_vk_wsi_display->wl_thread); + free(wayland_vk_wsi_surface); + surface->backend.data = NULL; + return TPL_ERROR_OUT_OF_MEMORY; + } + + wayland_vk_wsi_surface->twe_surface = twe_surface; + + TPL_LOG_T("WL_VK", + "[INIT]tpl_surface(%p) tpl_wayland_vk_wsi_surface(%p) twe_surface(%p)", + surface, wayland_vk_wsi_surface, twe_surface); + + return TPL_ERROR_NONE; +} + +static void +__tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) +{ + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + if (wayland_vk_wsi_surface == NULL) return; + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + surface->display->backend.data; + if (wayland_vk_wsi_display == NULL) return; + + if (wayland_vk_wsi_surface->tbm_queue) + __tpl_wl_vk_wsi_surface_destroy_swapchain(surface); + + TPL_LOG_T("WL_VK", + "[FINI] wayland_vk_wsi_surface(%p) native_surface(%p) twe_surface(%p)", + wayland_vk_wsi_surface, surface->native_handle, + wayland_vk_wsi_surface->twe_surface); + + if (twe_surface_del(wayland_vk_wsi_surface->twe_surface) + != TPL_ERROR_NONE) { + TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)", + wayland_vk_wsi_surface->twe_surface, + wayland_vk_wsi_display->wl_thread); + } + + wayland_vk_wsi_surface->twe_surface = NULL; + + free(wayland_vk_wsi_surface); + surface->backend.data = NULL; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, + tbm_surface_h tbm_surface, + int num_rects, const int *rects, + tbm_fd sync_fence) +{ + + TPL_ASSERT(surface); + TPL_ASSERT(surface->display); + TPL_ASSERT(surface->display->native_handle); + TPL_ASSERT(tbm_surface); + + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = + (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + tbm_surface_queue_error_e tsq_err; + + if (!tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.", + tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + if 
(sync_fence != -1) { + tpl_result_t res = TPL_ERROR_NONE; + res = twe_surface_set_sync_fd(tbm_surface, sync_fence); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to set sync_fd to tbm_surface(%p)", tbm_surface); + tbm_surface_internal_unref(tbm_surface); + return res; + } + } + + tsq_err = tbm_surface_queue_enqueue(wayland_vk_wsi_surface->tbm_queue, + tbm_surface); + if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) { + tbm_surface_internal_unref(tbm_surface); + } else { + TPL_ERR("Failed to enqeueue tbm_surface. | tsq_err = %d", tsq_err); + return TPL_ERROR_INVALID_OPERATION; + } + + TPL_LOG_T("WL_VK", "[ENQ] tbm_surface(%p) bo(%d)", + tbm_surface, + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + + return TPL_ERROR_NONE; +} + +static tpl_bool_t +__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + + return TPL_TRUE; +} + +static tbm_surface_h +__tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, + uint64_t timeout_ns, + tbm_fd *sync_fence) +{ + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + + tbm_surface_h tbm_surface = NULL; + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = + (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + tbm_surface_queue_error_e tsq_err = 0; + + if (sync_fence) + *sync_fence = -1; + + if (!tbm_surface_queue_can_dequeue(wayland_vk_wsi_surface->tbm_queue, 0)) { + if (timeout_ns == 0) return NULL; + else { + tpl_result_t res = TPL_ERROR_NONE; + res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface, + timeout_ns); + if (res == TPL_ERROR_TIME_OUT) { + TPL_ERR("Failed to get buffer during timeout_ns(%u)", timeout_ns); + return NULL; + } else if (res != TPL_ERROR_NONE) { + TPL_ERR("Invalid parameter. 
twe_surface(%p) timeout_ns(%u)", + wayland_vk_wsi_surface->twe_surface, timeout_ns); + return NULL; + } + } + } + + tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue, + &tbm_surface); + if (!tbm_surface) { + TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d", + tsq_err); + return NULL; + } + + tbm_surface_internal_ref(tbm_surface); + + if (sync_fence) { + *sync_fence = twe_surface_create_sync_fd(tbm_surface); + } + + TPL_LOG_T("WL_VK", "[DEQ] tbm_surface(%p) bo(%d)", + tbm_surface, + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + + return tbm_surface; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, + tbm_surface_h **buffers, + int *buffer_count) +{ + tbm_surface_h buffer = NULL; + tbm_surface_h *swapchain_buffers = NULL; + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tbm_surface_queue_error_e tsq_err; + int i, dequeue_count; + tpl_result_t ret = TPL_ERROR_NONE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + TPL_ASSERT(buffers); + TPL_ASSERT(buffer_count); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + swapchain_buffers = (tbm_surface_h *)calloc( + wayland_vk_wsi_surface->buffer_count, sizeof(tbm_surface_h)); + if (!swapchain_buffers) { + TPL_ERR("Failed to allocate memory for buffers."); + return TPL_ERROR_OUT_OF_MEMORY; + } + + for (i = 0 ; i < wayland_vk_wsi_surface->buffer_count ; i++) { + tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue, &buffer); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d", + tsq_err); + dequeue_count = i; + ret = TPL_ERROR_OUT_OF_MEMORY; + goto get_buffer_fail; + } + swapchain_buffers[i] = buffer; + TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p)", i, buffer); + } + + for (i = 0 ; i < wayland_vk_wsi_surface->buffer_count ; i++) { + tsq_err = tbm_surface_queue_release(wayland_vk_wsi_surface->tbm_queue, + swapchain_buffers[i]); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to release tbm_surface. | tsq_err = %d", tsq_err); + ret = TPL_ERROR_INVALID_OPERATION; + goto release_buffer_fail; + } + } + + *buffers = swapchain_buffers; + *buffer_count = wayland_vk_wsi_surface->buffer_count; + return TPL_ERROR_NONE; + +get_buffer_fail: + for (i = 0 ; i < dequeue_count ; i++) { + tsq_err = tbm_surface_queue_release(wayland_vk_wsi_surface->tbm_queue, + swapchain_buffers[i]); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to release tbm_surface. 
| tsq_err = %d", tsq_err); + goto release_buffer_fail; + } + } + +release_buffer_fail: + free(swapchain_buffers); + return ret; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, + tbm_format format, int width, + int height, int buffer_count, int present_mode) +{ + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_result_t res = TPL_ERROR_NONE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + TPL_ASSERT(wayland_vk_wsi_surface); + + wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + surface->display->backend.data; + TPL_ASSERT(wayland_vk_wsi_display); + + res = twe_surface_create_swapchain(wayland_vk_wsi_surface->twe_surface, + width, height, format, + buffer_count, present_mode); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to create swapchain. twe_surface(%p)", + wayland_vk_wsi_surface->twe_surface); + return res; + } + + wayland_vk_wsi_surface->tbm_queue = twe_surface_get_tbm_queue( + wayland_vk_wsi_surface->twe_surface); + wayland_vk_wsi_surface->buffer_count = buffer_count; + + return TPL_ERROR_NONE; +} + +static tpl_result_t +__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) +{ + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_result_t res = TPL_ERROR_NONE; + + TPL_ASSERT(surface); + TPL_ASSERT(surface->backend.data); + TPL_ASSERT(surface->display); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + TPL_ASSERT(wayland_vk_wsi_surface); + + res = twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); + if (res != TPL_ERROR_NONE) { + TPL_ERR("Failed to destroy swapchain. 
twe_surface(%p)", + wayland_vk_wsi_surface->twe_surface); + return res; + } + + return TPL_ERROR_NONE; +} + +tpl_bool_t +__tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy) +{ + if (!native_dpy) return TPL_FALSE; + + if (twe_check_native_handle_is_wl_display(native_dpy)) + return TPL_TRUE; + + return TPL_FALSE; +} + +void +__tpl_display_init_backend_wl_vk_wsi_thread(tpl_display_backend_t *backend) +{ + TPL_ASSERT(backend); + + backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_vk_wsi_display_init; + backend->fini = __tpl_wl_vk_wsi_display_fini; + backend->query_config = __tpl_wl_vk_wsi_display_query_config; + backend->filter_config = __tpl_wl_vk_wsi_display_filter_config; + backend->query_window_supported_buffer_count = + __tpl_wl_vk_wsi_display_query_window_supported_buffer_count; + backend->query_window_supported_present_modes = + __tpl_wl_vk_wsi_display_query_window_supported_present_modes; +} + +void +__tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) +{ + TPL_ASSERT(backend); + + backend->type = TPL_BACKEND_WAYLAND_VULKAN_WSI_THREAD; + backend->data = NULL; + + backend->init = __tpl_wl_vk_wsi_surface_init; + backend->fini = __tpl_wl_vk_wsi_surface_fini; + backend->validate = __tpl_wl_vk_wsi_surface_validate; + backend->dequeue_buffer = __tpl_wl_vk_wsi_surface_dequeue_buffer; + backend->enqueue_buffer = __tpl_wl_vk_wsi_surface_enqueue_buffer; + backend->get_swapchain_buffers = + __tpl_wl_vk_wsi_surface_get_swapchain_buffers; + backend->create_swapchain = __tpl_wl_vk_wsi_surface_create_swapchain; + backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain; +} -- 2.7.4 From 5254b6bd6ffb22c9a1e223acb7948a3a307c4856 Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Wed, 8 Nov 2017 18:59:27 +0900 Subject: [PATCH 02/16] tpl_wayland_egl_thread: Modified the buffer_info to wait for draw done without using sub-thread. - twe_thread can not process any events while it is waiting for buffer draw done. - This is a temporary structure for guaranteeing buffer sync, and is not the best solution, so there are many parts to be improved. 
Change-Id: I2c81f171799312761fabce24ec72f947f6108012 Signed-off-by: joonbum.ko --- src/tpl_wayland_egl_thread.c | 105 +++++++++++++++++++------------------------ 1 file changed, 46 insertions(+), 59 deletions(-) diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index 7f12646..661b64b 100644 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -132,8 +132,6 @@ struct _twe_wl_buffer_info { int *rects; tpl_bool_t need_to_commit; - GThread *fence_waiting_thread; - /* for checking draw done */ tpl_bool_t draw_done; tbm_fd draw_done_fence; @@ -1243,11 +1241,6 @@ __cb_twe_buffer_free_callback(twe_wl_buffer_info *buf_info) wl_display_flush(disp_source->disp); - if (buf_info->fence_waiting_thread) { - g_thread_join(buf_info->fence_waiting_thread); - buf_info->fence_waiting_thread = NULL; - } - if (buf_info->draw_done_fence != -1) { close(buf_info->draw_done_fence); buf_info->draw_done_fence = -1; @@ -1434,6 +1427,7 @@ _twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source, TPL_WARN("Failed to create TBM sync timeline: %d(%s)", errno, buf); } + buf_info->draw_done_fence = -1; buf_info->sync_timestamp = 0; buf_info->surf_source = surf_source; buf_info->num_rects = 0; @@ -1512,9 +1506,6 @@ __cb_tbm_queue_acquirable_callback(tbm_surface_queue_h surface_queue, uint64_t value = 1; int ret; - if (surf_source->disp_source->is_vulkan_dpy) - return; - ret = write(surf_source->event_fd, &value, sizeof(uint64_t)); if (ret == -1) { TPL_ERR("failed to send acquirable event. twe_wl_surf_source(%p)", @@ -1632,6 +1623,46 @@ _twe_surface_wait_vblank(twe_wl_surf_source *surf_source) return TPL_ERROR_NONE; } +static tpl_result_t +_twe_surface_wait_draw_done(tbm_surface_h tbm_surface) +{ + twe_wl_buffer_info *buf_info = NULL; + + if (!tbm_surface || !tbm_surface_internal_is_valid(tbm_surface)) { + TPL_ERR("Invalid tbm_surface(%p)", tbm_surface); + return TPL_ERROR_INVALID_PARAMETER; + } + + tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO, + (void **)&buf_info); + if (!buf_info) { + TPL_ERR("Failed to get twe_wl_buffer_info from tbm_surface(%p)", + tbm_surface); + return TPL_ERROR_INVALID_OPERATION; + } + + if (buf_info->draw_done_fence == -1) { + TPL_WARN("tbm_surface(%p) will be presented immediately.", tbm_surface); + return TPL_ERROR_NONE; + } + + TRACE_BEGIN("Fence waiting. BO(%d)", + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + if (tbm_sync_fence_wait(buf_info->draw_done_fence, -1) != 1) { + char buf[1024]; + strerror_r(errno, buf, sizeof(buf)); + TPL_ERR("Failed to wait sync fence(%d). | error: %d(%s)", + buf_info->draw_done_fence, errno, buf); + } + TRACE_END(); + + close(buf_info->draw_done_fence); + buf_info->draw_done_fence = -1; + buf_info->draw_done = TPL_TRUE; + + return TPL_ERROR_NONE; +} + static void _twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source, tbm_surface_h tbm_surface) @@ -1804,16 +1835,15 @@ _twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source) return; } - TRACE_ASYNC_END((int)tbm_surface, "DRAWING BO(%d)", - tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); - tbm_surface_internal_ref(tbm_surface); tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO, (void **)&buf_info); - if (buf_info && buf_info->fence_waiting_thread) { - g_thread_join(buf_info->fence_waiting_thread); - buf_info->fence_waiting_thread = NULL; + + if (!buf_info->draw_done && buf_info->draw_done_fence != -1) { + /* wait until draw done. 
*/ + if (_twe_surface_wait_draw_done(tbm_surface) != TPL_ERROR_NONE) + TPL_ERR("Failed to wait drawing complete."); } if (!disp_source->is_vulkan_dpy) { /* wayland_egl */ @@ -2570,41 +2600,6 @@ twe_surface_commit_without_enqueue(twe_surface_h twe_surface, TPL_OBJECT_UNLOCK(&surf_source->obj); } -static gpointer -_twe_sync_fence_waiting(gpointer data) -{ - twe_wl_buffer_info *buf_info = (twe_wl_buffer_info *)data; - twe_wl_surf_source *surf_source = buf_info->surf_source; - uint64_t value = 1; - int ret; - - if (buf_info->draw_done_fence == -1) - return data; - - TRACE_BEGIN("Fence waiting. BO(%d)", - tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); - /* non worker thread mode */ - /* TODO: set max wait time */ - if (tbm_sync_fence_wait(buf_info->draw_done_fence, -1) != 1) { - char buf[1024]; - strerror_r(errno, buf, sizeof(buf)); - TPL_ERR("Failed to wait sync fence. | error: %d(%s)", errno, buf); - } - TRACE_END(); - - close(buf_info->draw_done_fence); - buf_info->draw_done_fence = -1; - buf_info->draw_done = TPL_TRUE; - - ret = write(surf_source->event_fd, &value, sizeof(uint64_t)); - if (ret == -1) { - TPL_ERR("failed to send acquirable event. twe_wl_surf_source(%p)", - surf_source); - } - - return data; -} - tpl_result_t twe_surface_set_sync_fd(tbm_surface_h tbm_surface, tbm_fd sync_fd) { @@ -2622,20 +2617,12 @@ twe_surface_set_sync_fd(tbm_surface_h tbm_surface, tbm_fd sync_fd) TPL_WARN("It will be managed with async mode."); } - if (buf_info->fence_waiting_thread) { - g_thread_join(buf_info->fence_waiting_thread); - buf_info->fence_waiting_thread = NULL; - } - if (buf_info->draw_done_fence != -1) { close(buf_info->draw_done_fence); buf_info->draw_done_fence = -1; } buf_info->draw_done_fence = sync_fd; - buf_info->fence_waiting_thread = g_thread_new("fence_waiting_thread", - _twe_sync_fence_waiting, - buf_info); return TPL_ERROR_NONE; } -- 2.7.4 From 87c82236b07d38e978157453953a801a6812b405 Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Mon, 13 Nov 2017 14:03:32 +0900 Subject: [PATCH 03/16] tpl_wl_vk_thread: Added reset callback to tbm_queue for HWC. - Modified the tbm_queue to create from wayland_tbm_client to enable HWC. - Added reset callback and flag so that the client knows the reset via the tpl_surface_validate() function. 
Change-Id: I03dca08bb0c8618ec658dc4ebc226465167361ec Signed-off-by: joonbum.ko --- src/tpl_wayland_egl_thread.c | 24 +++++++++++++------- src/tpl_wl_vk_thread.c | 54 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 69 insertions(+), 9 deletions(-) diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index 661b64b..2e345ab 100644 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -2383,14 +2383,22 @@ twe_surface_create_swapchain(twe_surface_h twe_surface, } } - /* FIXME: vblank has performance problem so replace all present mode to MAILBOX */ - present_mode = TPL_DISPLAY_PRESENT_MODE_MAILBOX; - - surf_source->tbm_queue = tbm_surface_queue_create(buffer_count, - width, - height, - TBM_FORMAT_ARGB8888, - 0); + surf_source->tbm_queue = wayland_tbm_client_create_surface_queue( + disp_source->wl_tbm_client, + surf_source->surf, + buffer_count, + width, height, + TBM_FORMAT_ARGB8888); + + if (tbm_surface_queue_add_reset_cb(surf_source->tbm_queue, + __cb_tbm_queue_reset_callback, + NULL) != TBM_SURFACE_QUEUE_ERROR_NONE) { + TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)", + surf_source->tbm_queue); + tbm_surface_queue_destroy(surf_source->tbm_queue); + return TPL_ERROR_INVALID_OPERATION; + } + if (!surf_source->tbm_queue) { TPL_ERR("TBM surface queue creation failed!"); return TPL_ERROR_OUT_OF_MEMORY; diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index fd518bf..1b8f448 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -24,6 +24,8 @@ struct _tpl_wayland_vk_wsi_surface { twe_surface_h twe_surface; tbm_surface_queue_h tbm_queue; int buffer_count; + tpl_bool_t is_activated; + tpl_bool_t reset; }; static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( @@ -382,7 +384,10 @@ __tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); - return TPL_TRUE; + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = + (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + + return !(wayland_vk_wsi_surface->reset); } static tbm_surface_h @@ -508,6 +513,41 @@ release_buffer_fail: return ret; } +static void +__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, + void *data) +{ + tpl_surface_t *surface = NULL; + tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_bool_t is_activated = TPL_FALSE; + + surface = (tpl_surface_t *)data; + TPL_CHECK_ON_NULL_RETURN(surface); + + wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface); + + /* When queue_reset_callback is called, if is_activated is different from + * its previous state change the reset flag to TPL_TRUE to get a new buffer + * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. 
*/ + is_activated = twe_surface_check_activated(wayland_vk_wsi_surface->twe_surface); + if (wayland_vk_wsi_surface->is_activated != is_activated) { + if (is_activated) { + TPL_LOG_T("WL_EGL", + "[ACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", + wayland_vk_wsi_surface, surface_queue); + } else { + TPL_LOG_T("WL_EGL", + "[DEACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", + wayland_vk_wsi_surface, surface_queue); + } + wayland_vk_wsi_surface->reset = TPL_TRUE; + } + + if (surface->reset_cb) + surface->reset_cb(surface->reset_data); +} + static tpl_result_t __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, tbm_format format, int width, @@ -539,7 +579,19 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, wayland_vk_wsi_surface->tbm_queue = twe_surface_get_tbm_queue( wayland_vk_wsi_surface->twe_surface); + + /* Set reset_callback to tbm_queue */ + if (tbm_surface_queue_add_reset_cb(wayland_vk_wsi_surface->tbm_queue, + __cb_tbm_surface_queue_reset_callback, + (void *)surface)) { + TPL_ERR("TBM surface queue add reset cb failed!"); + twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); + wayland_vk_wsi_surface->tbm_queue = NULL; + return TPL_ERROR_INVALID_OPERATION; + } + wayland_vk_wsi_surface->buffer_count = buffer_count; + wayland_vk_wsi_surface->reset = TPL_FALSE; return TPL_ERROR_NONE; } -- 2.7.4 From 509df0a119293f2c36556af1877db533f2c34536 Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Mon, 13 Nov 2017 15:24:51 +0900 Subject: [PATCH 04/16] tpl: Added internal functions to manage tpl_surface in runtime. Change-Id: Id9499bc999f7f147687838bbaf2b3191afcacb2a Signed-off-by: joonbum.ko --- src/tpl.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ src/tpl_internal.h | 6 ++++ src/tpl_surface.c | 19 ++++++++++- 3 files changed, 122 insertions(+), 1 deletion(-) diff --git a/src/tpl.c b/src/tpl.c index 7b0a213..d29e9de 100644 --- a/src/tpl.c +++ b/src/tpl.c @@ -6,6 +6,7 @@ unsigned int tpl_dump_lvl = 0; struct _tpl_runtime { tpl_hlist_t *displays[TPL_BACKEND_COUNT]; + tpl_hlist_t *surfaces[TPL_BACKEND_COUNT]; }; static tpl_runtime_t *runtime = NULL; @@ -326,6 +327,103 @@ __tpl_display_init_backend(tpl_display_t *display, tpl_backend_type_t type) } } +tpl_surface_t * +__tpl_runtime_find_surface(tpl_backend_type_t type, tpl_handle_t native_surface) +{ + tpl_surface_t *surface = NULL; + + if (runtime == NULL) + return NULL; + + pthread_mutex_lock(&runtime_mutex); + + if (type != TPL_BACKEND_UNKNOWN) { + if (runtime->surfaces[type] != NULL) { + surface = (tpl_surface_t *) __tpl_hashlist_lookup(runtime->surfaces[type], + (size_t) native_surface); + } + } else { + int i; + + for (i = 0; i < TPL_BACKEND_COUNT; i++) { + if (runtime->surfaces[i] != NULL) { + surface = (tpl_surface_t *) __tpl_hashlist_lookup(runtime->surfaces[i], + (size_t) native_surface); + } + if (surface != NULL) break; + } + } + + pthread_mutex_unlock(&runtime_mutex); + + return surface; +} + +tpl_result_t +__tpl_runtime_add_surface(tpl_surface_t *surface) +{ + tpl_result_t ret; + tpl_handle_t handle; + tpl_backend_type_t type; + + TPL_ASSERT(surface); + + handle = surface->native_handle; + type = surface->display->backend.type; + + TPL_ASSERT(0 <= type && TPL_BACKEND_COUNT > type); + + if (0 != pthread_mutex_lock(&runtime_mutex)) { + TPL_ERR("runtime_mutex pthread_mutex_lock failed."); + return TPL_ERROR_INVALID_OPERATION; + } + + if (TPL_ERROR_NONE != __tpl_runtime_init()) { + TPL_ERR("__tpl_runtime_init() failed."); + pthread_mutex_unlock(&runtime_mutex); + 
return TPL_ERROR_INVALID_OPERATION; + } + + if (NULL == runtime->surfaces[type]) { + runtime->surfaces[type] = __tpl_hashlist_create(); + if (NULL == runtime->surfaces[type]) { + TPL_ERR("__tpl_hashlist_create failed."); + pthread_mutex_unlock(&runtime_mutex); + return TPL_ERROR_INVALID_OPERATION; + } + } + + ret = __tpl_hashlist_insert(runtime->surfaces[type], + (size_t) handle, (void *) surface); + if (TPL_ERROR_NONE != ret) { + TPL_ERR("__tpl_hashlist_insert failed. list(%p), handle(%p), surface(%p)", + runtime->surfaces[type], handle, surface); + pthread_mutex_unlock(&runtime_mutex); + return TPL_ERROR_INVALID_OPERATION; + } + + pthread_mutex_unlock(&runtime_mutex); + + return TPL_ERROR_NONE; +} + +void +__tpl_runtime_remove_surface(tpl_surface_t *surface) +{ + tpl_handle_t handle = surface->native_handle; + tpl_backend_type_t type = surface->backend.type; + + pthread_mutex_lock(&runtime_mutex); + + if (type != TPL_BACKEND_UNKNOWN) { + if (runtime != NULL && runtime->surfaces[type] != NULL) + __tpl_hashlist_delete(runtime->surfaces[type], + (size_t) handle); + } + + pthread_mutex_unlock(&runtime_mutex); +} + void __tpl_surface_init_backend(tpl_surface_t *surface, tpl_backend_type_t type) { diff --git a/src/tpl_internal.h b/src/tpl_internal.h index 56552a4..d8f037d 100644 --- a/src/tpl_internal.h +++ b/src/tpl_internal.h @@ -201,6 +201,12 @@ __tpl_runtime_find_display(tpl_backend_type_t type, tpl_result_t __tpl_runtime_add_display(tpl_display_t *display); void __tpl_runtime_remove_display(tpl_display_t *display); +tpl_surface_t * +__tpl_runtime_find_surface(tpl_backend_type_t type, + tpl_handle_t native_surface); +tpl_result_t __tpl_runtime_add_surface(tpl_surface_t *surface); +void __tpl_runtime_remove_surface(tpl_surface_t *surface); + /* Backend initialization functions. */ tpl_backend_type_t __tpl_display_choose_backend(tpl_handle_t native_dpy); tpl_bool_t __tpl_display_choose_backend_gbm(tpl_handle_t native_dpy); diff --git a/src/tpl_surface.c b/src/tpl_surface.c index 254da8d..f9d9443 100644 --- a/src/tpl_surface.c +++ b/src/tpl_surface.c @@ -6,6 +6,7 @@ __tpl_surface_fini(tpl_surface_t *surface) TPL_ASSERT(surface); surface->backend.fini(surface); + __tpl_runtime_remove_surface(surface); } static void @@ -22,7 +23,8 @@ tpl_surface_t * tpl_surface_create(tpl_display_t *display, tpl_handle_t handle, tpl_surface_type_t type, tbm_format format) { - tpl_surface_t *surface; + tpl_surface_t *surface = NULL; + tpl_result_t ret = TPL_ERROR_NONE; if (!display) { TPL_ERR("Display is NULL!"); @@ -34,6 +36,13 @@ tpl_surface_create(tpl_display_t *display, tpl_handle_t handle, return NULL; } + surface = __tpl_runtime_find_surface(type, handle); + if (surface) { + TPL_LOG_F("[REUSE] tpl_display_t(%p) tpl_surface_t(%p) native_handle(%p) format(%d)", + display, surface, handle, format); + return surface; + } + surface = (tpl_surface_t *) calloc(1, sizeof(tpl_surface_t)); if (!surface) { TPL_ERR("Failed to allocate memory for surface!"); @@ -67,6 +76,14 @@ tpl_surface_create(tpl_display_t *display, tpl_handle_t handle, return NULL; } + /* Add it to the runtime. 
*/ + ret = __tpl_runtime_add_surface(surface); + if (ret != TPL_ERROR_NONE) { + TPL_ERR("Failed to add surface to runtime list!"); + tpl_object_unreference((tpl_object_t *) surface); + return NULL; + } + TPL_LOG_F("tpl_display_t(%p) tpl_surface_t(%p) native_handle(%p) format(%d)", display, surface, handle, format); return surface; -- 2.7.4 From 6f39fef5fbe789c94f5db7305f9d1d2cd3d72f32 Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Tue, 14 Nov 2017 11:48:51 +0900 Subject: [PATCH 05/16] tpl_wl_vk_thread: Added handling for reset of tbm_surface_queue. - This modification is for HWC conversion in reserved memory situations. Change-Id: I024111e6963fc91b29a888f3d7434e21509c71f8 Signed-off-by: joonbum.ko --- src/tpl_surface.c | 2 +- src/tpl_wl_vk_thread.c | 68 ++++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 62 insertions(+), 8 deletions(-) diff --git a/src/tpl_surface.c b/src/tpl_surface.c index f9d9443..a7a6b1e 100644 --- a/src/tpl_surface.c +++ b/src/tpl_surface.c @@ -36,7 +36,7 @@ tpl_surface_create(tpl_display_t *display, tpl_handle_t handle, return NULL; } - surface = __tpl_runtime_find_surface(type, handle); + surface = __tpl_runtime_find_surface(display->backend.type, handle); if (surface) { TPL_LOG_F("[REUSE] tpl_display_t(%p) tpl_surface_t(%p) native_handle(%p) format(%d)", display, surface, handle, format); diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 1b8f448..197b133 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -285,6 +285,7 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) } wayland_vk_wsi_surface->twe_surface = twe_surface; + wayland_vk_wsi_surface->is_activated = TPL_FALSE; TPL_LOG_T("WL_VK", "[INIT]tpl_surface(%p) tpl_wayland_vk_wsi_surface(%p) twe_surface(%p)", @@ -402,33 +403,62 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface = NULL; tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = + (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; tbm_surface_queue_error_e tsq_err = 0; + tpl_result_t lock_ret = TPL_ERROR_NONE; if (sync_fence) *sync_fence = -1; + /* After the can dequeue state, call twe_display_lock to prevent other + * events from being processed in wayland_egl_thread + * during below dequeue procedure. */ + lock_ret = twe_display_lock(wayland_vk_wsi_display->twe_display); + if (!tbm_surface_queue_can_dequeue(wayland_vk_wsi_surface->tbm_queue, 0)) { if (timeout_ns == 0) return NULL; else { tpl_result_t res = TPL_ERROR_NONE; + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); + TPL_OBJECT_UNLOCK(surface); res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface, timeout_ns); + TPL_OBJECT_LOCK(surface); + lock_ret = twe_display_lock(wayland_vk_wsi_display->twe_display); + if (res == TPL_ERROR_TIME_OUT) { TPL_ERR("Failed to get buffer during timeout_ns(%u)", timeout_ns); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); return NULL; } else if (res != TPL_ERROR_NONE) { TPL_ERR("Invalid parameter. twe_surface(%p) timeout_ns(%u)", wayland_vk_wsi_surface->twe_surface, timeout_ns); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); return NULL; } } } + if (wayland_vk_wsi_surface->reset) { + TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. 
Do not process dequeue.", + wayland_vk_wsi_surface->tbm_queue); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return NULL; + } + + tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue, &tbm_surface); if (!tbm_surface) { TPL_ERR("Failed to get tbm_surface from tbm_surface_queue | tsq_err = %d", tsq_err); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); return NULL; } @@ -442,6 +472,9 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + if (lock_ret == TPL_ERROR_NONE) + twe_display_unlock(wayland_vk_wsi_display->twe_display); + return tbm_surface; } @@ -514,8 +547,8 @@ release_buffer_fail: } static void -__cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, - void *data) +__cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue, + void *data) { tpl_surface_t *surface = NULL; tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; @@ -531,19 +564,22 @@ __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue, * its previous state change the reset flag to TPL_TRUE to get a new buffer * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */ is_activated = twe_surface_check_activated(wayland_vk_wsi_surface->twe_surface); + if (wayland_vk_wsi_surface->is_activated != is_activated) { if (is_activated) { - TPL_LOG_T("WL_EGL", + TPL_LOG_T("WL_VK", "[ACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", wayland_vk_wsi_surface, surface_queue); } else { - TPL_LOG_T("WL_EGL", + TPL_LOG_T("WL_VK", "[DEACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)", wayland_vk_wsi_surface, surface_queue); } - wayland_vk_wsi_surface->reset = TPL_TRUE; + wayland_vk_wsi_surface->is_activated = is_activated; } + wayland_vk_wsi_surface->reset = TPL_TRUE; + if (surface->reset_cb) surface->reset_cb(surface->reset_data); } @@ -568,6 +604,16 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, surface->display->backend.data; TPL_ASSERT(wayland_vk_wsi_display); + if (wayland_vk_wsi_surface->tbm_queue) { + TPL_LOG_T("WL_VK", "[REUSE] wayland_vk_wsi_surface(%p) tbm_queue(%p) size(%d)", + wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue, + wayland_vk_wsi_surface->buffer_count); + wayland_vk_wsi_surface->buffer_count = + tbm_surface_queue_get_size(wayland_vk_wsi_surface->tbm_queue); + wayland_vk_wsi_surface->reset = TPL_FALSE; + return TPL_ERROR_NONE; + } + res = twe_surface_create_swapchain(wayland_vk_wsi_surface->twe_surface, width, height, format, buffer_count, present_mode); @@ -582,8 +628,8 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, /* Set reset_callback to tbm_queue */ if (tbm_surface_queue_add_reset_cb(wayland_vk_wsi_surface->tbm_queue, - __cb_tbm_surface_queue_reset_callback, - (void *)surface)) { + __cb_tbm_queue_reset_callback, + (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("TBM surface queue add reset cb failed!"); twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); wayland_vk_wsi_surface->tbm_queue = NULL; @@ -609,6 +655,12 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; TPL_ASSERT(wayland_vk_wsi_surface); + if (wayland_vk_wsi_surface->reset) { + TPL_LOG_T("WL_VK", + "Since reset is in the TRUE state, it will not be destroyed."); + return TPL_ERROR_NONE; + } + res = 
twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); if (res != TPL_ERROR_NONE) { TPL_ERR("Failed to destroy swapchain. twe_surface(%p)", @@ -616,6 +668,8 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) return res; } + wayland_vk_wsi_surface->tbm_queue = NULL; + return TPL_ERROR_NONE; } -- 2.7.4 From bb382a53ddf0774d3597059031d1b6274de524df Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Tue, 28 Nov 2017 19:14:03 +0900 Subject: [PATCH 06/16] tpl_wayland_egl_thread: Modified backend name for printing debug logs. - It will be optimized. Change-Id: Iae60aa3a4e7d7f0b55c332b69a92d9e26c8ce69b Signed-off-by: joonbum.ko --- .vscode/settings.json | 5 +++ src/tpl_wayland_egl_thread.c | 87 +++++++++++++++++++++++--------------------- 2 files changed, 50 insertions(+), 42 deletions(-) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..d10fd7e --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "files.associations": { + "tpl_utils.h": "c" + } +} \ No newline at end of file diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index 2e345ab..d3116fd 100644 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -23,6 +23,9 @@ static int buffer_info_key; #define CLIENT_QUEUE_SIZE 3 #define VK_CLIENT_QUEUE_SIZE 3 +/* backend name will be optimized */ +#define BACKEND "WL_VK_GL" + typedef struct _twe_wl_disp_source twe_wl_disp_source; typedef struct _twe_wl_surf_source twe_wl_surf_source; typedef struct _twe_wl_buffer_info twe_wl_buffer_info; @@ -227,7 +230,7 @@ _twe_thread_del_source_finalize(GSource *source) { twe_del_source *del_source = (twe_del_source *)source; - TPL_LOG_T("WL_EGL", "gsource(%p) event_fd(%d)", + TPL_LOG_T(BACKEND, "gsource(%p) event_fd(%d)", source, del_source->event_fd); close(del_source->event_fd); @@ -342,7 +345,7 @@ _twe_thread_tdm_source_finalize(GSource *source) { twe_tdm_source *tdm_source = (twe_tdm_source *)source; - TPL_LOG_T("WL_EGL", "tdm_destroy| tdm_source(%p) tdm_client(%p)", + TPL_LOG_T(BACKEND, "tdm_destroy| tdm_source(%p) tdm_client(%p)", tdm_source, tdm_source->tdm_client); if (tdm_source->tdm_client) { @@ -395,8 +398,8 @@ _twe_thread_tdm_source_create(void) tdm_fd, G_IO_IN); - TPL_LOG_T("WL_EGL", "TPL_WAIT_VBLANK:DEFAULT_ENABLED"); - TPL_LOG_T("WL_EGL", "tdm_source(%p) tdm_client(%p) tdm_fd(%d)", + TPL_LOG_T(BACKEND, "TPL_WAIT_VBLANK:DEFAULT_ENABLED"); + TPL_LOG_T(BACKEND, "tdm_source(%p) tdm_client(%p) tdm_fd(%d)", tdm_source, client, tdm_fd); return tdm_source; @@ -474,7 +477,7 @@ twe_thread_create(void) thread->ctx = _twe_ctx; _twe_ctx->ref_cnt++; - TPL_LOG_T("WL_EGL", "_twe_ctx(%p) twe_thread(%p)", _twe_ctx, thread); + TPL_LOG_T(BACKEND, "_twe_ctx(%p) twe_thread(%p)", _twe_ctx, thread); return thread; } @@ -511,7 +514,7 @@ twe_thread_destroy(twe_thread* thread) _twe_ctx = NULL; } - TPL_LOG_T("WL_EGL", "[THREAD DESTROY] twe_thread(%p)", thread); + TPL_LOG_T(BACKEND, "[THREAD DESTROY] twe_thread(%p)", thread); thread->ctx = NULL; free(thread); @@ -621,7 +624,7 @@ _twe_thread_wl_disp_finalize(GSource *source) { twe_wl_disp_source *disp_source = (twe_wl_disp_source *)source; - TPL_LOG_T("WL_EGL", "finalize| disp_source(%p)", disp_source); + TPL_LOG_T(BACKEND, "finalize| disp_source(%p)", disp_source); __tpl_object_fini(&disp_source->obj); @@ -657,7 +660,7 @@ _twe_display_init_wl_tbm_client(struct wl_display *display, wl_proxy_set_queue(wl_tbm, ev_queue); - TPL_LOG_T("WL_EGL", "wl_tbm_client init| 
wl_tbm_client(%p)", wl_tbm_client); + TPL_LOG_T(BACKEND, "wl_tbm_client init| wl_tbm_client(%p)", wl_tbm_client); return wl_tbm_client; } @@ -671,7 +674,7 @@ _twe_display_fini_wl_tbm_client(struct wayland_tbm_client *wl_tbm_client) wl_proxy_set_queue(wl_tbm, NULL); } - TPL_LOG_T("WL_EGL", "wl_tbm_client deinit| wl_tbm_client(%p)", wl_tbm_client); + TPL_LOG_T(BACKEND, "wl_tbm_client deinit| wl_tbm_client(%p)", wl_tbm_client); wayland_tbm_client_deinit(wl_tbm_client); } @@ -795,7 +798,7 @@ _twe_display_shm_init(twe_wl_disp_source *disp_source) wl_proxy_set_queue((struct wl_proxy *)disp_source->tss, NULL); - TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) init.", disp_source->tss); + TPL_LOG_T(BACKEND, "tizen_surface_shm(%p) init.", disp_source->tss); fini: if (queue) @@ -810,7 +813,7 @@ static void _twe_display_shm_fini(twe_wl_disp_source *disp_source) { if (disp_source->tss) { - TPL_LOG_T("WL_EGL", "tizen_surface_shm(%p) fini.", disp_source->tss); + TPL_LOG_T(BACKEND, "tizen_surface_shm(%p) fini.", disp_source->tss); tizen_surface_shm_destroy(disp_source->tss); disp_source->tss = NULL; } @@ -870,7 +873,7 @@ static void _twe_display_vk_fini(twe_wl_disp_source *disp_source) { if (disp_source->wl_vk_client) { - TPL_LOG_T("WL_VK", "wl_vk_client(%p) fini.", disp_source->wl_vk_client); + TPL_LOG_T(BACKEND, "wl_vk_client(%p) fini.", disp_source->wl_vk_client); wayland_vulkan_destroy(disp_source->wl_vk_client); } } @@ -901,7 +904,7 @@ _twe_thread_wl_disp_source_destroy(void *source) wl_event_queue_destroy(disp_source->ev_queue); TPL_OBJECT_UNLOCK(&disp_source->obj); - TPL_LOG_T("WL_EGL", "[DEL] twe_display(%p) wl_display(%p)", + TPL_LOG_T(BACKEND, "[DEL] twe_display(%p) wl_display(%p)", disp_source, disp_source->disp); g_source_remove_poll(&disp_source->gsource, &disp_source->gfd); @@ -975,7 +978,7 @@ twe_display_add(twe_thread* thread, g_source_add_poll(&source->gsource, &source->gfd); g_source_attach(&source->gsource, g_main_loop_get_context(ctx->twe_loop)); - TPL_LOG_T("WL_EGL", "add| gsource(%p) ev_queue(%p) wl_display(%p)", + TPL_LOG_T(BACKEND, "add| gsource(%p) ev_queue(%p) wl_display(%p)", source, source->ev_queue, display); return (twe_display_h)source; @@ -1084,7 +1087,7 @@ __cb_destroy_callback(void *private) twe_wl_surf_source *surf_source = (twe_wl_surf_source *)private; if (surf_source) { - TPL_LOG_T("WL_EGL", "[DESTROY_CB] wl_egl_window(%p) surf_source(%p)", + TPL_LOG_T(BACKEND, "[DESTROY_CB] wl_egl_window(%p) surf_source(%p)", surf_source->wl_egl_window, surf_source); TPL_OBJECT_LOCK(&surf_source->obj); surf_source->wl_egl_window->private = NULL; @@ -1110,7 +1113,7 @@ __cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private) req_w = wl_egl_window->width; req_h = wl_egl_window->height; - TPL_LOG_T("WL_EGL", "[RESIZE_CB] wl_egl_window(%p) (%dx%d) -> (%dx%d)", + TPL_LOG_T(BACKEND, "[RESIZE_CB] wl_egl_window(%p) (%dx%d) -> (%dx%d)", wl_egl_window, cur_w, cur_h, req_w, req_h); if (tbm_surface_queue_reset(source->tbm_queue, req_w, req_h, format) @@ -1129,7 +1132,7 @@ __cb_rotate_callback(struct wl_egl_window *wl_egl_window, void *private) int rotation = wl_egl_window->rotation; twe_wl_surf_source *source = (twe_wl_surf_source *)private; - TPL_LOG_T("WL_EGL", "[ROTATE_CB] wl_egl_window(%p) (%d) -> (%d)", + TPL_LOG_T(BACKEND, "[ROTATE_CB] wl_egl_window(%p) (%d) -> (%d)", wl_egl_window, source->rotation, rotation); source->rotation = rotation; @@ -1162,7 +1165,7 @@ static void __cb_tss_flusher_flush_callback(void *data, twe_wl_disp_source *disp_source = surf_source->disp_source; int 
ret; - TPL_LOG_T("WL_EGL", "[FLUSH_CB] surf_source(%p)", surf_source); + TPL_LOG_T(BACKEND, "[FLUSH_CB] surf_source(%p)", surf_source); /*First distach pending queue for TPL - dispatch buffer-release @@ -1203,7 +1206,7 @@ static void __cb_tss_flusher_free_flush_callback(void *data, twe_wl_disp_source *disp_source = surf_source->disp_source; int ret; - TPL_LOG_T("WL_EGL", "[FREE_FLUSH_CB] surf_source(%p)", surf_source); + TPL_LOG_T(BACKEND, "[FREE_FLUSH_CB] surf_source(%p)", surf_source); /* First distach panding queue for TPL - dispatch buffer-release @@ -1236,7 +1239,7 @@ __cb_twe_buffer_free_callback(twe_wl_buffer_info *buf_info) twe_wl_surf_source *surf_source = buf_info->surf_source; twe_wl_disp_source *disp_source = surf_source->disp_source; - TPL_LOG_T("WL_EGL", "[FREE] twe_buffer(%p) wl_buffer(%p)", + TPL_LOG_T(BACKEND, "[FREE] twe_buffer(%p) wl_buffer(%p)", buf_info, buf_info->wl_buffer); wl_display_flush(disp_source->disp); @@ -1294,7 +1297,7 @@ __cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer) TPL_OBJECT_UNLOCK(&surf_source->obj); } - TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", + TPL_LOG_T(BACKEND, "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", buf_info->wl_buffer, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); tbm_surface_internal_unref(tbm_surface); @@ -1368,7 +1371,7 @@ _twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source, TPL_OBJECT_UNLOCK(&surf_source->obj); } - TPL_LOG_T("WL_EGL", + TPL_LOG_T(BACKEND, "[REUSE_BUF] buf_info(%p) tbm_surface(%p) bo(%d) (%dx%d) transform(%d)", buf_info, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)), @@ -1452,7 +1455,7 @@ _twe_surface_set_wl_buffer_info(twe_wl_surf_source *surf_source, TPL_OBJECT_UNLOCK(&surf_source->obj); } - TPL_LOG_T("WL_EGL", + TPL_LOG_T(BACKEND, "[NEW_BUF] buf_info(%p) tbm_surface(%p) bo(%d) (%dx%d) transform(%d)", buf_info, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)), @@ -1478,7 +1481,7 @@ _twe_surface_cancel_dequeued_buffer(twe_wl_surf_source *surf_source, return; } - TPL_LOG_T("WL_EGL", + TPL_LOG_T(BACKEND, "[CANCEL_BUFFER] Stop tracking of canceled tbm_surface(%p)", tbm_surface); @@ -1495,7 +1498,7 @@ static void __cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue, void *data) { - TPL_LOG_T("WL_EGL", "tbm_queue(%p) has been reset!", tbm_queue); + TPL_LOG_T(BACKEND, "tbm_queue(%p) has been reset!", tbm_queue); } static void @@ -1688,7 +1691,7 @@ _twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source, buf_info->sync_timestamp++; - TPL_LOG_T("WL_VK", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", + TPL_LOG_T(BACKEND, "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", buf_info->wl_buffer, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); @@ -1780,7 +1783,7 @@ _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source, buf_info->need_to_commit = TPL_FALSE; - TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", + TPL_LOG_T(BACKEND, "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", buf_info->wl_buffer, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); @@ -1821,7 +1824,7 @@ _twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source) } if (!disp_source->is_vulkan_dpy && !surf_source->vblank_done) { - TPL_LOG_T("WL_EGL", "[ACQ_skip] It will be acquired next vblank."); + TPL_LOG_T(BACKEND, "[ACQ_skip] It will be acquired next vblank."); TPL_OBJECT_UNLOCK(&surf_source->obj); return; } @@ -1847,7 +1850,7 
@@ _twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source) } if (!disp_source->is_vulkan_dpy) { /* wayland_egl */ - TPL_LOG_T("WL_EGL", "[ACQ] tbm_surface(%p) bo(%d)", + TPL_LOG_T(BACKEND, "[ACQ] tbm_surface(%p) bo(%d)", tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); @@ -1861,7 +1864,7 @@ _twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source) case TPL_DISPLAY_PRESENT_MODE_MAILBOX: if (surf_source->draw_done_buffer) { g_mutex_lock(&surf_source->free_queue_mutex); - TPL_LOG_T("WL_VK", "[SKIP] tbm_surface(%p) bo(%d)", + TPL_LOG_T(BACKEND, "[SKIP] tbm_surface(%p) bo(%d)", tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo( tbm_surface, 0))); @@ -1943,7 +1946,7 @@ _twe_thread_wl_surface_finalize(GSource *source) { twe_wl_surf_source *surf_source = (twe_wl_surf_source *)source; - TPL_LOG_T("WL_EGL", "gsource(%p) event_fd(%d)", + TPL_LOG_T(BACKEND, "gsource(%p) event_fd(%d)", source, surf_source->event_fd); close(surf_source->event_fd); @@ -1975,7 +1978,7 @@ _twe_surface_buffer_flusher_init(twe_wl_surf_source *surf_source) tizen_surface_shm_flusher_add_listener(surf_source->tss_flusher, &tss_flusher_listener, surf_source); - TPL_LOG_T("WL_EGL", + TPL_LOG_T(BACKEND, "tss_flusher init. surf_source(%p) tss_flusher(%p)", surf_source, surf_source->tss_flusher); } @@ -1984,7 +1987,7 @@ static void _twe_surface_buffer_flusher_fini(twe_wl_surf_source *surf_source) { if (surf_source->tss_flusher) { - TPL_LOG_T("WL_EGL", + TPL_LOG_T(BACKEND, "tss_flusher fini. surf_source(%p) tss_flusher(%p)", surf_source, surf_source->tss_flusher); tizen_surface_shm_flusher_destroy(surf_source->tss_flusher); @@ -2019,7 +2022,7 @@ _twe_surface_create_vblank(tdm_client *tdm_client) tdm_client_vblank_set_enable_fake(vblank, 1); tdm_client_vblank_set_sync(vblank, 0); - TPL_LOG_T("WL_EGL", "[VBLANK INIT] vblank(%p)", vblank); + TPL_LOG_T(BACKEND, "[VBLANK INIT] vblank(%p)", vblank); return vblank; } @@ -2165,7 +2168,7 @@ _twe_thread_wl_surf_source_destroy(void *source) } if (surf_source->vblank) { - TPL_LOG_T("WL_EGL", "[VBLANK FINI] twe_wl_surf_source(%p) vblank(%p)", + TPL_LOG_T(BACKEND, "[VBLANK FINI] twe_wl_surf_source(%p) vblank(%p)", surf_source, surf_source->vblank); tdm_client_vblank_destroy(surf_source->vblank); } @@ -2174,7 +2177,7 @@ _twe_thread_wl_surf_source_destroy(void *source) surf_source->rotate_cb = NULL; if (surf_source->wl_egl_window) { - TPL_LOG_T("WL_EGL", "twe_surface(%p) wl_egl_window(%p) wl_surface(%p)", + TPL_LOG_T(BACKEND, "twe_surface(%p) wl_egl_window(%p) wl_surface(%p)", surf_source, surf_source->wl_egl_window, surf_source->surf); surf_source->wl_egl_window->destroy_window_callback = NULL; surf_source->wl_egl_window->resize_callback = NULL; @@ -2298,7 +2301,7 @@ twe_surface_add(twe_thread* thread, g_mutex_init(&source->free_queue_mutex); g_cond_init(&source->free_queue_cond); - TPL_LOG_T("WL_EGL", + TPL_LOG_T(BACKEND, "gsource(%p) native_handle(%p) wl_surface(%p) event_fd(%d)", source, native_handle, source->surf, source->event_fd); @@ -2328,7 +2331,7 @@ twe_surface_del(twe_surface_h twe_surface) return TPL_ERROR_INVALID_PARAMETER; } - TPL_LOG_T("WL_EGL", "twe_surface(%p) will be destroyed in thread", + TPL_LOG_T(BACKEND, "twe_surface(%p) will be destroyed in thread", twe_surface); surf_del_source = surf_source->surf_del_source; @@ -2428,9 +2431,9 @@ twe_surface_create_swapchain(twe_surface_h twe_surface, surf_source->swapchain_properties.present_mode = present_mode; surf_source->swapchain_properties.buffer_count = buffer_count; 
- TPL_LOG_T("WL_VK", "[SWAPCHAIN_CREATE][1/2] twe_surface(%p) tbm_queue(%p)", + TPL_LOG_T(BACKEND, "[SWAPCHAIN_CREATE][1/2] twe_surface(%p) tbm_queue(%p)", twe_surface, surf_source->tbm_queue); - TPL_LOG_T("WL_VK", + TPL_LOG_T(BACKEND, "[SWAPCHAIN_CREATE][2/2] w(%d) h(%d) f(%d) p(%d) b_cnt(%d)", width, height, format, present_mode, buffer_count); @@ -2447,7 +2450,7 @@ twe_surface_destroy_swapchain(twe_surface_h twe_surface) return TPL_ERROR_INVALID_PARAMETER; } - TPL_LOG_T("WL_VK", "[SWAPCHAIN_DESTROY] twe_surface(%p) tbm_queue(%p)", + TPL_LOG_T(BACKEND, "[SWAPCHAIN_DESTROY] twe_surface(%p) tbm_queue(%p)", twe_surface, surf_source->tbm_queue); if (surf_source->tbm_queue) { @@ -2502,7 +2505,7 @@ twe_surface_set_rotation_capablity(twe_surface_h twe_surface, tpl_bool_t set) return; } - TPL_LOG_T("WL_EGL", "twe_surface(%p) rotation capability set to [%s]", + TPL_LOG_T(BACKEND, "twe_surface(%p) rotation capability set to [%s]", source, (set ? "TRUE" : "FALSE")); source->rotation_capability = set; -- 2.7.4 From 72d0a97060e6191f467f3063a9ecdded570231b3 Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Wed, 29 Nov 2017 12:00:29 +0900 Subject: [PATCH 07/16] tpl_wayland_egl_thread: Added the missing code to allocate list. - vblank_waiting_buffers list is needed to support FIFO & FIFO_RELAXED modes. Change-Id: I93a90ad4711bcb8f13640cc0942dc4175cdff8cc Signed-off-by: joonbum.ko --- src/tpl_wayland_egl_thread.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index d3116fd..7944b82 100644 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -2158,6 +2158,12 @@ _twe_thread_wl_surf_source_destroy(void *source) surf_source->committed_buffers = NULL; } + if (surf_source->vblank_waiting_buffers) { + __tpl_list_free(surf_source->vblank_waiting_buffers, + (tpl_free_func_t)__cb_buffer_remove_from_list); + surf_source->vblank_waiting_buffers = NULL; + } + if (!disp_source->is_vulkan_dpy) { _twe_surface_buffer_flusher_fini(surf_source); } @@ -2425,6 +2431,11 @@ twe_surface_create_swapchain(twe_surface_h twe_surface, return TPL_ERROR_INVALID_OPERATION; } + if (present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO + || present_mode == TPL_DISPLAY_PRESENT_MODE_FIFO_RELAXED) { + surf_source->vblank_waiting_buffers = __tpl_list_alloc(); + } + surf_source->format = format; surf_source->swapchain_properties.width = width; surf_source->swapchain_properties.height = height; -- 2.7.4 From 25a1f86e50b1a9a863ac0b3022f65ee4864175ba Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Thu, 30 Nov 2017 14:34:26 +0900 Subject: [PATCH 08/16] tpl_wayland_egl_thread: Fixed to work with IMMEDIATE in case vblank creation failed. Change-Id: I3c5a8addf4a3b2ab4cf25c8bccfc66bcddfebb02 Signed-off-by: joonbum.ko --- src/tpl_wayland_egl_thread.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index 7944b82..4c60f59 100644 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -1887,8 +1887,10 @@ _twe_thread_wl_surface_acquire_and_commit(twe_wl_surf_source *surf_source) if (surf_source->vblank_waiting_buffers) { __tpl_list_push_back(surf_source->vblank_waiting_buffers, (void *)tbm_surface); - if (_twe_surface_wait_vblank(surf_source) != TPL_ERROR_NONE) - TPL_ERR("Failed to set wait vblank"); + if (_twe_surface_wait_vblank(surf_source) != TPL_ERROR_NONE) { + TPL_WARN("Failed to set wait vblank. 
Falling back to IMMEDIATE_MODE."); + _twe_thread_wl_vk_surface_commit(surf_source, tbm_surface); + } } else { TPL_ERR("Invalid list. vblank_waiting_buffers is NULL."); } -- 2.7.4 From 480bf591c42d1fe1e77def21e2612926ea784d55 Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Thu, 30 Nov 2017 20:06:50 +0900 Subject: [PATCH 09/16] tpl_wl_vk_thread: Added TRACE point to debug. Change-Id: Ia622b63760e5ec3f955eb3322a2d708e50ee0af1 Signed-off-by: joonbum.ko --- src/tpl_wayland_egl_thread.c | 10 ++++++++++ src/tpl_wl_vk_thread.c | 2 ++ 2 files changed, 12 insertions(+) diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index 4c60f59..07b2d35 100644 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -1297,6 +1297,10 @@ __cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer) TPL_OBJECT_UNLOCK(&surf_source->obj); } + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + tbm_bo_export(tbm_surface_internal_get_bo( + tbm_surface, 0))); + TPL_LOG_T(BACKEND, "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)", buf_info->wl_buffer, tbm_surface, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); @@ -1689,6 +1693,9 @@ _twe_thread_wl_vk_surface_commit(twe_wl_surf_source *surf_source, surf_source->swapchain_properties.height); wl_surface_commit(wl_surface); + TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + buf_info->sync_timestamp++; TPL_LOG_T(BACKEND, "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", @@ -1781,6 +1788,9 @@ _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source, wl_surface_commit(wl_surface); + TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); + buf_info->need_to_commit = TPL_FALSE; TPL_LOG_T(BACKEND, "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index 197b133..b917001 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -423,8 +423,10 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, if (lock_ret == TPL_ERROR_NONE) twe_display_unlock(wayland_vk_wsi_display->twe_display); TPL_OBJECT_UNLOCK(surface); + TRACE_BEGIN("WAIT_DEQUEUEABLE"); res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface, timeout_ns); + TRACE_END(); TPL_OBJECT_LOCK(surface); lock_ret = twe_display_lock(wayland_vk_wsi_display->twe_display); -- 2.7.4 From 926dae9c7a6f548fe5abc0435e9fc75d22a456a9 Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Thu, 30 Nov 2017 20:20:55 +0900 Subject: [PATCH 10/16] tpl_wayland_egl_thread: Add tbm_surface_queue_release on buffer flush cb This patch fixes the case that unnecessary buffer release cb is called after buffer flush. 
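A committed buffer normally stays on surf_source->committed_buffers until the compositor sends the matching wl_buffer release event. When the tizen_surface_shm flusher asks the client to flush, waiting for those releases no longer makes sense, so the hunk below hands every still-committed buffer back to the tbm_surface_queue directly from the flush callback. A condensed sketch of that drain loop, with the tracing and tsq_err bookkeeping of the actual hunk elided (twe_wl_surf_source, __tpl_list_* and __cb_buffer_remove_from_list are the helpers already used in this file):

    /* Sketch: return still-committed buffers to the queue at flush time,
     * so nothing is left waiting for a wl_buffer release after the flush. */
    static void
    __sketch_release_committed_buffers(twe_wl_surf_source *surf_source)
    {
        while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
            tbm_surface_h tbm_surface =
                __tpl_list_pop_front(surf_source->committed_buffers,
                                     (tpl_free_func_t)__cb_buffer_remove_from_list);

            if (tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface)
                    != TBM_SURFACE_QUEUE_ERROR_NONE)
                TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface);
        }
    }
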
Change-Id: Iaa2023ecb6d990815e89b63a48f6709e694bbaeb Signed-off-by: joonbum.ko --- src/tpl_wayland_egl_thread.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index 07b2d35..4db6a8c 100644 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -1193,6 +1193,20 @@ static void __cb_tss_flusher_flush_callback(void *data, TPL_OBJECT_LOCK(&surf_source->obj); __tpl_list_fini(surf_source->committed_buffers, (tpl_free_func_t)__cb_buffer_remove_from_list); + while (!__tpl_list_is_empty(surf_source->committed_buffers)) { + tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; + tbm_surface_h tbm_surface = + __tpl_list_pop_front(surf_source->committed_buffers, + (tpl_free_func_t)__cb_buffer_remove_from_list); + + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", + tbm_bo_export(tbm_surface_internal_get_bo( + tbm_surface, 0))); + tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface); + if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) + TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)", + tbm_surface, tsq_err); + } TPL_OBJECT_UNLOCK(&surf_source->obj); } -- 2.7.4 From 54361c826d1ff271d68419b10c5d0ebf08c8fe72 Mon Sep 17 00:00:00 2001 From: "joonbum.ko" Date: Tue, 5 Dec 2017 17:04:17 +0900 Subject: [PATCH 11/16] Package version up to 1.5.0 - minor version up from 4 to 5. - minor version is separated from tizen_4.0 Change-Id: I5f5b7eff3a60e32fc1437f1f91a7d0393330e857 Signed-off-by: joonbum.ko --- packaging/libtpl-egl.spec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/libtpl-egl.spec b/packaging/libtpl-egl.spec index d643afa..38d6051 100644 --- a/packaging/libtpl-egl.spec +++ b/packaging/libtpl-egl.spec @@ -2,8 +2,8 @@ #TPL VERSION MACROS %define TPL_VERSION_MAJOR 1 -%define TPL_VERSION_MINOR 4 -%define TPL_VERSION_PATCH 12 +%define TPL_VERSION_MINOR 5 +%define TPL_VERSION_PATCH 0 %define TPL_VERSION %{TPL_VERSION_MAJOR}.%{TPL_VERSION_MINOR}.%{TPL_VERSION_PATCH} #TPL WINDOW SYSTEM DEFINITION -- 2.7.4 From 29ea8ad6fbd8d2212dafc29077a6869f01ce2da5 Mon Sep 17 00:00:00 2001 From: Hoyub Lee Date: Wed, 13 Dec 2017 12:08:51 +0900 Subject: [PATCH 12/16] tpl_wayland_egl_thread: Add necessity check for buffer release The buffer release cb previously had no way to check whether a given buffer actually needs to be released. This patch adds that check, so unnecessary buffer release cbs are detected and handled correctly.
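The need_to_release flag added below acts as a per-buffer handshake: it is raised when the buffer is committed to the compositor and lowered the first time the release path hands the buffer back to the tbm_surface_queue, so any further release callback for the same wl_buffer becomes a no-op. A minimal sketch of that lifecycle, using the field and type names from the hunk that follows (locking, tracing and error handling omitted):

    /* At commit time: the compositor owns the buffer now, one release is expected. */
    static void
    __sketch_mark_committed(twe_wl_buffer_info *buf_info)
    {
        buf_info->need_to_commit = TPL_FALSE;
        buf_info->need_to_release = TPL_TRUE;
    }

    /* In the release callback: act only on buffers that are actually waiting
     * for a release; stale or duplicate release events are ignored. */
    static void
    __sketch_on_buffer_release(twe_wl_surf_source *surf_source,
                               twe_wl_buffer_info *buf_info,
                               tbm_surface_h tbm_surface)
    {
        if (!buf_info || !buf_info->need_to_release)
            return;

        buf_info->need_to_release = TPL_FALSE;
        tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
    }
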
Change-Id: I51b9d614da44fac4919539749612ca024b56aa9a Signed-off-by: Hoyub Lee --- src/tpl_wayland_egl_thread.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c index 4db6a8c..e8bf76b 100644 --- a/src/tpl_wayland_egl_thread.c +++ b/src/tpl_wayland_egl_thread.c @@ -135,6 +135,9 @@ struct _twe_wl_buffer_info { int *rects; tpl_bool_t need_to_commit; + /* for checking need release */ + tpl_bool_t need_to_release; + /* for checking draw done */ tpl_bool_t draw_done; tbm_fd draw_done_fence; @@ -1292,7 +1295,7 @@ __cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer) if (tbm_surface_internal_is_valid(tbm_surface)) { tbm_surface_internal_get_user_data(tbm_surface, KEY_BUFFER_INFO, (void **)&buf_info); - if (buf_info) { + if (buf_info && buf_info->need_to_release) { twe_wl_surf_source *surf_source = buf_info->surf_source; tbm_surface_queue_error_e tsq_err; @@ -1311,6 +1314,8 @@ __cb_buffer_release_callback(void *data, struct wl_proxy *wl_buffer) TPL_OBJECT_UNLOCK(&surf_source->obj); } + buf_info->need_to_release = TPL_FALSE; + TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)", tbm_bo_export(tbm_surface_internal_get_bo( tbm_surface, 0))); @@ -1806,6 +1811,7 @@ _twe_thread_wl_surface_commit(twe_wl_surf_source *surf_source, tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0))); buf_info->need_to_commit = TPL_FALSE; + buf_info->need_to_release = TPL_TRUE; TPL_LOG_T(BACKEND, "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)", buf_info->wl_buffer, tbm_surface, -- 2.7.4 From dc0662688ed4dc1c6c99ad6b5dd26f33941b537f Mon Sep 17 00:00:00 2001 From: Hoyub Lee Date: Wed, 13 Dec 2017 22:10:48 +0900 Subject: [PATCH 13/16] tpl_wayland_egl_thread: Replace unnecessary internal header with public one Change-Id: I1307adf16f3a3373de8d5da746d8756823083481 Signed-off-by: Hoyub Lee --- src/tpl_wayland_egl_thread.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tpl_wayland_egl_thread.h b/src/tpl_wayland_egl_thread.h index 4c2fd4f..3e22829 100644 --- a/src/tpl_wayland_egl_thread.h +++ b/src/tpl_wayland_egl_thread.h @@ -1,7 +1,7 @@ #include #include +#include -#include "wayland-egl/wayland-egl-priv.h" #include "tpl.h" typedef struct _twe_thread twe_thread; -- 2.7.4 From c12a520284783bb650fa4917a22a1df644ca6470 Mon Sep 17 00:00:00 2001 From: Hoyub Lee Date: Wed, 13 Dec 2017 21:10:08 +0900 Subject: [PATCH 14/16] tc: Add member variable for choosing backend on Config Change-Id: Ifd04a8f8c5840248d234b1b2fdb82d28b598f4ee Signed-off-by: Hoyub Lee --- tc/src/tpl-test_base.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tc/src/tpl-test_base.h b/tc/src/tpl-test_base.h index 309400a..071357e 100644 --- a/tc/src/tpl-test_base.h +++ b/tc/src/tpl-test_base.h @@ -31,6 +31,7 @@ typedef struct { int width; int height; int depth; + tpl_backend_type_t backend; } Config; -- 2.7.4 From f367e0d244b210f8a3f6339818d257038c59cf79 Mon Sep 17 00:00:00 2001 From: Hoyub Lee Date: Wed, 13 Dec 2017 21:20:50 +0900 Subject: [PATCH 15/16] tc: Add parsing backend functionality on Config parsing function Change-Id: Ibbabb404114ebad01fb695ba8aeeb5ded5182d20 Signed-off-by: Hoyub Lee --- tc/src/main.cpp | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/tc/src/main.cpp b/tc/src/main.cpp index e9c9b1c..25bb681 100644 --- a/tc/src/main.cpp +++ b/tc/src/main.cpp @@ -20,9 +20,11 @@ void tpl_test_print_help() { printf("\n\n=== How to setup test configurations ===\n\n"); - 
printf("--config.width=WIDTH (default: 720)\n"); - printf("--config.height=HEIGHT (default: 1280)\n"); - printf("--config.depth=DEPTH (default: 24)\n"); + printf("--config.width=WIDTH (default: 720)\n"); + printf("--config.height=HEIGHT (default: 1280)\n"); + printf("--config.depth=DEPTH (default: 24)\n"); + printf("--config.backend=BACKEND BACKEND is 'TPL_WAYLAND', 'TPL_WAYLAND_THREAD', or 'TPL_TBM'.\n"); + printf(" (default: TPL_WAYLAND)\n"); printf("\n\n"); } @@ -35,17 +37,20 @@ tpl_test_parse_arguments(int argc, char **argv) config.width = 720; config.height = 1280; config.depth = 24; + config.backend = TPL_BACKEND_WAYLAND; // Check option int opt_width = 0; int opt_height = 0; int opt_depth = 0; + int opt_backend = 0; struct option longopts[] = { {"help", no_argument, NULL, 'h'}, {"config.width", required_argument, &opt_width, 1}, {"config.height", required_argument, &opt_height, 1}, {"config.depth", optional_argument, &opt_depth, 1}, + {"config.backend", required_argument, &opt_backend, 1}, {NULL, 0, NULL, 0} }; @@ -69,6 +74,23 @@ tpl_test_parse_arguments(int argc, char **argv) printf("Depth set: %s\n", optarg); config.depth = atoi(optarg); opt_depth = 0; + } else if (opt_backend == 1) { + char backend[100]; + strncpy(backend, optarg, 100); + + // Parse backend argument + if (strncmp(backend, "TPL_WAYLAND", 100) == 0) { + config.backend = TPL_BACKEND_WAYLAND; + } else if (strncmp(backend, "TPL_WAYLAND_THREAD", 100) == 0) { + config.backend = TPL_BACKEND_WAYLAND_THREAD; + } else if (strncmp(backend, "TPL_TBM", 100) == 0) { + config.backend = TPL_BACKEND_TBM; + } else { + printf("Unrecognized backend: %s, Using default backend.", backend); + strncpy(backend, "TPL_WAYLAND", 100); + } + + printf("Backend set: %s\n", backend); } else { break; } -- 2.7.4 From d96e2c234d1b48e3e931a4b1153ef947a558c3ec Mon Sep 17 00:00:00 2001 From: Hoyub Lee Date: Wed, 13 Dec 2017 21:27:16 +0900 Subject: [PATCH 16/16] tc: Apply config.backend on TPLWayland Change-Id: I40a1f898d76b1fac772ce42a29b1ee71deae38b7 Signed-off-by: Hoyub Lee --- tc/src/tpl-test_wayland.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tc/src/tpl-test_wayland.cpp b/tc/src/tpl-test_wayland.cpp index 9ba3722..591d595 100644 --- a/tc/src/tpl-test_wayland.cpp +++ b/tc/src/tpl-test_wayland.cpp @@ -84,7 +84,7 @@ TPLWayland::tpl_backend_initialize(Config *config) wl_win.native = wl_egl_window_create(wl_win.surface, config->width, config->height); - tpl_display = tpl_display_create(TPL_BACKEND_WAYLAND, + tpl_display = tpl_display_create(config->backend, (tpl_handle_t)wl_disp.display); tpl_surface = tpl_surface_create(tpl_display, (tpl_handle_t)wl_win.native, -- 2.7.4