From 5fefb2956af2ed1f6d8f4f048d86febd82e59946 Mon Sep 17 00:00:00 2001
From: Joonbum Ko
Date: Thu, 1 Apr 2021 16:22:02 +0900
Subject: [PATCH] Implement tpl_wl_vk_surface using tpl_gthread_utils.

Change-Id: Ibb58ef10fa02fb6220453c5c18b7760a7a4d9994
Signed-off-by: Joonbum Ko

Re-implement tpl_wl_vk_surface using tpl_gthread_utils

Change-Id: I59ce5fb2092f60956ac1a2322f701b4a610016fe
Signed-off-by: Joonbum Ko
---
 src/tpl_wl_vk_thread.c | 690 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 540 insertions(+), 150 deletions(-)

diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c
index 3a846b9..fceee7e 100644
--- a/src/tpl_wl_vk_thread.c
+++ b/src/tpl_wl_vk_thread.c
@@ -64,6 +64,8 @@ struct _tpl_wl_vk_display {
 struct _tpl_wl_vk_swapchain {
 	tpl_wl_vk_surface_t *wl_vk_surface;
 
+	tbm_surface_queue_h tbm_queue;
+
 	struct {
 		int width;
 		int height;
@@ -82,8 +84,6 @@ struct _tpl_wl_vk_surface {
 
 	tpl_wl_vk_swapchain_t *swapchain;
 
-	tbm_surface_queue_h tbm_queue;
-
 	struct wl_surface *wl_surface;
 	struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
@@ -872,55 +872,341 @@ __tpl_wl_vk_wsi_display_query_window_supported_present_modes(
 	return TPL_ERROR_NONE;
 }
 
+static void
+_tpl_wl_vk_surface_buffer_clear(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
+	tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
+	tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
+	tpl_bool_t need_to_release = TPL_FALSE;
+	tpl_bool_t need_to_cancel = TPL_FALSE;
+	buffer_status_t status = RELEASED;
+	int idx = 0;
+
+	while (wl_vk_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
+		tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
+		tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
+		wl_vk_buffer = wl_vk_surface->buffers[idx];
+
+		if (wl_vk_buffer) {
+			wl_vk_surface->buffers[idx] = NULL;
+			wl_vk_surface->buffer_cnt--;
+		} else {
+			tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
+			tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+			idx++;
+			continue;
+		}
+
+		tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
+
+		tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+		status = wl_vk_buffer->status;
+
+		TPL_DEBUG("[idx:%d] wl_vk_buffer(%p) tbm_surface(%p) status(%s)",
+				  idx, wl_vk_buffer,
+				  wl_vk_buffer->tbm_surface,
+				  status_to_string[status]);
+
+		if (status >= ENQUEUED) {
+			tpl_bool_t need_to_wait = TPL_FALSE;
+			tpl_result_t wait_result = TPL_ERROR_NONE;
+
+			if (!wl_vk_display->use_explicit_sync &&
+				status < WAITING_VBLANK)
+				need_to_wait = TPL_TRUE;
+
+			if (wl_vk_display->use_explicit_sync &&
+				status < COMMITTED)
+				need_to_wait = TPL_TRUE;
+
+			if (need_to_wait) {
+				tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+				wait_result = tpl_gcond_timed_wait(&wl_vk_buffer->cond,
+												   &wl_vk_buffer->mutex,
+												   16); /* 16ms */
+				tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
+
+				status = wl_vk_buffer->status;
+
+				if (wait_result == TPL_ERROR_TIME_OUT)
+					TPL_WARN("timeout occurred while waiting to be signaled. wl_vk_buffer(%p)",
+							 wl_vk_buffer);
+			}
+		}
+
+		/* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
+		/* It has been acquired but has not yet been released, so this
+		 * buffer must be released. */
+		need_to_release = (status >= ACQUIRED && status <= COMMITTED);
+
+		/* After dequeue, it has not been enqueued yet
+		 * so cancel_dequeue must be performed. */
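
The range checks in this function (status >= ENQUEUED, status < WAITING_VBLANK, status >= ACQUIRED && status <= COMMITTED) only make sense against an ordered lifecycle. The sketch below spells out the ordering those comparisons imply; the enum declaration itself is not part of this hunk, so treat the names and comments as illustrative, not as the literal definition.

/* Illustrative mirror of the lifecycle the comparisons above assume. */
typedef enum {
	RELEASED = 0,      /* owned by the tbm_surface_queue; nothing to undo */
	DEQUEUED,          /* handed to the app; needs cancel_dequeue         */
	ENQUEUED,          /* submitted by the app; not yet acquired          */
	ACQUIRED,          /* acquired for presentation; needs release        */
	WAITING_SIGNALED,  /* waiting for the render sync fence               */
	WAITING_VBLANK,    /* waiting for the next vblank before commit       */
	COMMITTED,         /* committed to the compositor                     */
} buffer_status_example_t;

/* Acquired but never given back to the queue: must be released. */
static inline tpl_bool_t
_example_need_release(buffer_status_example_t status)
{
	return (status >= ACQUIRED && status <= COMMITTED) ? TPL_TRUE : TPL_FALSE;
}

/* Dequeued but never enqueued: the dequeue must be cancelled. */
static inline tpl_bool_t
_example_need_cancel(buffer_status_example_t status)
{
	return (status == DEQUEUED) ? TPL_TRUE : TPL_FALSE;
}
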
+		need_to_cancel = (status == DEQUEUED);
+
+		if (swapchain && swapchain->tbm_queue) {
+			if (need_to_release) {
+				tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
+													wl_vk_buffer->tbm_surface);
+				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+							wl_vk_buffer->tbm_surface, tsq_err);
+			}
+
+			if (need_to_cancel) {
+				tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
+														   wl_vk_buffer->tbm_surface);
+				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+					TPL_ERR("Failed to cancel dequeue. tbm_surface(%p) tsq_err(%d)",
+							wl_vk_buffer->tbm_surface, tsq_err);
+			}
+		}
+
+		wl_vk_buffer->status = RELEASED;
+
+		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+		if (need_to_release || need_to_cancel)
+			tbm_surface_internal_unref(wl_vk_buffer->tbm_surface);
+
+		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+
+		idx++;
+	}
+}
+
+static tdm_client_vblank*
+_thread_create_tdm_client_vblank(tdm_client *tdm_client)
+{
+	tdm_client_vblank *vblank = NULL;
+	tdm_client_output *tdm_output = NULL;
+	tdm_error tdm_err = TDM_ERROR_NONE;
+
+	if (!tdm_client) {
+		TPL_ERR("Invalid parameter. tdm_client(%p)", tdm_client);
+		return NULL;
+	}
+
+	tdm_output = tdm_client_get_output(tdm_client, "primary", &tdm_err);
+	if (!tdm_output || tdm_err != TDM_ERROR_NONE) {
+		TPL_ERR("Failed to get tdm_client_output. tdm_err(%d)", tdm_err);
+		return NULL;
+	}
+
+	vblank = tdm_client_output_create_vblank(tdm_output, &tdm_err);
+	if (!vblank || tdm_err != TDM_ERROR_NONE) {
+		TPL_ERR("Failed to create vblank. tdm_err(%d)", tdm_err);
+		return NULL;
+	}
+
+	tdm_client_vblank_set_enable_fake(vblank, 1);
+	tdm_client_vblank_set_sync(vblank, 0);
+
+	return vblank;
+}
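
As a usage sketch (not part of this patch): a vblank object configured this way is typically armed with tdm_client_vblank_wait() and fires once an event is dispatched from the tdm_client fd. The handler and helper names below are hypothetical.

#include <tdm_client.h>

/* Hypothetical handler: in this backend it would flip vblank_done and
 * drain vblank_waiting_buffers. */
static void
_example_vblank_cb(tdm_client_vblank *vblank, tdm_error error,
				   unsigned int sequence, unsigned int tv_sec,
				   unsigned int tv_usec, void *user_data)
{
	(void)vblank; (void)error; (void)sequence;
	(void)tv_sec; (void)tv_usec; (void)user_data;
}

static tdm_error
_example_wait_one_vblank(tdm_client *client, tdm_client_vblank *vblank)
{
	/* interval 1 = wake on the next vblank after this request */
	tdm_error err = tdm_client_vblank_wait(vblank, 1, _example_vblank_cb, NULL);
	if (err != TDM_ERROR_NONE)
		return err;

	/* Blocks until an event arrives on the tdm_client fd, then
	 * invokes the handler. */
	return tdm_client_handle_events(client);
}
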
| wl_vk_surface(%p)", + wl_vk_surface); + wl_vk_display->use_explicit_sync = TPL_FALSE; + } + } + + wl_vk_surface->vblank_waiting_buffers = __tpl_list_alloc(); +} + +static void +_thread_wl_vk_surface_fini(tpl_wl_vk_surface_t *wl_vk_surface) +{ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + + TPL_INFO("[SURFACE_FINI]", + "wl_vk_surface(%p) wl_surface(%p)", + wl_vk_surface, wl_vk_surface->wl_surface); + + if (wl_vk_surface->vblank_waiting_buffers) { + __tpl_list_free(wl_vk_surface->vblank_waiting_buffers, NULL); + wl_vk_surface->vblank_waiting_buffers = NULL; + } + + if (wl_vk_surface->surface_sync) { + TPL_INFO("[SURFACE_SYNC_DESTROY]", + "wl_vk_surface(%p) surface_sync(%p)", + wl_vk_surface, wl_vk_surface->surface_sync); + zwp_linux_surface_synchronization_v1_destroy(wl_vk_surface->surface_sync); + wl_vk_surface->surface_sync = NULL; + } + + if (wl_vk_surface->vblank) { + TPL_INFO("[VBLANK_DESTROY]", + "wl_vk_surface(%p) vblank(%p)", + wl_vk_surface, wl_vk_surface->vblank); + tdm_client_vblank_destroy(wl_vk_surface->vblank); + wl_vk_surface->vblank = NULL; + } + + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); +} + +static tpl_bool_t +__thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + + wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource); + + if (message == 1) { /* Initialize surface */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) initialize message received!", + wl_vk_surface); + _thread_wl_vk_surface_init(wl_vk_surface); + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } else if (message == 2) { /* Create tbm_surface_queue */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) queue creation message received!", + wl_vk_surface); + + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } else if (message == 3) { /* Acquirable message */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) acquirable message received!", + wl_vk_surface); + _thread_surface_queue_acquire(wl_vk_surface); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } else if (message == 4) { /* swapchain destroy */ + tpl_gmutex_lock(&wl_vk_surface->surf_mutex); + TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!", + wl_vk_surface); + + tpl_gcond_signal(&wl_vk_surface->surf_cond); + tpl_gmutex_unlock(&wl_vk_surface->surf_mutex); + } + + return TPL_TRUE; +} + +static void +__thread_func_surf_finalize(tpl_gsource *gsource) +{ + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + + wl_vk_surface = (tpl_wl_vk_surface_t *)tpl_gsource_get_data(gsource); + TPL_CHECK_ON_NULL_RETURN(wl_vk_surface); + + _thread_wl_vk_surface_fini(wl_vk_surface); + + TPL_DEBUG("[FINALIZE] wl_vk_surface(%p) tpl_gsource(%p)", + wl_vk_surface, gsource); +} + +static tpl_gsource_functions surf_funcs = { + .prepare = NULL, + .check = NULL, + .dispatch = __thread_func_surf_dispatch, + .finalize = __thread_func_surf_finalize, +}; + + static tpl_result_t __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wl_vk_display_t *wl_vk_display = NULL; - twe_surface_h twe_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; + tpl_wl_vk_display_t *wl_vk_display = NULL; + tpl_gsource *surf_source = NULL; TPL_ASSERT(surface); + TPL_ASSERT(surface->display); TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); 
+
 static tpl_result_t
 __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface)
 {
-	tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
-	tpl_wl_vk_display_t *wl_vk_display = NULL;
-	twe_surface_h twe_surface = NULL;
+	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+	tpl_wl_vk_display_t *wl_vk_display = NULL;
+	tpl_gsource *surf_source = NULL;
 
 	TPL_ASSERT(surface);
+	TPL_ASSERT(surface->display);
 	TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
 	TPL_ASSERT(surface->native_handle);
 
-	wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) calloc(1,
+	wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+
+	wl_vk_surface = (tpl_wl_vk_surface_t *) calloc(1,
 				  sizeof(tpl_wl_vk_surface_t));
-	if (!wayland_vk_wsi_surface) {
+	if (!wl_vk_surface) {
 		TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
 		return TPL_ERROR_OUT_OF_MEMORY;
 	}
 
-	wl_vk_display =
-		(tpl_wl_vk_display_t *)surface->display->backend.data;
-	if (!wl_vk_display) {
-		TPL_ERR("Invalid parameter. wl_vk_display(%p)",
-				wl_vk_display);
-		free(wayland_vk_wsi_surface);
-		return TPL_ERROR_INVALID_PARAMETER;
+	surf_source = tpl_gsource_create(wl_vk_display->thread, (void *)wl_vk_surface,
+									 -1, &surf_funcs, SOURCE_TYPE_NORMAL);
+	if (!surf_source) {
+		TPL_ERR("Failed to create surf_source with wl_vk_surface(%p)",
+				wl_vk_surface);
+		free(wl_vk_surface);
+		surface->backend.data = NULL;
+		return TPL_ERROR_INVALID_OPERATION;
 	}
 
-	surface->backend.data = (void *)wayland_vk_wsi_surface;
-	wayland_vk_wsi_surface->tbm_queue = NULL;
+	surface->backend.data = (void *)wl_vk_surface;
+	surface->width = -1;
+	surface->height = -1;
 
-	twe_surface = twe_surface_add(wl_vk_display->thread,
-								  wl_vk_display->twe_display,
-								  surface->native_handle,
-								  surface->format, surface->num_buffers);
-	if (!twe_surface) {
-		TPL_ERR("Failed to add native_surface(%p) to thread(%p)",
-				surface->native_handle, wl_vk_display->thread);
-		free(wayland_vk_wsi_surface);
-		surface->backend.data = NULL;
-		return TPL_ERROR_OUT_OF_MEMORY;
+	wl_vk_surface->surf_source = surf_source;
+	wl_vk_surface->swapchain = NULL;
+
+	wl_vk_surface->wl_vk_display = wl_vk_display;
+	wl_vk_surface->wl_surface = (struct wl_surface *)surface->native_handle;
+
+	wl_vk_surface->reset = TPL_FALSE;
+	wl_vk_surface->is_activated = TPL_FALSE;
+	wl_vk_surface->vblank_done = TPL_FALSE;
+
+	wl_vk_surface->render_done_cnt = 0;
+
+	wl_vk_surface->vblank = NULL;
+	wl_vk_surface->surface_sync = NULL;
+
+	{
+		int i = 0;
+		for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+			wl_vk_surface->buffers[i] = NULL;
+		wl_vk_surface->buffer_cnt = 0;
 	}
 
-	wayland_vk_wsi_surface->twe_surface = twe_surface;
-	wayland_vk_wsi_surface->is_activated = TPL_FALSE;
-	wayland_vk_wsi_surface->swapchain_buffers = NULL;
+	tpl_gmutex_init(&wl_vk_surface->surf_mutex);
+	tpl_gcond_init(&wl_vk_surface->surf_cond);
 
-	TPL_LOG_T("WL_VK",
-			  "[INIT]tpl_surface(%p) tpl_wl_vk_surface(%p) twe_surface(%p)",
-			  surface, wayland_vk_wsi_surface, twe_surface);
+	tpl_gmutex_init(&wl_vk_surface->buffers_mutex);
+
+	/* Initialize in thread */
+	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+	tpl_gsource_send_message(wl_vk_surface->surf_source, 1);
+	tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+
+	TPL_INFO("[SURFACE_INIT]",
+			 "tpl_surface(%p) wl_vk_surface(%p) gsource(%p)",
+			 surface, wl_vk_surface, wl_vk_surface->surf_source);
 
 	return TPL_ERROR_NONE;
 }
@@ -928,42 +1214,48 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface)
 static void
 __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface)
 {
-	tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
 	tpl_wl_vk_display_t *wl_vk_display = NULL;
 
 	TPL_ASSERT(surface);
 	TPL_ASSERT(surface->display);
 
-	wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
-	if (wayland_vk_wsi_surface == NULL) return;
+	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
+	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
 
 	wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
-	if (wl_vk_display == NULL) return;
+	TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
 
-	if (wayland_vk_wsi_surface->tbm_queue)
-		__tpl_wl_vk_wsi_surface_destroy_swapchain(surface);
+	TPL_INFO("[SURFACE_FINI][BEGIN]",
+			 "wl_vk_surface(%p) wl_surface(%p)",
+			 wl_vk_surface, wl_vk_surface->wl_surface);
 
-	if (wayland_vk_wsi_surface->swapchain_buffers) {
-		free(wayland_vk_wsi_surface->swapchain_buffers);
-		wayland_vk_wsi_surface->swapchain_buffers = NULL;
-	}
+	if (wl_vk_surface->swapchain && wl_vk_surface->swapchain->tbm_queue) {
+		/* finalize swapchain */
 
-	TPL_LOG_T("WL_VK",
-			  "[FINI] wayland_vk_wsi_surface(%p) native_surface(%p) twe_surface(%p)",
-			  wayland_vk_wsi_surface, surface->native_handle,
-			  wayland_vk_wsi_surface->twe_surface);
-
-	if (twe_surface_del(wayland_vk_wsi_surface->twe_surface)
-		!= TPL_ERROR_NONE) {
-		TPL_ERR("Failed to delete twe_surface(%p) from thread(%p)",
-				wayland_vk_wsi_surface->twe_surface,
-				wl_vk_display->thread);
 	}
 
-	wayland_vk_wsi_surface->twe_surface = NULL;
+	wl_vk_surface->swapchain = NULL;
+
+	if (wl_vk_surface->surf_source)
+		tpl_gsource_destroy(wl_vk_surface->surf_source, TPL_TRUE);
+	wl_vk_surface->surf_source = NULL;
 
-	free(wayland_vk_wsi_surface);
+	_print_buffer_lists(wl_vk_surface);
+
+	wl_vk_surface->wl_surface = NULL;
+	wl_vk_surface->wl_vk_display = NULL;
+	wl_vk_surface->tpl_surface = NULL;
+
+	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+	tpl_gmutex_clear(&wl_vk_surface->surf_mutex);
+	tpl_gcond_clear(&wl_vk_surface->surf_cond);
+
+	TPL_INFO("[SURFACE_FINI][END]", "wl_vk_surface(%p)", wl_vk_surface);
+
+	free(wl_vk_surface);
 	surface->backend.data = NULL;
 }
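
One detail worth calling out in the teardown above: the lock/unlock pair taken immediately before tpl_gmutex_clear() is a drain, guaranteeing no other thread is still inside a critical section when the mutex is destroyed. In isolation (a sketch, assuming tpl_gmutex/tpl_gcond are thin wrappers over GLib's GMutex/GCond):

/* Drain-then-destroy idiom from __tpl_wl_vk_wsi_surface_fini(). */
static void
_example_sync_teardown(tpl_gmutex *mutex, tpl_gcond *cond)
{
	tpl_gmutex_lock(mutex);   /* wait out any thread still holding it */
	tpl_gmutex_unlock(mutex);
	tpl_gmutex_clear(mutex);  /* now safe to destroy the primitive */
	tpl_gcond_clear(cond);
}
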
@@ -979,7 +1271,7 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
 	TPL_ASSERT(surface->display->native_handle);
 	TPL_ASSERT(tbm_surface);
 
-	tpl_wl_vk_surface_t *wayland_vk_wsi_surface =
+	tpl_wl_vk_surface_t *wl_vk_surface =
 		(tpl_wl_vk_surface_t *) surface->backend.data;
 	tbm_surface_queue_error_e tsq_err;
 
@@ -999,7 +1291,7 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
 													  num_rects, rects);
 		}
 	}
-	tsq_err = tbm_surface_queue_enqueue(wayland_vk_wsi_surface->tbm_queue,
+	tsq_err = tbm_surface_queue_enqueue(wl_vk_surface->tbm_queue,
 										tbm_surface);
 	if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) {
 		tbm_surface_internal_unref(tbm_surface);
@@ -1010,7 +1302,7 @@ __tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
 	if (sync_fence != -1) {
 		tpl_result_t res = TPL_ERROR_NONE;
-		res = twe_surface_set_sync_fd(wayland_vk_wsi_surface->twe_surface,
+		res = twe_surface_set_sync_fd(wl_vk_surface->twe_surface,
 									  tbm_surface, sync_fence);
 		if (res != TPL_ERROR_NONE) {
 			TPL_WARN("Failed to set sync_fd(%d). Fallback to async mode.",
@@ -1032,23 +1324,23 @@ __tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface)
 	TPL_ASSERT(surface);
 	TPL_ASSERT(surface->backend.data);
 
-	tpl_wl_vk_surface_t *wayland_vk_wsi_surface =
+	tpl_wl_vk_surface_t *wl_vk_surface =
 		(tpl_wl_vk_surface_t *)surface->backend.data;
 
-	return !(wayland_vk_wsi_surface->reset);
+	return !(wl_vk_surface->reset);
 }
 
 static tpl_result_t
 __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
 											   tbm_surface_h tbm_surface)
 {
-	tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
 	tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
 
-	wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
-	if (!wayland_vk_wsi_surface) {
-		TPL_ERR("Invalid backend surface. surface(%p) wayland_vk_wsi_surface(%p)",
-				surface, wayland_vk_wsi_surface);
+	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
+	if (!wl_vk_surface) {
+		TPL_ERR("Invalid backend surface. surface(%p) wl_vk_surface(%p)",
+				surface, wl_vk_surface);
 		return TPL_ERROR_INVALID_PARAMETER;
 	}
 
@@ -1059,7 +1351,7 @@ __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
 
 	tbm_surface_internal_unref(tbm_surface);
 
-	tsq_err = tbm_surface_queue_cancel_dequeue(wayland_vk_wsi_surface->tbm_queue,
+	tsq_err = tbm_surface_queue_cancel_dequeue(wl_vk_surface->tbm_queue,
 											   tbm_surface);
 	if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
 		TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface);
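
The enqueue and cancel paths above share one ownership rule: the extra tbm_surface_internal_ref() taken at dequeue time is dropped exactly when the queue owns the buffer again. A condensed sketch of that rule (error logging trimmed; the helper name is mine):

/* Returns the buffer to the queue, either forward (enqueue) or
 * backward (cancel_dequeue), and drops the dequeue-time reference. */
static tpl_result_t
_example_return_buffer(tbm_surface_queue_h tbm_queue,
					   tbm_surface_h tbm_surface, tpl_bool_t cancel)
{
	tbm_surface_queue_error_e tsq_err;

	if (cancel) {
		tbm_surface_internal_unref(tbm_surface);
		tsq_err = tbm_surface_queue_cancel_dequeue(tbm_queue, tbm_surface);
	} else {
		tsq_err = tbm_surface_queue_enqueue(tbm_queue, tbm_surface);
		if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
			tbm_surface_internal_unref(tbm_surface);
	}

	return (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) ?
		   TPL_ERROR_NONE : TPL_ERROR_INVALID_OPERATION;
}
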
@@ -1082,7 +1374,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
 	TPL_ASSERT(surface->display);
 
 	tbm_surface_h tbm_surface = NULL;
-	tpl_wl_vk_surface_t *wayland_vk_wsi_surface =
+	tpl_wl_vk_surface_t *wl_vk_surface =
 		(tpl_wl_vk_surface_t *)surface->backend.data;
 	tpl_wl_vk_display_t *wl_vk_display =
 		(tpl_wl_vk_display_t *)surface->display->backend.data;
@@ -1096,7 +1388,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
 	TPL_OBJECT_UNLOCK(surface);
 	TRACE_BEGIN("WAIT_DEQUEUEABLE");
 	lock_res = twe_display_lock(wl_vk_display->twe_display);
-	res = twe_surface_wait_dequeueable(wayland_vk_wsi_surface->twe_surface,
+	res = twe_surface_wait_dequeueable(wl_vk_surface->twe_surface,
 									   timeout_ns);
 	TRACE_END();
 	TPL_OBJECT_LOCK(surface);
@@ -1109,26 +1401,26 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
 		return NULL;
 	} else if (res != TPL_ERROR_NONE) {
 		TPL_ERR("Invalid operation. twe_surface(%p) timeout_ns(%" PRIu64 ")",
-				wayland_vk_wsi_surface->twe_surface, timeout_ns);
+				wl_vk_surface->twe_surface, timeout_ns);
 		if (lock_res == TPL_ERROR_NONE)
 			twe_display_unlock(wl_vk_display->twe_display);
 		return NULL;
 	}
 
-	if (wayland_vk_wsi_surface->reset) {
+	if (wl_vk_surface->reset) {
 		TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. Do not process dequeue.",
-				  wayland_vk_wsi_surface->tbm_queue);
+				  wl_vk_surface->tbm_queue);
 		if (lock_res == TPL_ERROR_NONE)
 			twe_display_unlock(wl_vk_display->twe_display);
 		return NULL;
 	}
 
-	tsq_err = tbm_surface_queue_dequeue(wayland_vk_wsi_surface->tbm_queue,
+	tsq_err = tbm_surface_queue_dequeue(wl_vk_surface->tbm_queue,
 										&tbm_surface);
 	if (!tbm_surface) {
 		TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d",
-				wayland_vk_wsi_surface->tbm_queue, tsq_err);
+				wl_vk_surface->tbm_queue, tsq_err);
 		if (lock_res == TPL_ERROR_NONE)
 			twe_display_unlock(wl_vk_display->twe_display);
 		return NULL;
@@ -1141,7 +1433,7 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
 	}
 
 	TPL_LOG_T("WL_VK", "[DEQ] tbm_queue(%p) tbm_surface(%p) bo(%d)",
-			  wayland_vk_wsi_surface->tbm_queue, tbm_surface,
+			  wl_vk_surface->tbm_queue, tbm_surface,
 			  tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
 
 	if (lock_res == TPL_ERROR_NONE)
@@ -1155,7 +1447,7 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface,
 											  tbm_surface_h **buffers,
 											  int *buffer_count)
 {
-	tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
 	tpl_wl_vk_display_t *wl_vk_display = NULL;
 	int i;
 	tpl_result_t ret = TPL_ERROR_NONE;
@@ -1167,51 +1459,51 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface,
 	TPL_ASSERT(buffers);
 	TPL_ASSERT(buffer_count);
 
-	wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
+	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
 	wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
 
 	if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) {
-		ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface,
+		ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface,
 												NULL, buffer_count);
 		if (ret != TPL_ERROR_NONE) {
 			TPL_ERR("Failed to get buffer_count. twe_surface(%p)",
-					wayland_vk_wsi_surface->twe_surface);
+					wl_vk_surface->twe_surface);
 			twe_display_unlock(wl_vk_display->twe_display);
 			return ret;
 		}
 
-		wayland_vk_wsi_surface->swapchain_buffers = (tbm_surface_h *)calloc(
+		wl_vk_surface->swapchain_buffers = (tbm_surface_h *)calloc(
 				*buffer_count, sizeof(tbm_surface_h));
-		if (!wayland_vk_wsi_surface->swapchain_buffers) {
+		if (!wl_vk_surface->swapchain_buffers) {
 			TPL_ERR("Failed to allocate memory for buffers.");
 			twe_display_unlock(wl_vk_display->twe_display);
 			return TPL_ERROR_OUT_OF_MEMORY;
 		}
 
-		ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface,
-												wayland_vk_wsi_surface->swapchain_buffers,
+		ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface,
+												wl_vk_surface->swapchain_buffers,
 												buffer_count);
 		if (ret != TPL_ERROR_NONE) {
-			TPL_ERR("Failed to get swapchain_buffers. wayland_vk_wsi_surface(%p) twe_surface(%p)",
-					wayland_vk_wsi_surface, wayland_vk_wsi_surface->twe_surface);
-			free(wayland_vk_wsi_surface->swapchain_buffers);
-			wayland_vk_wsi_surface->swapchain_buffers = NULL;
+			TPL_ERR("Failed to get swapchain_buffers. wl_vk_surface(%p) twe_surface(%p)",
+					wl_vk_surface, wl_vk_surface->twe_surface);
+			free(wl_vk_surface->swapchain_buffers);
+			wl_vk_surface->swapchain_buffers = NULL;
 			twe_display_unlock(wl_vk_display->twe_display);
 			return ret;
 		}
 
 		for (i = 0; i < *buffer_count; i++) {
-			if (wayland_vk_wsi_surface->swapchain_buffers[i]) {
+			if (wl_vk_surface->swapchain_buffers[i]) {
 				TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
-						  i, wayland_vk_wsi_surface->swapchain_buffers[i],
+						  i, wl_vk_surface->swapchain_buffers[i],
 						  tbm_bo_export(tbm_surface_internal_get_bo(
-								  wayland_vk_wsi_surface->swapchain_buffers[i], 0)));
-				tbm_surface_internal_ref(wayland_vk_wsi_surface->swapchain_buffers[i]);
+								  wl_vk_surface->swapchain_buffers[i], 0)));
+				tbm_surface_internal_ref(wl_vk_surface->swapchain_buffers[i]);
 			}
 		}
 
-		*buffers = wayland_vk_wsi_surface->swapchain_buffers;
+		*buffers = wl_vk_surface->swapchain_buffers;
 
 		twe_display_unlock(wl_vk_display->twe_display);
 	}
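
get_swapchain_buffers() above uses the classic count-then-fill protocol: the first twe_surface_get_swapchain_buffers() call with NULL storage only reports the count, the caller allocates, and the second call fills the array. Reduced to its skeleton (a sketch assuming <stdlib.h> and the usual tpl/twe headers):

static tpl_result_t
_example_query_buffers(twe_surface_h twe_surface,
					   tbm_surface_h **out_buffers, int *out_count)
{
	tbm_surface_h *buffers = NULL;
	int count = 0;
	tpl_result_t ret;

	/* First call with NULL storage only reports the count. */
	ret = twe_surface_get_swapchain_buffers(twe_surface, NULL, &count);
	if (ret != TPL_ERROR_NONE)
		return ret;

	buffers = (tbm_surface_h *)calloc(count, sizeof(tbm_surface_h));
	if (!buffers)
		return TPL_ERROR_OUT_OF_MEMORY;

	/* Second call fills the array the caller just sized. */
	ret = twe_surface_get_swapchain_buffers(twe_surface, buffers, &count);
	if (ret != TPL_ERROR_NONE) {
		free(buffers);
		return ret;
	}

	*out_buffers = buffers;
	*out_count = count;
	return TPL_ERROR_NONE;
}
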
@@ -1224,34 +1516,34 @@ __cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue,
 											  void *data)
 {
 	tpl_surface_t *surface = NULL;
-	tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
 	tpl_bool_t is_activated = TPL_FALSE;
 
 	surface = (tpl_surface_t *)data;
 	TPL_CHECK_ON_NULL_RETURN(surface);
 
-	wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
-	TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface);
+	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
+	TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
 
 	/* When queue_reset_callback is called, if is_activated is different from
 	 * its previous state change the reset flag to TPL_TRUE to get a new buffer
 	 * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
-	is_activated = twe_surface_check_activated(wayland_vk_wsi_surface->twe_surface);
+	is_activated = twe_surface_check_activated(wl_vk_surface->twe_surface);
 
-	if (wayland_vk_wsi_surface->is_activated != is_activated) {
+	if (wl_vk_surface->is_activated != is_activated) {
 		if (is_activated) {
 			TPL_LOG_T("WL_VK",
-					  "[ACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)",
-					  wayland_vk_wsi_surface, surface_queue);
+					  "[ACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)",
+					  wl_vk_surface, surface_queue);
 		} else {
 			TPL_LOG_T("WL_VK",
-					  "[DEACTIVATED_CB] wayland_vk_wsi_surface(%p) tbm_queue(%p)",
-					  wayland_vk_wsi_surface, surface_queue);
+					  "[DEACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)",
+					  wl_vk_surface, surface_queue);
 		}
-		wayland_vk_wsi_surface->is_activated = is_activated;
+		wl_vk_surface->is_activated = is_activated;
 	}
 
-	wayland_vk_wsi_surface->reset = TPL_TRUE;
+	wl_vk_surface->reset = TPL_TRUE;
 
 	if (surface->reset_cb)
 		surface->reset_cb(surface->reset_data);
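
For context, registering such a reset callback is a one-liner against the tbm_surface_queue API, and the callback itself only flips bookkeeping flags rather than touching the queue synchronously. A minimal registration sketch (names are hypothetical):

static void
_example_on_reset(tbm_surface_queue_h tbm_queue, void *data)
{
	/* flag-only work, as in __cb_tbm_queue_reset_callback() above */
	(void)tbm_queue; (void)data;
}

static tpl_bool_t
_example_install_reset_cb(tbm_surface_queue_h tbm_queue, void *data)
{
	return tbm_surface_queue_add_reset_cb(tbm_queue, _example_on_reset, data)
		   == TBM_SURFACE_QUEUE_ERROR_NONE ? TPL_TRUE : TPL_FALSE;
}
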
@@ -1262,7 +1554,7 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
 									  tbm_format format, int width,
 									  int height, int buffer_count, int present_mode)
 {
-	tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
 	tpl_wl_vk_display_t *wl_vk_display = NULL;
 	tpl_result_t res = TPL_ERROR_NONE;
 
@@ -1270,78 +1562,78 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
 	TPL_ASSERT(surface->backend.data);
 	TPL_ASSERT(surface->display);
 
-	wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
-	TPL_ASSERT(wayland_vk_wsi_surface);
+	wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
+	TPL_ASSERT(wl_vk_surface);
 
 	wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
 	TPL_ASSERT(wl_vk_display);
 
-	if (wayland_vk_wsi_surface->tbm_queue) {
-		int old_width = tbm_surface_queue_get_width(wayland_vk_wsi_surface->tbm_queue);
-		int old_height = tbm_surface_queue_get_height(wayland_vk_wsi_surface->tbm_queue);
+	if (wl_vk_surface->tbm_queue) {
+		int old_width = tbm_surface_queue_get_width(wl_vk_surface->tbm_queue);
+		int old_height = tbm_surface_queue_get_height(wl_vk_surface->tbm_queue);
 
 		if (old_width != width || old_height != height) {
-			tbm_surface_queue_reset(wayland_vk_wsi_surface->tbm_queue,
+			tbm_surface_queue_reset(wl_vk_surface->tbm_queue,
 									width, height, format);
 			TPL_LOG_T("WL_VK",
-					  "[RESIZE] wayland_vk_wsi_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)",
-					  wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue,
+					  "[RESIZE] wl_vk_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)",
+					  wl_vk_surface, wl_vk_surface->tbm_queue,
 					  old_width, old_height, width, height);
 		}
 
-		if (wayland_vk_wsi_surface->swapchain_buffers) {
+		if (wl_vk_surface->swapchain_buffers) {
 			int i;
-			for (i = 0; i < wayland_vk_wsi_surface->buffer_count; i++) {
-				if (wayland_vk_wsi_surface->swapchain_buffers[i]) {
-					TPL_DEBUG("unref tbm_surface(%p)", wayland_vk_wsi_surface->swapchain_buffers[i]);
-					tbm_surface_internal_unref(wayland_vk_wsi_surface->swapchain_buffers[i]);
-					wayland_vk_wsi_surface->swapchain_buffers[i] = NULL;
+			for (i = 0; i < wl_vk_surface->buffer_count; i++) {
+				if (wl_vk_surface->swapchain_buffers[i]) {
+					TPL_DEBUG("unref tbm_surface(%p)", wl_vk_surface->swapchain_buffers[i]);
+					tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]);
+					wl_vk_surface->swapchain_buffers[i] = NULL;
 				}
 			}
 
-			free(wayland_vk_wsi_surface->swapchain_buffers);
-			wayland_vk_wsi_surface->swapchain_buffers = NULL;
+			free(wl_vk_surface->swapchain_buffers);
+			wl_vk_surface->swapchain_buffers = NULL;
 		}
 
-		wayland_vk_wsi_surface->buffer_count =
-			tbm_surface_queue_get_size(wayland_vk_wsi_surface->tbm_queue);
-		wayland_vk_wsi_surface->reset = TPL_FALSE;
+		wl_vk_surface->buffer_count =
+			tbm_surface_queue_get_size(wl_vk_surface->tbm_queue);
+		wl_vk_surface->reset = TPL_FALSE;
 
-		__tpl_util_atomic_inc(&wayland_vk_wsi_surface->swapchain_reference);
+		__tpl_util_atomic_inc(&wl_vk_surface->swapchain_reference);
 
-		TPL_LOG_T("WL_VK", "[REUSE] wayland_vk_wsi_surface(%p) tbm_queue(%p) size(%d)",
-				  wayland_vk_wsi_surface, wayland_vk_wsi_surface->tbm_queue,
-				  wayland_vk_wsi_surface->buffer_count);
+		TPL_LOG_T("WL_VK", "[REUSE] wl_vk_surface(%p) tbm_queue(%p) size(%d)",
+				  wl_vk_surface, wl_vk_surface->tbm_queue,
+				  wl_vk_surface->buffer_count);
 
 		return TPL_ERROR_NONE;
 	}
 
-	res = twe_surface_create_swapchain(wayland_vk_wsi_surface->twe_surface,
+	res = twe_surface_create_swapchain(wl_vk_surface->twe_surface,
 									   width, height, format,
 									   buffer_count, present_mode);
 	if (res != TPL_ERROR_NONE) {
 		TPL_ERR("Failed to create swapchain. twe_surface(%p)",
-				wayland_vk_wsi_surface->twe_surface);
+				wl_vk_surface->twe_surface);
 		return res;
 	}
 
-	wayland_vk_wsi_surface->tbm_queue = twe_surface_get_tbm_queue(
-											wayland_vk_wsi_surface->twe_surface);
+	wl_vk_surface->tbm_queue = twe_surface_get_tbm_queue(
+								   wl_vk_surface->twe_surface);
 
 	/* Set reset_callback to tbm_queue */
-	if (tbm_surface_queue_add_reset_cb(wayland_vk_wsi_surface->tbm_queue,
+	if (tbm_surface_queue_add_reset_cb(wl_vk_surface->tbm_queue,
 									   __cb_tbm_queue_reset_callback,
 									   (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
 		TPL_ERR("TBM surface queue add reset cb failed!");
-		twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface);
-		wayland_vk_wsi_surface->tbm_queue = NULL;
+		twe_surface_destroy_swapchain(wl_vk_surface->twe_surface);
+		wl_vk_surface->tbm_queue = NULL;
 		return TPL_ERROR_INVALID_OPERATION;
 	}
 
-	wayland_vk_wsi_surface->buffer_count = buffer_count;
-	wayland_vk_wsi_surface->reset = TPL_FALSE;
+	wl_vk_surface->buffer_count = buffer_count;
+	wl_vk_surface->reset = TPL_FALSE;
 
-	__tpl_util_atomic_set(&wayland_vk_wsi_surface->swapchain_reference, 1);
+	__tpl_util_atomic_set(&wl_vk_surface->swapchain_reference, 1);
 
 	return TPL_ERROR_NONE;
 }
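
The swapchain reuse branch above leans on a small reference-count protocol: creation sets swapchain_reference to 1, every REUSE hit increments it, and destroy_swapchain() below only tears the swapchain down when the count drops back to 0. Isolated as a sketch (helper names are mine):

static void
_example_swapchain_ref(tpl_wl_vk_surface_t *wl_vk_surface)
{
	__tpl_util_atomic_inc(&wl_vk_surface->swapchain_reference);
}

/* TPL_TRUE means the caller held the last reference and must actually
 * destroy the swapchain; otherwise it stays alive for other holders. */
static tpl_bool_t
_example_swapchain_unref(tpl_wl_vk_surface_t *wl_vk_surface)
{
	return __tpl_util_atomic_dec(&wl_vk_surface->swapchain_reference) == 0 ?
		   TPL_TRUE : TPL_FALSE;
}
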
twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); return res; } - wayland_vk_wsi_surface->tbm_queue = twe_surface_get_tbm_queue( - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->tbm_queue = twe_surface_get_tbm_queue( + wl_vk_surface->twe_surface); /* Set reset_callback to tbm_queue */ - if (tbm_surface_queue_add_reset_cb(wayland_vk_wsi_surface->tbm_queue, + if (tbm_surface_queue_add_reset_cb(wl_vk_surface->tbm_queue, __cb_tbm_queue_reset_callback, (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) { TPL_ERR("TBM surface queue add reset cb failed!"); - twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); - wayland_vk_wsi_surface->tbm_queue = NULL; + twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); + wl_vk_surface->tbm_queue = NULL; return TPL_ERROR_INVALID_OPERATION; } - wayland_vk_wsi_surface->buffer_count = buffer_count; - wayland_vk_wsi_surface->reset = TPL_FALSE; + wl_vk_surface->buffer_count = buffer_count; + wl_vk_surface->reset = TPL_FALSE; - __tpl_util_atomic_set(&wayland_vk_wsi_surface->swapchain_reference, 1); + __tpl_util_atomic_set(&wl_vk_surface->swapchain_reference, 1); return TPL_ERROR_NONE; } @@ -1349,7 +1641,7 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { - tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wl_vk_surface = NULL; tpl_wl_vk_display_t *wl_vk_display = NULL; tpl_result_t res = TPL_ERROR_NONE; unsigned int ref; @@ -1359,49 +1651,49 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) TPL_ASSERT(surface->display); TPL_ASSERT(surface->display->backend.data); - wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data; wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data; if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) { - ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference); + ref = __tpl_util_atomic_dec(&wl_vk_surface->swapchain_reference); if (ref > 0) { TPL_LOG_T("WL_VK", "This swapchain is still valid. 
| twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } - if (wayland_vk_wsi_surface->reset) { + if (wl_vk_surface->reset) { TPL_LOG_T("WL_VK", "Since reset is in the TRUE state, it will not be destroyed."); twe_display_unlock(wl_vk_display->twe_display); return TPL_ERROR_NONE; } - if (wayland_vk_wsi_surface->swapchain_buffers) { + if (wl_vk_surface->swapchain_buffers) { int i; - for (i = 0; i < wayland_vk_wsi_surface->buffer_count; i++) { + for (i = 0; i < wl_vk_surface->buffer_count; i++) { TPL_DEBUG("Stop tracking tbm_surface(%p)", - wayland_vk_wsi_surface->swapchain_buffers[i]); - tbm_surface_internal_unref(wayland_vk_wsi_surface->swapchain_buffers[i]); - wayland_vk_wsi_surface->swapchain_buffers[i] = NULL; + wl_vk_surface->swapchain_buffers[i]); + tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]); + wl_vk_surface->swapchain_buffers[i] = NULL; } - free(wayland_vk_wsi_surface->swapchain_buffers); - wayland_vk_wsi_surface->swapchain_buffers = NULL; + free(wl_vk_surface->swapchain_buffers); + wl_vk_surface->swapchain_buffers = NULL; } - res = twe_surface_destroy_swapchain(wayland_vk_wsi_surface->twe_surface); + res = twe_surface_destroy_swapchain(wl_vk_surface->twe_surface); if (res != TPL_ERROR_NONE) { TPL_ERR("Failed to destroy swapchain. twe_surface(%p)", - wayland_vk_wsi_surface->twe_surface); + wl_vk_surface->twe_surface); twe_display_unlock(wl_vk_display->twe_display); return res; } - wayland_vk_wsi_surface->tbm_queue = NULL; + wl_vk_surface->tbm_queue = NULL; twe_display_unlock(wl_vk_display->twe_display); } @@ -1458,3 +1750,101 @@ __tpl_surface_init_backend_wl_vk_wsi_thread(tpl_surface_backend_t *backend) backend->create_swapchain = __tpl_wl_vk_wsi_surface_create_swapchain; backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain; } + +static void +__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer) +{ + tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface; + tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display; + + TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)", + wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface); + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) { + wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL; + wl_egl_surface->buffer_cnt--; + + wl_egl_buffer->idx = -1; + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); + + wl_display_flush(wl_egl_display->wl_display); + + if (wl_egl_buffer->wl_buffer) { + wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client, + (void *)wl_egl_buffer->wl_buffer); + wl_egl_buffer->wl_buffer = NULL; + } + + if (wl_egl_buffer->buffer_release) { + zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release); + wl_egl_buffer->buffer_release = NULL; + } + + if (wl_egl_buffer->release_fence_fd != -1) { + close(wl_egl_buffer->release_fence_fd); + wl_egl_buffer->release_fence_fd = -1; + } + + if (wl_egl_buffer->waiting_source) { + tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE); + wl_egl_buffer->waiting_source = NULL; + } + + if (wl_egl_buffer->commit_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send commit_sync signal to fd(%d)", + wl_egl_buffer->commit_sync_fd); + close(wl_egl_buffer->commit_sync_fd); + wl_egl_buffer->commit_sync_fd = -1; 
+ } + + if (wl_egl_buffer->presentation_sync_fd != -1) { + int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd); + if (ret == -1) + TPL_ERR("Failed to send presentation_sync signal to fd(%d)", + wl_egl_buffer->presentation_sync_fd); + close(wl_egl_buffer->presentation_sync_fd); + wl_egl_buffer->presentation_sync_fd = -1; + } + + if (wl_egl_buffer->rects) { + free(wl_egl_buffer->rects); + wl_egl_buffer->rects = NULL; + wl_egl_buffer->num_rects = 0; + } + + wl_egl_buffer->tbm_surface = NULL; + wl_egl_buffer->bo_name = -1; + + free(wl_egl_buffer); +} + +static int +_get_tbm_surface_bo_name(tbm_surface_h tbm_surface) +{ + return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)); +} + +static void +_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface) +{ + int idx = 0; + + tpl_gmutex_lock(&wl_egl_surface->buffers_mutex); + TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)", + wl_egl_surface, wl_egl_surface->buffer_cnt); + for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) { + tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx]; + if (wl_egl_buffer) { + TPL_INFO("[INFO]", + "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)", + idx, wl_egl_buffer, wl_egl_buffer->tbm_surface, + wl_egl_buffer->bo_name, + status_to_string[wl_egl_buffer->status]); + } + } + tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex); +} -- 2.7.4