_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
+static void
+_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
+ tpl_wl_vk_buffer_t *wl_vk_buffer);
static tpl_bool_t
_check_native_handle_is_wl_display(tpl_handle_t native_dpy)
tpl_wl_vk_surface_t *wl_vk_surface = NULL;
tpl_wl_vk_display_t *wl_vk_display = NULL;
tpl_wl_vk_swapchain_t *swapchain = NULL;
- tpl_result_t res = TPL_ERROR_NONE;
TPL_ASSERT(surface);
TPL_ASSERT(surface->display);
tpl_wl_vk_swapchain_t *swapchain = NULL;
tpl_wl_vk_surface_t *wl_vk_surface = NULL;
tpl_wl_vk_display_t *wl_vk_display = NULL;
- tpl_result_t res = TPL_ERROR_NONE;
- unsigned int ref;
TPL_ASSERT(surface);
TPL_ASSERT(surface->display);
TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
wl_vk_display->wl_tbm_client, tbm_surface);
} else {
- TPL_LOG_T("WL_EGL",
+ TPL_LOG_T("WL_VK",
"[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
}
return TPL_ERROR_NONE;
}
+/* Handler for the zwp_linux_buffer_release_v1 "fenced_release" event
+ * (explicit-sync path).
+ *
+ * The compositor is finished presenting the buffer; it may be reused
+ * once the supplied release fence signals. Stores the fence fd on the
+ * wl_vk_buffer, marks it RELEASED, and returns it to the swapchain's
+ * tbm_queue.
+ *
+ * data    : the tpl_wl_vk_buffer_t registered with the release proxy.
+ * release : per-commit release proxy (single-shot; destroyed here).
+ * fence   : release fence fd; ownership moves to release_fence_fd.
+ */
+static void
+__cb_buffer_fenced_release(void *data,
+                           struct zwp_linux_buffer_release_v1 *release,
+                           int32_t fence)
+{
+	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
+	tbm_surface_h tbm_surface = NULL;
+
+	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
+
+	tbm_surface = wl_vk_buffer->tbm_surface;
+
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
+		tpl_wl_vk_swapchain_t *swapchain = NULL;
+
+		/* Without a live surface/swapchain there is no queue to return
+		 * the buffer to; drop the reference and bail out. */
+		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
+			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
+			tbm_surface_internal_unref(tbm_surface);
+			return;
+		}
+
+		swapchain = wl_vk_surface->swapchain;
+
+		tpl_gmutex_lock(&wl_vk_buffer->mutex);
+		/* Only a buffer still in COMMITTED state may transition to
+		 * RELEASED here.
+		 * NOTE(review): when status != COMMITTED the incoming fence fd
+		 * is neither stored nor closed — looks like an fd leak; confirm
+		 * whether that state is actually reachable on this path. */
+		if (wl_vk_buffer->status == COMMITTED) {
+			tbm_surface_queue_error_e tsq_err;
+
+			/* The release proxy is one-shot; destroy it now that the
+			 * event has arrived. */
+			zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
+			wl_vk_buffer->buffer_release = NULL;
+
+			wl_vk_buffer->release_fence_fd = fence;
+			wl_vk_buffer->status = RELEASED;
+
+			TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
+					   wl_vk_buffer->bo_name,
+					   fence);
+			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
+							wl_vk_buffer->bo_name);
+
+			TPL_LOG_T("WL_VK",
+					  "[FENCED_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+					  wl_vk_buffer, tbm_surface,
+					  wl_vk_buffer->bo_name,
+					  fence);
+
+			/* Hand the buffer back to the swapchain queue for reuse. */
+			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
+												tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+			/* Presumably balances a ref taken on the commit path —
+			 * TODO confirm against the acquire/commit code. */
+			tbm_surface_internal_unref(tbm_surface);
+		}
+
+		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+	}
+}
+
+/* Handler for the zwp_linux_buffer_release_v1 "immediate_release" event
+ * (explicit-sync path, no fence needed).
+ *
+ * Same flow as __cb_buffer_fenced_release except the buffer is reusable
+ * right away, so release_fence_fd is set to -1 instead of a fence fd.
+ *
+ * data    : the tpl_wl_vk_buffer_t registered with the release proxy.
+ * release : per-commit release proxy (single-shot; destroyed here).
+ */
+static void
+__cb_buffer_immediate_release(void *data,
+                              struct zwp_linux_buffer_release_v1 *release)
+{
+	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
+	tbm_surface_h tbm_surface = NULL;
+
+	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer);
+
+	tbm_surface = wl_vk_buffer->tbm_surface;
+
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
+		tpl_wl_vk_swapchain_t *swapchain = NULL;
+
+		/* Without a live surface/swapchain there is no queue to return
+		 * the buffer to; drop the reference and bail out. */
+		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
+			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
+			tbm_surface_internal_unref(tbm_surface);
+			return;
+		}
+
+		swapchain = wl_vk_surface->swapchain;
+
+		tpl_gmutex_lock(&wl_vk_buffer->mutex);
+		/* Only a buffer still in COMMITTED state may transition to
+		 * RELEASED here. */
+		if (wl_vk_buffer->status == COMMITTED) {
+			tbm_surface_queue_error_e tsq_err;
+
+			/* The release proxy is one-shot; destroy it now that the
+			 * event has arrived. */
+			zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
+			wl_vk_buffer->buffer_release = NULL;
+
+			/* No fence accompanies an immediate release. */
+			wl_vk_buffer->release_fence_fd = -1;
+			wl_vk_buffer->status = RELEASED;
+
+			TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
+					   _get_tbm_surface_bo_name(tbm_surface));
+			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
+							_get_tbm_surface_bo_name(tbm_surface));
+
+			TPL_LOG_T("WL_VK",
+					  "[IMMEDIATE_RELEASE] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
+					  wl_vk_buffer, tbm_surface,
+					  _get_tbm_surface_bo_name(tbm_surface));
+
+			/* Hand the buffer back to the swapchain queue for reuse. */
+			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
+												tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+			/* Presumably balances a ref taken on the commit path —
+			 * TODO confirm against the acquire/commit code. */
+			tbm_surface_internal_unref(tbm_surface);
+		}
+
+		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+	}
+}
+
+/* Listener for zwp_linux_buffer_release_v1; entry order must match the
+ * protocol event order (fenced_release first, then immediate_release).
+ * NOTE(review): "listner" is a typo, but the identifier is referenced
+ * by the commit path below — renaming needs a coordinated change. */
+static const struct zwp_linux_buffer_release_v1_listener zwp_release_listner = {
+	__cb_buffer_fenced_release,
+	__cb_buffer_immediate_release,
+};
+
+/* Handler for the core wl_buffer "release" event — the legacy release
+ * path used when explicit sync is NOT in effect for the commit.
+ *
+ * Returns the committed buffer to the swapchain's tbm_queue and marks
+ * it RELEASED. No fence is involved on this path.
+ *
+ * data      : the tpl_wl_vk_buffer_t registered with the wl_buffer.
+ * wl_buffer : the released buffer proxy (typed wl_proxy here to match
+ *             how the listener table is populated below).
+ */
+static void
+__cb_wl_buffer_release(void *data, struct wl_proxy *wl_buffer)
+{
+	tpl_wl_vk_buffer_t *wl_vk_buffer = (tpl_wl_vk_buffer_t *)data;
+	tbm_surface_h tbm_surface = NULL;
+
+	/* NOTE(review): missing ';' here, unlike the other callbacks —
+	 * works only if the macro expansion supplies the statement end. */
+	TPL_CHECK_ON_NULL_RETURN(wl_vk_buffer)
+
+	tbm_surface = wl_vk_buffer->tbm_surface;
+
+	if (tbm_surface_internal_is_valid(tbm_surface)) {
+		tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
+		tpl_wl_vk_swapchain_t *swapchain = NULL;
+		tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+		/* Without a live surface/swapchain there is no queue to return
+		 * the buffer to; drop the reference and bail out. */
+		if (wl_vk_surface == NULL || wl_vk_surface->swapchain == NULL) {
+			TPL_ERR("Invalid wl_vk_surface(%p)", wl_vk_surface);
+			tbm_surface_internal_unref(tbm_surface);
+			return;
+		}
+
+		swapchain = wl_vk_surface->swapchain;
+
+		tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+		/* Only a buffer still in COMMITTED state may be released. */
+		if (wl_vk_buffer->status == COMMITTED) {
+
+			/* Hand the buffer back to the swapchain queue for reuse. */
+			tsq_err = tbm_surface_queue_release(swapchain->tbm_queue,
+												tbm_surface);
+			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+				TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
+
+			wl_vk_buffer->status = RELEASED;
+
+			TRACE_MARK("[RELEASE] BO(%d)", wl_vk_buffer->bo_name);
+			TRACE_ASYNC_END(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
+							wl_vk_buffer->bo_name);
+
+			TPL_LOG_T("WL_VK", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
+					  wl_vk_buffer->wl_buffer, tbm_surface,
+					  wl_vk_buffer->bo_name);
+
+			/* Presumably balances a ref taken on the commit path —
+			 * TODO confirm against the acquire/commit code. */
+			tbm_surface_internal_unref(tbm_surface);
+		}
+
+		tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+	} else {
+		TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
+	}
+}
+
+/* Listener for the core wl_buffer interface (legacy release path).
+ * The callback is declared with struct wl_proxy * instead of
+ * struct wl_buffer *, hence the cast.
+ * NOTE(review): converting a function pointer via (void *) is not
+ * strictly conforming ISO C — relies on the supported toolchains. */
+static const struct wl_buffer_listener wl_buffer_release_listener = {
+	(void *)__cb_wl_buffer_release,
+};
+
+/* TDM client vblank callback.
+ *
+ * Marks the surface's vblank as done and, under surf_mutex, pops the
+ * next buffer waiting in vblank_waiting_buffers (if any) and commits
+ * it — this paces commits to the display's vblank.
+ *
+ * A TDM_ERROR_TIMEOUT is only logged; processing continues so that a
+ * missed vblank does not stall the commit pipeline.
+ */
+static void
+__cb_tdm_client_vblank(tdm_client_vblank *vblank, tdm_error error,
+					   unsigned int sequence, unsigned int tv_sec,
+					   unsigned int tv_usec, void *user_data)
+{
+	tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)user_data;
+	tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
+
+	/* NOTE(review): (int)pointer truncates on 64-bit; used only as a
+	 * trace correlation id, but collisions are possible — confirm. */
+	TRACE_ASYNC_END((int)wl_vk_surface, "WAIT_VBLANK");
+	TPL_DEBUG("[VBLANK] wl_vk_surface(%p)", wl_vk_surface);
+
+	if (error == TDM_ERROR_TIMEOUT)
+		TPL_WARN("[TDM_ERROR_TIMEOUT] It will keep going. wl_vk_surface(%p)",
+				 wl_vk_surface);
+
+	wl_vk_surface->vblank_done = TPL_TRUE;
+
+	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+	/* One buffer per vblank: pop the oldest deferred buffer and
+	 * commit it now. */
+	wl_vk_buffer = (tpl_wl_vk_buffer_t *)__tpl_list_pop_front(
+						wl_vk_surface->vblank_waiting_buffers,
+						NULL);
+	if (wl_vk_buffer)
+		_thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
+	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+}
+
+/* Requests a vblank notification for the surface.
+ *
+ * Lazily creates the per-surface tdm_client_vblank object on first
+ * use, then schedules __cb_tdm_client_vblank to fire after
+ * post_interval vblanks. On success vblank_done is cleared until the
+ * callback runs.
+ *
+ * Returns TPL_ERROR_NONE on success, TPL_ERROR_OUT_OF_MEMORY if the
+ * vblank object could not be created, or TPL_ERROR_INVALID_OPERATION
+ * if tdm_client_vblank_wait fails.
+ */
+static tpl_result_t
+_thread_surface_vblank_wait(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+	tdm_error tdm_err = TDM_ERROR_NONE;
+	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
+
+	/* Lazy creation: the vblank object lives on the surface and is
+	 * reused for every subsequent wait. */
+	if (wl_vk_surface->vblank == NULL) {
+		wl_vk_surface->vblank =
+			_thread_create_tdm_client_vblank(wl_vk_display->tdm_client);
+		if (!wl_vk_surface->vblank) {
+			TPL_WARN("Failed to create vblank. wl_vk_surface(%p)",
+					 wl_vk_surface);
+			return TPL_ERROR_OUT_OF_MEMORY;
+		}
+	}
+
+	tdm_err = tdm_client_vblank_wait(wl_vk_surface->vblank,
+									 wl_vk_surface->post_interval,
+									 __cb_tdm_client_vblank,
+									 (void *)wl_vk_surface);
+
+	if (tdm_err == TDM_ERROR_NONE) {
+		wl_vk_surface->vblank_done = TPL_FALSE;
+		/* NOTE(review): (int)pointer trace id — see vblank callback. */
+		TRACE_ASYNC_BEGIN((int)wl_vk_surface, "WAIT_VBLANK");
+	} else {
+		TPL_ERR("Failed to tdm_client_vblank_wait. tdm_err(%d)", tdm_err);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	return TPL_ERROR_NONE;
+}
+
+/* Commits one buffer to the wl_surface (runs on the backend thread).
+ *
+ * Steps: lazily create the wl_buffer, attach it, post damage, arm the
+ * appropriate release mechanism (explicit-sync release proxy or legacy
+ * wl_buffer listener), commit + flush, mark the buffer COMMITTED and
+ * signal any waiter, then optionally schedule the next vblank wait.
+ *
+ * wl_vk_surface : target surface; must have a valid wl_vk_display.
+ * wl_vk_buffer  : buffer to present; must not be NULL (asserted).
+ */
+static void
+_thread_wl_surface_commit(tpl_wl_vk_surface_t *wl_vk_surface,
+						  tpl_wl_vk_buffer_t *wl_vk_buffer)
+{
+	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
+	struct wl_surface *wl_surface = wl_vk_surface->wl_surface;
+	uint32_t version;
+
+	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
+								   "wl_vk_buffer sould be not NULL");
+
+	/* Create the wl_buffer on first commit of this tbm_surface; it is
+	 * cached on the wl_vk_buffer for later commits. */
+	if (wl_vk_buffer->wl_buffer == NULL) {
+		wl_vk_buffer->wl_buffer =
+			(struct wl_proxy *)wayland_tbm_client_create_buffer(
+						wl_vk_display->wl_tbm_client,
+						wl_vk_buffer->tbm_surface);
+	}
+	TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer->wl_buffer != NULL,
+								   "[FATAL] Failed to create wl_buffer");
+
+	/* wl_surface version decides damage semantics: < 4 only has
+	 * surface-coordinate wl_surface_damage; >= 4 supports
+	 * buffer-coordinate wl_surface_damage_buffer. */
+	version = wl_proxy_get_version((struct wl_proxy *)wl_surface);
+
+	wl_surface_attach(wl_surface, (void *)wl_vk_buffer->wl_buffer,
+					  wl_vk_buffer->dx, wl_vk_buffer->dy);
+
+	if (wl_vk_buffer->num_rects < 1 || wl_vk_buffer->rects == NULL) {
+		/* No damage rects supplied: damage the whole buffer. */
+		if (version < 4) {
+			wl_surface_damage(wl_surface,
+							  wl_vk_buffer->dx, wl_vk_buffer->dy,
+							  wl_vk_buffer->width, wl_vk_buffer->height);
+		} else {
+			wl_surface_damage_buffer(wl_surface,
+									 0, 0,
+									 wl_vk_buffer->width, wl_vk_buffer->height);
+		}
+	} else {
+		/* rects is a flat array of (x, y, w, h) quadruples. The y
+		 * coordinate is flipped against the buffer height — the rects
+		 * appear to use a bottom-left origin (Vulkan convention);
+		 * TODO confirm against the caller that fills rects. */
+		int i;
+		for (i = 0; i < wl_vk_buffer->num_rects; i++) {
+			int inverted_y =
+				wl_vk_buffer->height - (wl_vk_buffer->rects[i * 4 + 1] +
+						wl_vk_buffer->rects[i * 4 + 3]);
+			if (version < 4) {
+				wl_surface_damage(wl_surface,
+								  wl_vk_buffer->rects[i * 4 + 0],
+								  inverted_y,
+								  wl_vk_buffer->rects[i * 4 + 2],
+								  wl_vk_buffer->rects[i * 4 + 3]);
+			} else {
+				wl_surface_damage_buffer(wl_surface,
+										 wl_vk_buffer->rects[i * 4 + 0],
+										 inverted_y,
+										 wl_vk_buffer->rects[i * 4 + 2],
+										 wl_vk_buffer->rects[i * 4 + 3]);
+			}
+		}
+	}
+
+	if (wl_vk_display->use_explicit_sync &&
+		wl_vk_surface->surface_sync) {
+
+		/* Explicit-sync path: pass the acquire fence to the compositor
+		 * (it takes a duplicate, so our fd is closed right after), then
+		 * obtain a one-shot release proxy for this commit. */
+		zwp_linux_surface_synchronization_v1_set_acquire_fence(wl_vk_surface->surface_sync,
+															   wl_vk_buffer->acquire_fence_fd);
+		TPL_DEBUG("[SET_ACQUIRE_FENCE] wl_vk_surface(%p) tbm_surface(%p) acquire_fence(%d)",
+				  wl_vk_surface, wl_vk_buffer->tbm_surface, wl_vk_buffer->acquire_fence_fd);
+		close(wl_vk_buffer->acquire_fence_fd);
+		wl_vk_buffer->acquire_fence_fd = -1;
+
+		wl_vk_buffer->buffer_release =
+			zwp_linux_surface_synchronization_v1_get_release(wl_vk_surface->surface_sync);
+		if (!wl_vk_buffer->buffer_release) {
+			TPL_ERR("Failed to get buffer_release. wl_vk_surface(%p)", wl_vk_surface);
+		} else {
+			zwp_linux_buffer_release_v1_add_listener(
+				wl_vk_buffer->buffer_release, &zwp_release_listner, wl_vk_buffer);
+			TPL_DEBUG("add explicit_sync_release_listener.");
+		}
+	} else {
+		/* Legacy path: rely on the core wl_buffer.release event. */
+		wl_buffer_add_listener((void *)wl_vk_buffer->wl_buffer,
+							   &wl_buffer_release_listener, wl_vk_buffer);
+	}
+
+	wl_surface_commit(wl_surface);
+
+	wl_display_flush(wl_vk_display->wl_display);
+
+	TRACE_ASYNC_BEGIN(wl_vk_buffer->bo_name, "[COMMIT ~ RELEASE] BO(%d)",
+					  wl_vk_buffer->bo_name);
+
+	tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+	/* Publish the state change and wake any thread blocked on this
+	 * buffer's condition variable. */
+	wl_vk_buffer->need_to_commit = TPL_FALSE;
+	wl_vk_buffer->status = COMMITTED;
+
+	tpl_gcond_signal(&wl_vk_buffer->cond);
+
+	tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+	TPL_LOG_T("WL_VK",
+			  "[COMMIT] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
+			  wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface,
+			  wl_vk_buffer->bo_name);
+
+	/* Pace the next commit to vblank when the display requests it. */
+	if (wl_vk_display->use_wait_vblank &&
+		_thread_surface_vblank_wait(wl_vk_surface) != TPL_ERROR_NONE)
+		TPL_ERR("Failed to set wait vblank.");
+}
+
tpl_bool_t
__tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
{