tpl_wl_vk_surface_t *wl_vk_surface;
};
-static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(
- tpl_surface_t *surface);
+static void
+_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface);
+static int
+_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
+static void
+__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer);
+static tpl_result_t
+_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
+static void
+_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
static tpl_bool_t
_check_native_handle_is_wl_display(tpl_handle_t native_dpy)
/* If an error occurs in tdm_client_handle_events, it cannot be recovered.
* When tdm_source is no longer available due to an unexpected situation,
- * wl_egl_thread must remove it from the thread and destroy it.
+ * wl_vk_thread must remove it from the thread and destroy it.
* In that case, tdm_vblank can no longer be used for surfaces and displays
* that used this tdm_source. */
if (tdm_err != TDM_ERROR_NONE) {
if (wl_vk_display->explicit_sync) {
wl_proxy_set_queue((struct wl_proxy *)wl_vk_display->explicit_sync,
wl_vk_display->ev_queue);
- TPL_LOG_T("WL_EGL", "zwp_linux_explicit_synchronization_v1(%p) init.",
+ TPL_LOG_T("WL_VK", "zwp_linux_explicit_synchronization_v1(%p) init.",
wl_vk_display->explicit_sync);
}
if (wl_vk_display->wl_initialized)
_thread_wl_display_fini(wl_vk_display);
- TPL_LOG_T("WL_EGL", "finalize| wl_vk_display(%p) tpl_gsource(%p)",
+ TPL_LOG_T("WL_VK", "finalize| wl_vk_display(%p) tpl_gsource(%p)",
wl_vk_display, gsource);
return;
tpl_gmutex_init(&wl_vk_display->wl_event_mutex);
/* Create gthread */
- wl_vk_display->thread = tpl_gthread_create("wl_egl_thread",
+ wl_vk_display->thread = tpl_gthread_create("wl_vk_thread",
(tpl_gthread_func)_thread_init,
(void *)wl_vk_display);
if (!wl_vk_display->thread) {
- TPL_ERR("Failed to create wl_egl_thread");
+ TPL_ERR("Failed to create wl_vk_thread");
goto free_display;
}
tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
TPL_DEBUG("wl_vk_surface(%p) queue creation message received!",
wl_vk_surface);
-
+ if (_thread_swapchain_create_tbm_queue(wl_vk_surface)
+ != TPL_ERROR_NONE) {
+ TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
+ wl_vk_surface);
+ }
tpl_gcond_signal(&wl_vk_surface->surf_cond);
tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
} else if (message == 3) { /* Acquirable message */
tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
TPL_DEBUG("wl_vk_surface(%p) swapchain destroy message received!",
wl_vk_surface);
-
+ _thread_swapchain_destroy_tbm_queue(wl_vk_surface);
tpl_gcond_signal(&wl_vk_surface->surf_cond);
tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
}
surface->backend.data = NULL;
}
+/* tbm_surface_queue reset callback (runs when the compositor resets the
+ * queue, e.g. on resize or activate/deactivate). Marks the surface with
+ * reset = TPL_TRUE so the next frame picks up the new queue state, then
+ * forwards the event to the user via surface->reset_cb. */
+static void
+__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
+ void *data)
+{
+ tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+ tpl_wl_vk_display_t *wl_vk_display = NULL;
+ tpl_wl_vk_swapchain_t *swapchain = NULL;
+ tpl_surface_t *surface = NULL;
+ tpl_bool_t is_activated = TPL_FALSE;
+ int width, height;
+
+ wl_vk_surface = (tpl_wl_vk_surface_t *)data;
+ TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
+
+ wl_vk_display = wl_vk_surface->wl_vk_display;
+ TPL_CHECK_ON_NULL_RETURN(wl_vk_display);
+
+ surface = wl_vk_surface->tpl_surface;
+ TPL_CHECK_ON_NULL_RETURN(surface);
+
+ swapchain = wl_vk_surface->swapchain;
+ TPL_CHECK_ON_NULL_RETURN(swapchain);
+
+ /* When the queue is resized, change the reset flag to TPL_TRUE to reflect
+ * the changed window size at the next frame. */
+ width = tbm_surface_queue_get_width(tbm_queue);
+ height = tbm_surface_queue_get_height(tbm_queue);
+ if (surface->width != width || surface->height != height) {
+ TPL_INFO("[QUEUE_RESIZE]",
+ "wl_vk_surface(%p) tbm_queue(%p) (%dx%d) -> (%dx%d)",
+ wl_vk_surface, tbm_queue,
+ surface->width, surface->height, width, height);
+ }
+
+ /* When queue_reset_callback is called, if is_activated is different from
+ * its previous state change the reset flag to TPL_TRUE to get a new buffer
+ * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
+ is_activated = wayland_tbm_client_queue_check_activate(wl_vk_display->wl_tbm_client,
+ swapchain->tbm_queue);
+ if (wl_vk_surface->is_activated != is_activated) {
+ if (is_activated) {
+ TPL_INFO("[ACTIVATED]",
+ "wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
+ wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
+ } else {
+ TPL_LOG_T("[DEACTIVATED]",
+ " wl_vk_surface(%p) wl_surface(%p) tbm_queue(%p)",
+ wl_vk_surface, wl_vk_surface->wl_surface, tbm_queue);
+ }
+ /* NOTE(review): unlike the removed implementation, this branch only
+ * logs and never assigns wl_vk_surface->is_activated = is_activated,
+ * so the same transition will be logged on every reset — confirm the
+ * flag is updated elsewhere (e.g. on the acquire path). */
+ }
+
+ wl_vk_surface->reset = TPL_TRUE;
+
+ if (surface->reset_cb)
+ surface->reset_cb(surface->reset_data);
+}
+
+/* tbm_surface_queue acquirable callback. A buffer became ready to acquire;
+ * wake the wl_vk_thread by posting message 3 ("acquirable") to the surface's
+ * gsource. Runs with surf_mutex held so the message and the thread's
+ * handling are serialized against other surface operations. */
+static void
+__cb_tbm_queue_acquirable_callback(tbm_surface_queue_h tbm_queue,
+ void *data)
+{
+ TPL_IGNORE(tbm_queue);
+
+ tpl_wl_vk_surface_t *wl_vk_surface = (tpl_wl_vk_surface_t *)data;
+ TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
+
+ tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+
+ /* message 3 == acquirable; consumed by the surf_source dispatch handler. */
+ tpl_gsource_send_message(wl_vk_surface->surf_source, 3);
+
+ tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+}
+
+/* Runs on the wl_vk_thread (triggered by surf_source message 2).
+ * Validates swapchain->properties against the display's limits, then either
+ * reuses the existing tbm_queue (resizing it if needed and bumping the
+ * swapchain refcount) or creates a fresh wayland-tbm surface queue and
+ * registers the reset/acquirable callbacks.
+ * Returns TPL_ERROR_NONE on success. */
static tpl_result_t
-__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
-										tbm_surface_h tbm_surface,
-										int num_rects, const int *rects,
-										tbm_fd sync_fence)
+_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+	TPL_ASSERT (wl_vk_surface);
+
+	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
+	tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
+	tbm_surface_queue_h tbm_queue = NULL;
+	tbm_bufmgr bufmgr = NULL;
+	unsigned int capability;
+
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+
+	/* Reject buffer counts outside the display's [min_buffer, max_buffer]. */
+	if (swapchain->properties.buffer_count < wl_vk_display->min_buffer) {
+		TPL_ERR("buffer count(%d) must be higher than (%d)",
+				swapchain->properties.buffer_count,
+				wl_vk_display->min_buffer);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	if (swapchain->properties.buffer_count > wl_vk_display->max_buffer) {
+		TPL_ERR("buffer count(%d) must be lower than (%d)",
+				swapchain->properties.buffer_count,
+				wl_vk_display->max_buffer);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	/* present_modes is a bitmask of modes the display supports. */
+	if (!(swapchain->properties.present_mode & wl_vk_display->present_modes)) {
+		TPL_ERR("Unsupported present_mode(%d)",
+				swapchain->properties.present_mode);
+		return TPL_ERROR_INVALID_PARAMETER;
+	}
+
+	/* Reuse path: a queue already exists (swapchain re-created on the same
+	 * surface). Drop any tracked buffers, resize the queue if the requested
+	 * extent changed, and take an extra swapchain reference. */
+	if (swapchain->tbm_queue) {
+		int old_width = tbm_surface_queue_get_width(swapchain->tbm_queue);
+		int old_height = tbm_surface_queue_get_height(swapchain->tbm_queue);
+
+		if (swapchain->swapchain_buffers) {
+			int i;
+			for (i = 0; i < swapchain->properties.buffer_count; i++) {
+				if (swapchain->swapchain_buffers[i]) {
+					TPL_DEBUG("unref tbm_surface(%p)", swapchain->swapchain_buffers[i]);
+					tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
+					swapchain->swapchain_buffers[i] = NULL;
+				}
+			}
+
+			free(swapchain->swapchain_buffers);
+			swapchain->swapchain_buffers = NULL;
+		}
+
+		if (old_width != swapchain->properties.width ||
+			old_height != swapchain->properties.height) {
+			/* NOTE(review): reset uses swapchain->properties.format, but no
+			 * visible code ever assigns properties.format — verify it is set
+			 * by the caller before this path can run. */
+			tbm_surface_queue_reset(swapchain->tbm_queue,
+									swapchain->properties.width,
+									swapchain->properties.height,
+									swapchain->properties.format);
+			TPL_INFO("[RESIZE]",
+					 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) (%dx%d)->(%dx%d)",
+					 wl_vk_surface, swapchain, swapchain->tbm_queue,
+					 old_width, old_height,
+					 swapchain->properties.width,
+					 swapchain->properties.height);
+		}
+
+		/* Report the queue's actual size back to the swapchain properties. */
+		swapchain->properties.buffer_count =
+			tbm_surface_queue_get_size(swapchain->tbm_queue);
+
+		wl_vk_surface->reset = TPL_FALSE;
+
+		__tpl_util_atomic_inc(&swapchain->ref_cnt);
+
+		TPL_INFO("[SWAPCHAIN_REUSE]",
+				 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p) buffer_count(%d)",
+				 wl_vk_surface, swapchain, swapchain->tbm_queue,
+				 swapchain->properties.buffer_count);
+
+		return TPL_ERROR_NONE;
+	}
+
+	/* Probe bufmgr capabilities to pick the tiled-memory queue variant.
+	 * NOTE(review): tbm_bufmgr_init(-1) result is not NULL-checked before
+	 * tbm_bufmgr_get_capability() — confirm it cannot fail here. */
+	bufmgr = tbm_bufmgr_init(-1);
+	capability = tbm_bufmgr_get_capability(bufmgr);
+	tbm_bufmgr_deinit(bufmgr);
+
+	/* NOTE(review): both variants hard-code TBM_FORMAT_ARGB8888 and ignore
+	 * swapchain->properties.format — confirm this is intentional. */
+	if (capability & TBM_BUFMGR_CAPABILITY_TILED_MEMORY) {
+		tbm_queue = wayland_tbm_client_create_surface_queue_tiled(
+						wl_vk_display->wl_tbm_client,
+						wl_vk_surface->wl_surface,
+						swapchain->properties.buffer_count,
+						swapchain->properties.width,
+						swapchain->properties.height,
+						TBM_FORMAT_ARGB8888);
+	} else {
+		tbm_queue = wayland_tbm_client_create_surface_queue(
+						wl_vk_display->wl_tbm_client,
+						wl_vk_surface->wl_surface,
+						swapchain->properties.buffer_count,
+						swapchain->properties.width,
+						swapchain->properties.height,
+						TBM_FORMAT_ARGB8888);
+	}
+
+	if (!tbm_queue) {
+		TPL_ERR("Failed to create tbm_queue. wl_vk_surface(%p)",
+				wl_vk_surface);
+		return TPL_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* GUARANTEE_CYCLE keeps dequeue/enqueue order deterministic, as Vulkan
+	 * WSI swapchain semantics expect. */
+	if (tbm_surface_queue_set_modes(
+			tbm_queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
+			TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
+				tbm_queue);
+		tbm_surface_queue_destroy(tbm_queue);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	if (tbm_surface_queue_add_reset_cb(
+			tbm_queue,
+			__cb_tbm_queue_reset_callback,
+			(void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to register reset callback to tbm_surface_queue(%p)",
+				tbm_queue);
+		tbm_surface_queue_destroy(tbm_queue);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	if (tbm_surface_queue_add_acquirable_cb(
+			tbm_queue,
+			__cb_tbm_queue_acquirable_callback,
+			(void *)wl_vk_surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
+		TPL_ERR("Failed to register acquirable callback to tbm_surface_queue(%p)",
+				tbm_queue);
+		tbm_surface_queue_destroy(tbm_queue);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	swapchain->tbm_queue = tbm_queue;
+
+	TPL_INFO("[TBM_QUEUE_CREATED]",
+			 "wl_vk_surface(%p) wl_vk_swapchain(%p) tbm_queue(%p)",
+			 wl_vk_surface, swapchain, tbm_queue);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Backend entry point for vkCreateSwapchainKHR. Allocates (or reuses) the
+ * swapchain object, fills its requested properties, then asks the
+ * wl_vk_thread to create the tbm_queue (message 2) and blocks on surf_cond
+ * until the thread signals completion. */
+static tpl_result_t
+__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
+										  tbm_format format, int width,
+										  int height, int buffer_count, int present_mode)
+{
+	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+	tpl_wl_vk_display_t *wl_vk_display = NULL;
+	tpl_wl_vk_swapchain_t *swapchain = NULL;
+	/* NOTE(review): 'res' is never used in this function — dead variable? */
+	tpl_result_t res = TPL_ERROR_NONE;
+
+	TPL_ASSERT(surface);
+	TPL_ASSERT(surface->display);
+
+	wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
+
+	wl_vk_display = (tpl_wl_vk_display_t *)
+		surface->display->backend.data;
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+
+	swapchain = wl_vk_surface->swapchain;
+
+	if (swapchain == NULL) {
+		swapchain =
+			(tpl_wl_vk_swapchain_t *)calloc(1, sizeof(tpl_wl_vk_swapchain_t));
+		TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
+		swapchain->tbm_queue = NULL;
+	}
+
+	/* NOTE(review): the 'format' parameter is never stored into
+	 * swapchain->properties.format, yet the thread's resize path reads
+	 * properties.format — verify this is assigned somewhere not visible
+	 * in this chunk. */
+	swapchain->properties.buffer_count = buffer_count;
+	swapchain->properties.width = width;
+	swapchain->properties.height = height;
+	swapchain->properties.present_mode = present_mode;
+	swapchain->wl_vk_surface = wl_vk_surface;
+
+	wl_vk_surface->swapchain = swapchain;
+
+	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+	/* send swapchain create tbm_queue message */
+	tpl_gsource_send_message(wl_vk_surface->surf_source, 2);
+	/* NOTE(review): tpl_gcond_wait is not wrapped in a predicate loop, so a
+	 * spurious wakeup would proceed before the queue exists — confirm the
+	 * gsource/gcond implementation rules that out. */
+	tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+
+	TPL_CHECK_ON_FALSE_ASSERT_FAIL(
+		swapchain->tbm_queue != NULL,
+		"[CRITICAL FAIL] Failed to create tbm_surface_queue");
+
+	wl_vk_surface->reset = TPL_FALSE;
+
+	/* NOTE(review): ref_cnt is unconditionally set to 1 here, but the
+	 * thread-side reuse path has already done an atomic_inc on an existing
+	 * swapchain — check whether the reuse reference is being clobbered. */
+	__tpl_util_atomic_set(&swapchain->ref_cnt, 1);
+
+	return TPL_ERROR_NONE;
+}
+
+/* Runs on the wl_vk_thread (triggered by surf_source message 4).
+ * Destroys the swapchain's tbm_surface_queue, if any, and clears the
+ * pointer. The swapchain object itself is freed by the caller thread in
+ * __tpl_wl_vk_wsi_surface_destroy_swapchain(). */
+static void
+_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+	TPL_ASSERT(wl_vk_surface);
+
+	tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
+
+	TPL_CHECK_ON_NULL_RETURN(swapchain);
+
+	if (swapchain->tbm_queue) {
+		TPL_INFO("[TBM_QUEUE_DESTROY]",
+				 "wl_vk_surface(%p) swapchain(%p) tbm_queue(%p)",
+				 wl_vk_surface, swapchain, swapchain->tbm_queue);
+		tbm_surface_queue_destroy(swapchain->tbm_queue);
+		swapchain->tbm_queue = NULL;
+	}
+}
+
+/* Backend entry point for vkDestroySwapchainKHR. Drops one swapchain
+ * reference; when the count reaches zero, releases tracked buffers, asks
+ * the wl_vk_thread to destroy the tbm_queue (message 4), waits for it,
+ * and frees the swapchain object. */
+static tpl_result_t
+__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface)
 {
+	tpl_wl_vk_swapchain_t *swapchain = NULL;
+	tpl_wl_vk_surface_t *wl_vk_surface = NULL;
+	tpl_wl_vk_display_t *wl_vk_display = NULL;
+	/* NOTE(review): 'res' and 'ref' are declared but never used here. */
+	tpl_result_t res = TPL_ERROR_NONE;
+	unsigned int ref;
+
+	TPL_ASSERT(surface);
+	TPL_ASSERT(surface->display);
+
+	wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_surface, TPL_ERROR_INVALID_PARAMETER);
+
+	wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_display, TPL_ERROR_INVALID_PARAMETER);
+
+	swapchain = wl_vk_surface->swapchain;
+	if (!swapchain) {
+		TPL_ERR("wl_vk_surface(%p)->swapchain is NULL. already destroyed.",
+				wl_vk_surface);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	/* Other holders still reference the swapchain; just drop our ref. */
+	if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
+		TPL_INFO("[DESTROY_SWAPCHAIN]",
+				 "wl_vk_surface(%p) swapchain(%p) still valid.",
+				 wl_vk_surface, swapchain);
+		return TPL_ERROR_NONE;
+	}
+
+	TPL_INFO("[DESTROY_SWAPCHAIN][BEGIN]",
+			 "wl_vk_surface(%p) swapchain(%p)",
+			 wl_vk_surface, wl_vk_surface->swapchain);
+
+	/* Release the extra refs taken in get_swapchain_buffers(). */
+	if (swapchain->swapchain_buffers) {
+		for (int i = 0; i < swapchain->properties.buffer_count; i++) {
+			if (swapchain->swapchain_buffers[i]) {
+				TPL_DEBUG("Stop tracking tbm_surface(%p)",
+						  swapchain->swapchain_buffers[i]);
+				tbm_surface_internal_unref(swapchain->swapchain_buffers[i]);
+				swapchain->swapchain_buffers[i] = NULL;
+			}
+		}
+
+		free(swapchain->swapchain_buffers);
+		swapchain->swapchain_buffers = NULL;
+	}
+
+	_tpl_wl_vk_surface_buffer_clear(wl_vk_surface);
+
+	/* message 4 == destroy tbm_queue on the wl_vk_thread; block until done.
+	 * NOTE(review): un-looped tpl_gcond_wait — same spurious-wakeup caveat
+	 * as the create path; confirm the signaling protocol. */
+	tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
+	tpl_gsource_send_message(wl_vk_surface->surf_source, 4);
+	tpl_gcond_wait(&wl_vk_surface->surf_cond, &wl_vk_surface->surf_mutex);
+	tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
+
+	_print_buffer_lists(wl_vk_surface);
+
+	free(swapchain);
+	wl_vk_surface->swapchain = NULL;
+	return TPL_ERROR_NONE;
+}
+
+/* Backend entry point for vkGetSwapchainImagesKHR. With buffers == NULL it
+ * only reports the queue size via *buffer_count. Otherwise it allocates the
+ * swapchain_buffers array, fills it from wayland-tbm, takes an internal ref
+ * on each surface, and hands the array back through *buffers (ownership
+ * stays with the swapchain; released in destroy_swapchain). */
+static tpl_result_t
+__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface,
+											  tbm_surface_h **buffers,
+											  int *buffer_count)
+{
	TPL_ASSERT(surface);
+	TPL_ASSERT(surface->backend.data);
	TPL_ASSERT(surface->display);
-	TPL_ASSERT(surface->display->native_handle);
-	TPL_ASSERT(tbm_surface);
+	TPL_ASSERT(surface->display->backend.data);
	tpl_wl_vk_surface_t *wl_vk_surface =
-		(tpl_wl_vk_surface_t *) surface->backend.data;
-	tbm_surface_queue_error_e tsq_err;
+		(tpl_wl_vk_surface_t *)surface->backend.data;
+	tpl_wl_vk_display_t *wl_vk_display =
+		(tpl_wl_vk_display_t *)surface->display->backend.data;
+	tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
+	tpl_result_t ret = TPL_ERROR_NONE;
+	int i;
-	if (!tbm_surface_internal_is_valid(tbm_surface)) {
-		TPL_ERR("Failed to enqueue tbm_surface(%p) Invalid value.",
-				tbm_surface);
-		return TPL_ERROR_INVALID_PARAMETER;
+	TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+	TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
+
+	/* Serialize against the wayland event thread while touching the queue. */
+	tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
+
+	/* First call of the two-call Vulkan query pattern: count only. */
+	if (!buffers) {
+		*buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
+		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+		return TPL_ERROR_NONE;
+	}
+
+	swapchain->swapchain_buffers = (tbm_surface_h *)calloc(
+									*buffer_count,
+									sizeof(tbm_surface_h));
+	if (!swapchain->swapchain_buffers) {
+		TPL_ERR("Failed to allocate swapchain_buffers. buffer_count(%d)",
+				*buffer_count);
+		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+		return TPL_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* NOTE(review): wayland_tbm_client_queue_get_surfaces returns a boolean
+	 * int (non-zero on success), but it is stored in a tpl_result_t and
+	 * tested with !ret — works only because failure is 0; confirm and
+	 * consider a plain int here. */
+	ret = wayland_tbm_client_queue_get_surfaces(wl_vk_display->wl_tbm_client,
+												swapchain->tbm_queue,
+												swapchain->swapchain_buffers,
+												buffer_count);
+	if (!ret) {
+		TPL_ERR("Failed to get buffers from wl_tbm_client(%p) tbm_queue(%p)",
+				wl_vk_display->wl_tbm_client, swapchain->tbm_queue);
+		free(swapchain->swapchain_buffers);
+		swapchain->swapchain_buffers = NULL;
+		tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+		return TPL_ERROR_INVALID_OPERATION;
+	}
+
+	/* Keep each surface alive while the app holds the swapchain images. */
+	for (i = 0; i < *buffer_count; i++) {
+		if (swapchain->swapchain_buffers[i]) {
+			TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
+					  i, swapchain->swapchain_buffers[i],
+					  _get_tbm_surface_bo_name(swapchain->swapchain_buffers[i]));
+			tbm_surface_internal_ref(swapchain->swapchain_buffers[i]);
+		}
	}
+	*buffers = swapchain->swapchain_buffers;
+
+	tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
+
+	return TPL_ERROR_NONE;
+}
+
+static tpl_result_t
+__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface,
+ int num_rects, const int *rects,
+ tbm_fd sync_fence)
+{
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->backend.data);
+
+ tpl_wl_vk_surface_t *wl_vk_surface =
+ (tpl_wl_vk_surface_t *) surface->backend.data;
+ tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ int bo_name = -1;
+
+ TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+ TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
+ TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
+ TPL_ERROR_INVALID_PARAMETER);
+
+ bo_name = _get_tbm_surface_bo_name(tbm_surface);
+
/* If there are received region information,
* save it to buf_info in tbm_surface user_data using below API. */
if (num_rects && rects) {
return tbm_surface;
}
-static tpl_result_t
-__tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface,
- tbm_surface_h **buffers,
- int *buffer_count)
-{
- tpl_wl_vk_surface_t *wl_vk_surface = NULL;
- tpl_wl_vk_display_t *wl_vk_display = NULL;
- int i;
- tpl_result_t ret = TPL_ERROR_NONE;
-
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(surface->display->backend.data);
- TPL_ASSERT(buffers);
- TPL_ASSERT(buffer_count);
-
- wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
- wl_vk_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
-
- if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) {
- ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface,
- NULL, buffer_count);
- if (ret != TPL_ERROR_NONE) {
- TPL_ERR("Failed to get buffer_count. twe_surface(%p)",
- wl_vk_surface->twe_surface);
- twe_display_unlock(wl_vk_display->twe_display);
- return ret;
- }
-
- wl_vk_surface->swapchain_buffers = (tbm_surface_h *)calloc(
- *buffer_count,
- sizeof(tbm_surface_h));
- if (!wl_vk_surface->swapchain_buffers) {
- TPL_ERR("Failed to allocate memory for buffers.");
- twe_display_unlock(wl_vk_display->twe_display);
- return TPL_ERROR_OUT_OF_MEMORY;
- }
-
- ret = twe_surface_get_swapchain_buffers(wl_vk_surface->twe_surface,
- wl_vk_surface->swapchain_buffers,
- buffer_count);
- if (ret != TPL_ERROR_NONE) {
- TPL_ERR("Failed to get swapchain_buffers. wl_vk_surface(%p) twe_surface(%p)",
- wl_vk_surface, wl_vk_surface->twe_surface);
- free(wl_vk_surface->swapchain_buffers);
- wl_vk_surface->swapchain_buffers = NULL;
- twe_display_unlock(wl_vk_display->twe_display);
- return ret;
- }
-
- for (i = 0; i < *buffer_count; i++) {
- if (wl_vk_surface->swapchain_buffers[i]) {
- TPL_DEBUG("swapchain_buffers[%d] = tbm_surface(%p) bo(%d)",
- i, wl_vk_surface->swapchain_buffers[i],
- tbm_bo_export(tbm_surface_internal_get_bo(
- wl_vk_surface->swapchain_buffers[i], 0)));
- tbm_surface_internal_ref(wl_vk_surface->swapchain_buffers[i]);
- }
- }
-
- *buffers = wl_vk_surface->swapchain_buffers;
-
- twe_display_unlock(wl_vk_display->twe_display);
- }
-
- return TPL_ERROR_NONE;
-}
-
-static void
-__cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue,
- void *data)
-{
- tpl_surface_t *surface = NULL;
- tpl_wl_vk_surface_t *wl_vk_surface = NULL;
- tpl_bool_t is_activated = TPL_FALSE;
-
- surface = (tpl_surface_t *)data;
- TPL_CHECK_ON_NULL_RETURN(surface);
-
- wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
- TPL_CHECK_ON_NULL_RETURN(wl_vk_surface);
-
- /* When queue_reset_callback is called, if is_activated is different from
- * its previous state change the reset flag to TPL_TRUE to get a new buffer
- * with the changed state(ACTIVATED/DEACTIVATED) at the next frame. */
- is_activated = twe_surface_check_activated(wl_vk_surface->twe_surface);
-
- if (wl_vk_surface->is_activated != is_activated) {
- if (is_activated) {
- TPL_LOG_T("WL_VK",
- "[ACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)",
- wl_vk_surface, surface_queue);
- } else {
- TPL_LOG_T("WL_VK",
- "[DEACTIVATED_CB] wl_vk_surface(%p) tbm_queue(%p)",
- wl_vk_surface, surface_queue);
- }
- wl_vk_surface->is_activated = is_activated;
- }
-
- wl_vk_surface->reset = TPL_TRUE;
-
- if (surface->reset_cb)
- surface->reset_cb(surface->reset_data);
-}
-
-static tpl_result_t
-__tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface,
- tbm_format format, int width,
- int height, int buffer_count, int present_mode)
-{
- tpl_wl_vk_surface_t *wl_vk_surface = NULL;
- tpl_wl_vk_display_t *wl_vk_display = NULL;
- tpl_result_t res = TPL_ERROR_NONE;
-
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
- TPL_ASSERT(surface->display);
-
- wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
- TPL_ASSERT(wl_vk_surface);
-
- wl_vk_display = (tpl_wl_vk_display_t *)
- surface->display->backend.data;
- TPL_ASSERT(wl_vk_display);
-
- if (wl_vk_surface->tbm_queue) {
- int old_width = tbm_surface_queue_get_width(wl_vk_surface->tbm_queue);
- int old_height = tbm_surface_queue_get_height(wl_vk_surface->tbm_queue);
-
- if (old_width != width || old_height != height) {
- tbm_surface_queue_reset(wl_vk_surface->tbm_queue,
- width, height, format);
- TPL_LOG_T("WL_VK",
- "[RESIZE] wl_vk_surface(%p) tbm_queue(%p), (%d x %d) -> (%d x %d)",
- wl_vk_surface, wl_vk_surface->tbm_queue,
- old_width, old_height, width, height);
- }
-
- if (wl_vk_surface->swapchain_buffers) {
- int i;
- for (i = 0; i < wl_vk_surface->buffer_count; i++) {
- if (wl_vk_surface->swapchain_buffers[i]) {
- TPL_DEBUG("unref tbm_surface(%p)", wl_vk_surface->swapchain_buffers[i]);
- tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]);
- wl_vk_surface->swapchain_buffers[i] = NULL;
- }
- }
-
- free(wl_vk_surface->swapchain_buffers);
- wl_vk_surface->swapchain_buffers = NULL;
- }
-
- wl_vk_surface->buffer_count =
- tbm_surface_queue_get_size(wl_vk_surface->tbm_queue);
- wl_vk_surface->reset = TPL_FALSE;
-
- __tpl_util_atomic_inc(&wl_vk_surface->swapchain_reference);
-
- TPL_LOG_T("WL_VK", "[REUSE] wl_vk_surface(%p) tbm_queue(%p) size(%d)",
- wl_vk_surface, wl_vk_surface->tbm_queue,
- wl_vk_surface->buffer_count);
- return TPL_ERROR_NONE;
- }
-
- res = twe_surface_create_swapchain(wl_vk_surface->twe_surface,
- width, height, format,
- buffer_count, present_mode);
- if (res != TPL_ERROR_NONE) {
- TPL_ERR("Failed to create swapchain. twe_surface(%p)",
- wl_vk_surface->twe_surface);
- return res;
- }
-
- wl_vk_surface->tbm_queue = twe_surface_get_tbm_queue(
- wl_vk_surface->twe_surface);
-
- /* Set reset_callback to tbm_queue */
- if (tbm_surface_queue_add_reset_cb(wl_vk_surface->tbm_queue,
- __cb_tbm_queue_reset_callback,
- (void *)surface) != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("TBM surface queue add reset cb failed!");
- twe_surface_destroy_swapchain(wl_vk_surface->twe_surface);
- wl_vk_surface->tbm_queue = NULL;
- return TPL_ERROR_INVALID_OPERATION;
- }
-
- wl_vk_surface->buffer_count = buffer_count;
- wl_vk_surface->reset = TPL_FALSE;
-
- __tpl_util_atomic_set(&wl_vk_surface->swapchain_reference, 1);
-
- return TPL_ERROR_NONE;
-}
-
-static tpl_result_t
-__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface)
-{
- tpl_wl_vk_surface_t *wl_vk_surface = NULL;
- tpl_wl_vk_display_t *wl_vk_display = NULL;
- tpl_result_t res = TPL_ERROR_NONE;
- unsigned int ref;
-
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
- TPL_ASSERT(surface->display);
- TPL_ASSERT(surface->display->backend.data);
-
- wl_vk_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
- wl_vk_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
-
- if (twe_display_lock(wl_vk_display->twe_display) == TPL_ERROR_NONE) {
- ref = __tpl_util_atomic_dec(&wl_vk_surface->swapchain_reference);
- if (ref > 0) {
- TPL_LOG_T("WL_VK",
- "This swapchain is still valid. | twe_surface(%p)",
- wl_vk_surface->twe_surface);
- twe_display_unlock(wl_vk_display->twe_display);
- return TPL_ERROR_NONE;
- }
-
-
- if (wl_vk_surface->reset) {
- TPL_LOG_T("WL_VK",
- "Since reset is in the TRUE state, it will not be destroyed.");
- twe_display_unlock(wl_vk_display->twe_display);
- return TPL_ERROR_NONE;
- }
-
- if (wl_vk_surface->swapchain_buffers) {
- int i;
- for (i = 0; i < wl_vk_surface->buffer_count; i++) {
- TPL_DEBUG("Stop tracking tbm_surface(%p)",
- wl_vk_surface->swapchain_buffers[i]);
- tbm_surface_internal_unref(wl_vk_surface->swapchain_buffers[i]);
- wl_vk_surface->swapchain_buffers[i] = NULL;
- }
-
- free(wl_vk_surface->swapchain_buffers);
- wl_vk_surface->swapchain_buffers = NULL;
- }
-
- res = twe_surface_destroy_swapchain(wl_vk_surface->twe_surface);
- if (res != TPL_ERROR_NONE) {
- TPL_ERR("Failed to destroy swapchain. twe_surface(%p)",
- wl_vk_surface->twe_surface);
- twe_display_unlock(wl_vk_display->twe_display);
- return res;
- }
-
- wl_vk_surface->tbm_queue = NULL;
-
- twe_display_unlock(wl_vk_display->twe_display);
- }
-
- return TPL_ERROR_NONE;
-}
-
tpl_bool_t
__tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
{
}
+/* Final teardown of a tpl_wl_vk_buffer_t: detaches it from the surface's
+ * tracking array, destroys its wl_buffer and explicit-sync objects, closes
+ * or signals any pending sync fds so waiters are not left blocked, frees
+ * the damage-rect array, and frees the struct itself. */
static void
-__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
+__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
{
-	tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
-	tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+	tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
+	tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
-	TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
-			 wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
+	TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
+			 wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);
+	/* Remove this buffer from the surface's slot array under buffers_mutex. */
-	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
-	if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) {
-		wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL;
-		wl_egl_surface->buffer_cnt--;
+	tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
+	if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
+		wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
+		wl_vk_surface->buffer_cnt--;
-		wl_egl_buffer->idx = -1;
+		wl_vk_buffer->idx = -1;
	}
-	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
-	wl_display_flush(wl_egl_display->wl_display);
+	wl_display_flush(wl_vk_display->wl_display);
-	if (wl_egl_buffer->wl_buffer) {
-		wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
-										  (void *)wl_egl_buffer->wl_buffer);
-		wl_egl_buffer->wl_buffer = NULL;
+	if (wl_vk_buffer->wl_buffer) {
+		wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
+										  (void *)wl_vk_buffer->wl_buffer);
+		wl_vk_buffer->wl_buffer = NULL;
	}
-	if (wl_egl_buffer->buffer_release) {
-		zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
-		wl_egl_buffer->buffer_release = NULL;
+	if (wl_vk_buffer->buffer_release) {
+		zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
+		wl_vk_buffer->buffer_release = NULL;
	}
-	if (wl_egl_buffer->release_fence_fd != -1) {
-		close(wl_egl_buffer->release_fence_fd);
-		wl_egl_buffer->release_fence_fd = -1;
+	if (wl_vk_buffer->release_fence_fd != -1) {
+		close(wl_vk_buffer->release_fence_fd);
+		wl_vk_buffer->release_fence_fd = -1;
	}
-	if (wl_egl_buffer->waiting_source) {
-		tpl_gsource_destroy(wl_egl_buffer->waiting_source, TPL_FALSE);
-		wl_egl_buffer->waiting_source = NULL;
+	if (wl_vk_buffer->waiting_source) {
+		tpl_gsource_destroy(wl_vk_buffer->waiting_source, TPL_FALSE);
+		wl_vk_buffer->waiting_source = NULL;
	}
+	/* Signal commit/presentation eventfds before closing so any thread
+	 * blocked on them wakes up instead of waiting forever. */
-	if (wl_egl_buffer->commit_sync_fd != -1) {
-		int ret = _write_to_eventfd(wl_egl_buffer->commit_sync_fd);
+	if (wl_vk_buffer->commit_sync_fd != -1) {
+		int ret = _write_to_eventfd(wl_vk_buffer->commit_sync_fd);
		if (ret == -1)
			TPL_ERR("Failed to send commit_sync signal to fd(%d)",
-					wl_egl_buffer->commit_sync_fd);
-		close(wl_egl_buffer->commit_sync_fd);
-		wl_egl_buffer->commit_sync_fd = -1;
+					wl_vk_buffer->commit_sync_fd);
+		close(wl_vk_buffer->commit_sync_fd);
+		wl_vk_buffer->commit_sync_fd = -1;
	}
-	if (wl_egl_buffer->presentation_sync_fd != -1) {
-		int ret = _write_to_eventfd(wl_egl_buffer->presentation_sync_fd);
+	if (wl_vk_buffer->presentation_sync_fd != -1) {
+		int ret = _write_to_eventfd(wl_vk_buffer->presentation_sync_fd);
		if (ret == -1)
			TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
-					wl_egl_buffer->presentation_sync_fd);
-		close(wl_egl_buffer->presentation_sync_fd);
-		wl_egl_buffer->presentation_sync_fd = -1;
+					wl_vk_buffer->presentation_sync_fd);
+		close(wl_vk_buffer->presentation_sync_fd);
+		wl_vk_buffer->presentation_sync_fd = -1;
	}
-	if (wl_egl_buffer->rects) {
-		free(wl_egl_buffer->rects);
-		wl_egl_buffer->rects = NULL;
-		wl_egl_buffer->num_rects = 0;
+	if (wl_vk_buffer->rects) {
+		free(wl_vk_buffer->rects);
+		wl_vk_buffer->rects = NULL;
+		wl_vk_buffer->num_rects = 0;
	}
-	wl_egl_buffer->tbm_surface = NULL;
-	wl_egl_buffer->bo_name = -1;
+	wl_vk_buffer->tbm_surface = NULL;
+	wl_vk_buffer->bo_name = -1;
-	free(wl_egl_buffer);
+	free(wl_vk_buffer);
}
static int
}
+/* Debug helper: logs, under buffers_mutex, every tracked wl_vk_buffer in
+ * the surface's fixed-size slot array with its tbm_surface, bo name, and
+ * current status string. */
static void
-_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
+_print_buffer_lists(tpl_wl_vk_surface_t *wl_vk_surface)
{
	int idx = 0;
-	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
-	TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)",
-			 wl_egl_surface, wl_egl_surface->buffer_cnt);
+	tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
+	TPL_INFO("[BUFFERS_INFO]", "wl_vk_surface(%p) buffer_cnt(%d)",
+			 wl_vk_surface, wl_vk_surface->buffer_cnt);
	for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
-		tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
-		if (wl_egl_buffer) {
+		tpl_wl_vk_buffer_t *wl_vk_buffer = wl_vk_surface->buffers[idx];
+		if (wl_vk_buffer) {
			TPL_INFO("[INFO]",
-					 "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
-					 idx, wl_egl_buffer, wl_egl_buffer->tbm_surface,
-					 wl_egl_buffer->bo_name,
-					 status_to_string[wl_egl_buffer->status]);
+					 "INDEX[%d] | wl_vk_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
+					 idx, wl_vk_buffer, wl_vk_buffer->tbm_surface,
+					 wl_vk_buffer->bo_name,
+					 status_to_string[wl_vk_buffer->status]);
		}
	}
-	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
}