#define BUFFER_ARRAY_SIZE 10
#define VK_CLIENT_QUEUE_SIZE 3
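+/* The address of this static variable is used as a process-unique key
+ * for attaching a tpl_wl_vk_buffer_t to a tbm_surface as user data. */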
+static int wl_vk_buffer_key;
+#define KEY_WL_VK_BUFFER (unsigned long)(&wl_vk_buffer_key)
+
typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t;
typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t;
typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t;
_thread_swapchain_create_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
static void
_thread_swapchain_destroy_tbm_queue(tpl_wl_vk_surface_t *wl_vk_surface);
+static tpl_result_t
+_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface);
static tpl_bool_t
_check_native_handle_is_wl_display(tpl_handle_t native_dpy)
tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
TPL_DEBUG("wl_vk_surface(%p) acquirable message received!",
wl_vk_surface);
- _thread_surface_queue_acquire(wl_vk_surface);
+ if (_thread_surface_queue_acquire(wl_vk_surface)
+ != TPL_ERROR_NONE) {
+ TPL_ERR("Failed to acquire from tbm_queue. wl_vk_surface(%p)",
+ wl_vk_surface);
+ }
tpl_gmutex_unlock(&wl_vk_surface->surf_mutex);
} else if (message == 4) { /* swapchain destroy */
tpl_gmutex_lock(&wl_vk_surface->surf_mutex);
surface->backend.data = NULL;
}
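+/* Backend validate(): returns TPL_FALSE once the tbm_queue has been
+ * reset, signaling that the current swapchain buffers are stale. */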
+static tpl_bool_t
+__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface)
+{
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->backend.data);
+
+ tpl_wl_vk_surface_t *wl_vk_surface =
+ (tpl_wl_vk_surface_t *)surface->backend.data;
+
+ return !(wl_vk_surface->reset);
+}
+
static void
__cb_tbm_queue_reset_callback(tbm_surface_queue_h tbm_queue,
void *data)
return TPL_ERROR_NONE;
}
-static tpl_result_t
-__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
- tbm_surface_h tbm_surface,
- int num_rects, const int *rects,
- tbm_fd sync_fence)
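+/* tbm_data_free callback registered in _wl_vk_buffer_create().
+ * Runs when the tbm_surface user data is deleted: detaches the buffer
+ * from the surface's buffers array and frees everything it owns. */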
+static void
+__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
{
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
+ tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
+ tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
- tpl_wl_vk_surface_t *wl_vk_surface =
- (tpl_wl_vk_surface_t *) surface->backend.data;
- tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- int bo_name = -1;
+ TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
+ wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);
- TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
- TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
- TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
- TPL_ERROR_INVALID_PARAMETER);
+ tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
+ if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
+ wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
+ wl_vk_surface->buffer_cnt--;
- bo_name = _get_tbm_surface_bo_name(tbm_surface);
+ wl_vk_buffer->idx = -1;
+ }
+ tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
- /* If there are received region information,
- * save it to buf_info in tbm_surface user_data using below API. */
- if (num_rects && rects) {
- tpl_result_t ret = TPL_ERROR_NONE;
- ret = twe_surface_set_damage_region(tbm_surface, num_rects, rects);
- if (ret != TPL_ERROR_NONE) {
- TPL_WARN("Failed to set damage region. num_rects(%d) rects(%p)",
- num_rects, rects);
- }
+ wl_display_flush(wl_vk_display->wl_display);
+
+ if (wl_vk_buffer->wl_buffer) {
+ wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
+ (void *)wl_vk_buffer->wl_buffer);
+ wl_vk_buffer->wl_buffer = NULL;
}
- tsq_err = tbm_surface_queue_enqueue(wl_vk_surface->tbm_queue,
- tbm_surface);
- if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) {
- tbm_surface_internal_unref(tbm_surface);
- } else {
- TPL_ERR("Failed to enqeueue tbm_surface. | tsq_err = %d", tsq_err);
- return TPL_ERROR_INVALID_OPERATION;
+
+ if (wl_vk_buffer->buffer_release) {
+ zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
+ wl_vk_buffer->buffer_release = NULL;
}
- if (sync_fence != -1) {
- tpl_result_t res = TPL_ERROR_NONE;
- res = twe_surface_set_sync_fd(wl_vk_surface->twe_surface,
- tbm_surface, sync_fence);
- if (res != TPL_ERROR_NONE) {
- TPL_WARN("Failed to set sync_fd(%d). Fallback to async mode.",
- sync_fence);
- }
+ if (wl_vk_buffer->release_fence_fd != -1) {
+ close(wl_vk_buffer->release_fence_fd);
+ wl_vk_buffer->release_fence_fd = -1;
}
- TPL_LOG_T("WL_VK", "[ENQ] tbm_surface(%p) bo(%d) sync_fence(%d)",
- tbm_surface,
- tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)),
- sync_fence);
+ if (wl_vk_buffer->rects) {
+ free(wl_vk_buffer->rects);
+ wl_vk_buffer->rects = NULL;
+ wl_vk_buffer->num_rects = 0;
+ }
- return TPL_ERROR_NONE;
+ wl_vk_buffer->tbm_surface = NULL;
+ wl_vk_buffer->bo_name = -1;
+
+ free(wl_vk_buffer);
}
-static tpl_bool_t
-__tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface)
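+/* Returns the tpl_wl_vk_buffer_t stored as user data of the given
+ * tbm_surface, or NULL if none has been attached yet. */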
+static tpl_wl_vk_buffer_t *
+_get_wl_vk_buffer(tbm_surface_h tbm_surface)
{
- TPL_ASSERT(surface);
- TPL_ASSERT(surface->backend.data);
-
- tpl_wl_vk_surface_t *wl_vk_surface =
- (tpl_wl_vk_surface_t *)surface->backend.data;
-
- return !(wl_vk_surface->reset);
+ tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
+ tbm_surface_internal_get_user_data(tbm_surface, KEY_WL_VK_BUFFER,
+ (void **)&wl_vk_buffer);
+ return wl_vk_buffer;
}
-static tpl_result_t
-__tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
- tbm_surface_h tbm_surface)
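+/* Returns the wl_vk_buffer already attached to the tbm_surface, or
+ * creates one, registers it as tbm_surface user data (freed via
+ * __cb_wl_vk_buffer_free) and stores it in the first free slot of
+ * wl_vk_surface->buffers. */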
+static tpl_wl_vk_buffer_t *
+_wl_vk_buffer_create(tpl_wl_vk_surface_t *wl_vk_surface,
+ tbm_surface_h tbm_surface)
{
- tpl_wl_vk_surface_t *wl_vk_surface = NULL;
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
- wl_vk_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
- if (!wl_vk_surface) {
- TPL_ERR("Invalid backend surface. surface(%p) wl_vk_surface(%p)",
- surface, wl_vk_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
- if (!tbm_surface_internal_is_valid(tbm_surface)) {
- TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
- return TPL_ERROR_INVALID_PARAMETER;
- }
+ if (!wl_vk_buffer) {
+ wl_vk_buffer = (tpl_wl_vk_buffer_t *)calloc(1, sizeof(tpl_wl_vk_buffer_t));
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, NULL);
- tbm_surface_internal_unref(tbm_surface);
+ tbm_surface_internal_add_user_data(tbm_surface, KEY_WL_VK_BUFFER,
+ (tbm_data_free)__cb_wl_vk_buffer_free);
+ tbm_surface_internal_set_user_data(tbm_surface, KEY_WL_VK_BUFFER,
+ wl_vk_buffer);
- tsq_err = tbm_surface_queue_cancel_dequeue(wl_vk_surface->tbm_queue,
- tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface);
- return TPL_ERROR_INVALID_OPERATION;
+ wl_vk_buffer->wl_buffer = NULL;
+ wl_vk_buffer->tbm_surface = tbm_surface;
+ wl_vk_buffer->bo_name = _get_tbm_surface_bo_name(tbm_surface);
+ wl_vk_buffer->wl_vk_surface = wl_vk_surface;
+
+ wl_vk_buffer->status = RELEASED;
+
+ wl_vk_buffer->acquire_fence_fd = -1;
+ wl_vk_buffer->release_fence_fd = -1;
+
+ wl_vk_buffer->dx = 0;
+ wl_vk_buffer->dy = 0;
+ wl_vk_buffer->width = tbm_surface_get_width(tbm_surface);
+ wl_vk_buffer->height = tbm_surface_get_height(tbm_surface);
+
+ wl_vk_buffer->rects = NULL;
+ wl_vk_buffer->num_rects = 0;
+
+ tpl_gmutex_init(&wl_vk_buffer->mutex);
+ tpl_gcond_init(&wl_vk_buffer->cond);
+
+ tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
+ {
+ int i;
+ for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+ if (wl_vk_surface->buffers[i] == NULL) break;
+
+			/* Reaching this point means every slot in the buffers
+			 * array is occupied, which most likely indicates a
+			 * critical buffer leak. */
+ if (i == BUFFER_ARRAY_SIZE) {
+ tpl_wl_vk_buffer_t *evicted_buffer = NULL;
+ int evicted_idx = 0; /* evict the frontmost buffer */
+
+ evicted_buffer = wl_vk_surface->buffers[evicted_idx];
+
+ TPL_WARN("wl_vk_surface(%p) buffers array is full. evict one.",
+ wl_vk_surface);
+ TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
+ evicted_buffer, evicted_buffer->tbm_surface,
+ status_to_string[evicted_buffer->status]);
+
+			/* [TODO] revisit whether a better eviction policy is
+			 * needed here. */
+ wl_vk_surface->buffer_cnt--;
+ wl_vk_surface->buffers[evicted_idx] = NULL;
+
+ i = evicted_idx;
+ }
+
+ wl_vk_surface->buffer_cnt++;
+ wl_vk_surface->buffers[i] = wl_vk_buffer;
+ wl_vk_buffer->idx = i;
+ }
+ tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
+
+ TPL_INFO("[WL_VK_BUFFER_CREATE]",
+ "wl_vk_surface(%p) wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
+ wl_vk_surface, wl_vk_buffer, tbm_surface,
+ wl_vk_buffer->bo_name);
}
- TPL_LOG_T("WL_VK", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
- surface, tbm_surface);
+ wl_vk_buffer->need_to_commit = TPL_FALSE;
+ wl_vk_buffer->buffer_release = NULL;
- return TPL_ERROR_NONE;
+ return wl_vk_buffer;
}
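+/* Backend dequeue_buffer():
+ * Waits up to timeout_ns for the swapchain's tbm_queue to become
+ * dequeueable, dequeues a tbm_surface, and hands its release fence to
+ * the caller when explicit sync (surface_sync) is enabled. */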
static tbm_surface_h
__tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface,
- uint64_t timeout_ns,
- tbm_fd *sync_fence)
+ uint64_t timeout_ns,
+ int32_t *release_fence)
{
TPL_ASSERT(surface);
TPL_ASSERT(surface->backend.data);
TPL_ASSERT(surface->display);
+ TPL_ASSERT(surface->display->backend.data);
+ TPL_OBJECT_CHECK_RETURN(surface, NULL);
- tbm_surface_h tbm_surface = NULL;
tpl_wl_vk_surface_t *wl_vk_surface =
(tpl_wl_vk_surface_t *)surface->backend.data;
tpl_wl_vk_display_t *wl_vk_display =
(tpl_wl_vk_display_t *)surface->display->backend.data;
- tbm_surface_queue_error_e tsq_err = 0;
- tpl_result_t lock_res = TPL_ERROR_NONE;
- tpl_result_t res = TPL_ERROR_NONE;
+ tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
+ tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
+
- if (sync_fence)
- *sync_fence = -1;
+ tbm_surface_h tbm_surface = NULL;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+ TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, NULL);
+ TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, NULL);
TPL_OBJECT_UNLOCK(surface);
TRACE_BEGIN("WAIT_DEQUEUEABLE");
- lock_res = twe_display_lock(wl_vk_display->twe_display);
- res = twe_surface_wait_dequeueable(wl_vk_surface->twe_surface,
- timeout_ns);
+ if (timeout_ns != UINT64_MAX) {
+ tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
+ swapchain->tbm_queue, timeout_ns/1000);
+ } else {
+ tsq_err = tbm_surface_queue_can_dequeue(
+ swapchain->tbm_queue, 1);
+ }
TRACE_END();
TPL_OBJECT_LOCK(surface);
-	if (res == TPL_ERROR_TIME_OUT) {
+	if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
TPL_ERR("Failed to get buffer during timeout_ns(%" PRIu64 ")",
timeout_ns);
- if (lock_res == TPL_ERROR_NONE)
- twe_display_unlock(wl_vk_display->twe_display);
return NULL;
-	} else if (res != TPL_ERROR_NONE) {
+	} else if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
- TPL_ERR("Invalid operation. twe_surface(%p) timeout_ns(%" PRIu64 ")",
- wl_vk_surface->twe_surface, timeout_ns);
- if (lock_res == TPL_ERROR_NONE)
- twe_display_unlock(wl_vk_display->twe_display);
+ TPL_ERR("Invalid operation. wl_vk_surface(%p) tbm_queue(%p)",
+ wl_vk_surface, swapchain->tbm_queue);
return NULL;
}
+ tpl_gmutex_lock(&wl_vk_display->wl_event_mutex);
+
if (wl_vk_surface->reset) {
- TPL_LOG_T("WL_VK", "tbm_queue(%p) has been reset. Do not process dequeue.",
- wl_vk_surface->tbm_queue);
- if (lock_res == TPL_ERROR_NONE)
- twe_display_unlock(wl_vk_display->twe_display);
+ TPL_WARN("Invalid swapchain(%p) tbm_queue(%p) should be re-created.",
+ swapchain, swapchain->tbm_queue);
+ tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
return NULL;
}
-
- tsq_err = tbm_surface_queue_dequeue(wl_vk_surface->tbm_queue,
+ tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue,
&tbm_surface);
if (!tbm_surface) {
- TPL_ERR("Failed to get tbm_surface from tbm_surface_queue(%p) | tsq_err = %d",
- wl_vk_surface->tbm_queue, tsq_err);
- if (lock_res == TPL_ERROR_NONE)
- twe_display_unlock(wl_vk_display->twe_display);
+		TPL_ERR("Failed to dequeue from tbm_queue(%p) wl_vk_surface(%p) | tsq_err = %d",
+ swapchain->tbm_queue, wl_vk_surface, tsq_err);
+ tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
return NULL;
}
tbm_surface_internal_ref(tbm_surface);
- if (sync_fence) {
- *sync_fence = twe_surface_create_sync_fd(tbm_surface);
+ wl_vk_buffer = _wl_vk_buffer_create(wl_vk_surface, tbm_surface);
+ TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer, "Failed to create/get wl_vk_buffer");
+
+ tpl_gmutex_lock(&wl_vk_buffer->mutex);
+ wl_vk_buffer->status = DEQUEUED;
+
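+	/* Ownership of release_fence_fd moves to the caller; reset it to -1
+	 * so it is not closed again in __cb_wl_vk_buffer_free(). */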
+ if (release_fence) {
+ if (wl_vk_surface->surface_sync) {
+ *release_fence = wl_vk_buffer->release_fence_fd;
+ TPL_DEBUG("wl_vk_surface(%p) wl_vk_buffer(%p) release_fence_fd(%d)",
+ wl_vk_surface, wl_vk_buffer, *release_fence);
+ wl_vk_buffer->release_fence_fd = -1;
+ } else {
+ *release_fence = -1;
+ }
}
- TPL_LOG_T("WL_VK", "[DEQ] tbm_queue(%p) tbm_surface(%p) bo(%d)",
- wl_vk_surface->tbm_queue, tbm_surface,
- tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0)));
+ wl_vk_surface->reset = TPL_FALSE;
- if (lock_res == TPL_ERROR_NONE)
- twe_display_unlock(wl_vk_display->twe_display);
+ TPL_LOG_T("WL_VK", "[DEQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
+ wl_vk_buffer, tbm_surface, wl_vk_buffer->bo_name,
+ release_fence ? *release_fence : -1);
+
+ tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+ tpl_gmutex_unlock(&wl_vk_display->wl_event_mutex);
return tbm_surface;
}
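+/* Backend cancel_dequeued_buffer():
+ * Returns a dequeued but unused tbm_surface to the tbm_queue and marks
+ * the corresponding wl_vk_buffer as RELEASED again. */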
+static tpl_result_t
+__tpl_wl_vk_wsi_surface_cancel_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface)
+{
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->backend.data);
+
+ tpl_wl_vk_surface_t *wl_vk_surface =
+ (tpl_wl_vk_surface_t *)surface->backend.data;
+ tpl_wl_vk_swapchain_t *swapchain = NULL;
+ tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+ TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
+ TPL_ERROR_INVALID_PARAMETER);
+
+ swapchain = wl_vk_surface->swapchain;
+ TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+ TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue,
+ TPL_ERROR_INVALID_PARAMETER);
+
+ wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
+ if (wl_vk_buffer) {
+ tpl_gmutex_lock(&wl_vk_buffer->mutex);
+ wl_vk_buffer->status = RELEASED;
+ tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+ }
+
+ tbm_surface_internal_unref(tbm_surface);
+
+ TPL_INFO("[CANCEL BUFFER]",
+ "wl_vk_surface(%p) swapchain(%p) tbm_surface(%p) bo(%d)",
+ wl_vk_surface, swapchain, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+
+ tsq_err = tbm_surface_queue_cancel_dequeue(swapchain->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to cancel dequeue tbm_surface(%p)", tbm_surface);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ return TPL_ERROR_NONE;
+}
+
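+/* Backend enqueue_buffer():
+ * Saves the damage rects and the acquire fence in the wl_vk_buffer,
+ * then enqueues the tbm_surface so that the thread can acquire and
+ * commit it. */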
+static tpl_result_t
+__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface,
+ int num_rects, const int *rects,
+ int32_t acquire_fence)
+{
+ TPL_ASSERT(surface);
+ TPL_ASSERT(surface->display);
+ TPL_ASSERT(surface->backend.data);
+ TPL_OBJECT_CHECK_RETURN(surface, TPL_ERROR_INVALID_PARAMETER);
+
+ tpl_wl_vk_surface_t *wl_vk_surface =
+ (tpl_wl_vk_surface_t *) surface->backend.data;
+ tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
+ tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ int bo_name = -1;
+
+ TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+ TPL_CHECK_ON_NULL_RETURN_VAL(tbm_surface, TPL_ERROR_INVALID_PARAMETER);
+ TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
+ TPL_ERROR_INVALID_PARAMETER);
+
+	wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
+	TPL_CHECK_ON_NULL_RETURN_VAL(wl_vk_buffer, TPL_ERROR_INVALID_PARAMETER);
+
+	bo_name = wl_vk_buffer->bo_name;
+
+ tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+	/* If region information was received, save it to the wl_vk_buffer. */
+ if (num_rects && rects) {
+ if (wl_vk_buffer->rects != NULL) {
+ free(wl_vk_buffer->rects);
+ wl_vk_buffer->rects = NULL;
+ wl_vk_buffer->num_rects = 0;
+ }
+
+		wl_vk_buffer->rects = (int *)calloc(1, (sizeof(int) * 4 * num_rects));
+
+		if (wl_vk_buffer->rects) {
+			wl_vk_buffer->num_rects = num_rects;
+			memcpy((char *)wl_vk_buffer->rects, (char *)rects,
+				   sizeof(int) * 4 * num_rects);
+		} else {
+			/* Leave num_rects at 0 so rects/num_rects stay consistent. */
+			TPL_ERR("Failed to allocate memory for rects info.");
+		}
+ }
+
+ if (wl_vk_buffer->acquire_fence_fd != -1)
+ close(wl_vk_buffer->acquire_fence_fd);
+
+ wl_vk_buffer->acquire_fence_fd = acquire_fence;
+
+ wl_vk_buffer->status = ENQUEUED;
+ TPL_LOG_T("WL_VK",
+ "[ENQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d) acquire_fence(%d)",
+ wl_vk_buffer, tbm_surface, bo_name, acquire_fence);
+
+ tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+ tsq_err = tbm_surface_queue_enqueue(swapchain->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ tbm_surface_internal_unref(tbm_surface);
+ TPL_ERR("Failed to enqueue tbm_surface(%p). wl_vk_surface(%p) tsq_err=%d",
+ tbm_surface, wl_vk_surface, tsq_err);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ tbm_surface_internal_unref(tbm_surface);
+
+ return TPL_ERROR_NONE;
+}
+
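+/* Runs on the backend thread when the tbm_queue becomes acquirable
+ * (see the acquirable message handling above): acquires every pending
+ * tbm_surface, creates and caches its wl_buffer on first use, and
+ * either commits it directly or parks it in vblank_waiting_buffers. */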
+static tpl_result_t
+_thread_surface_queue_acquire(tpl_wl_vk_surface_t *wl_vk_surface)
+{
+ tbm_surface_h tbm_surface = NULL;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
+ tpl_wl_vk_swapchain_t *swapchain = wl_vk_surface->swapchain;
+ tpl_wl_vk_buffer_t *wl_vk_buffer = NULL;
+ tpl_bool_t ready_to_commit = TPL_TRUE;
+
+ TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+
+ while (tbm_surface_queue_can_acquire(swapchain->tbm_queue, 0)) {
+ tsq_err = tbm_surface_queue_acquire(swapchain->tbm_queue,
+ &tbm_surface);
+ if (!tbm_surface || tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to acquire from tbm_queue(%p)",
+ swapchain->tbm_queue);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ tbm_surface_internal_ref(tbm_surface);
+
+ wl_vk_buffer = _get_wl_vk_buffer(tbm_surface);
+		TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_vk_buffer != NULL,
+									   "wl_vk_buffer should not be NULL");
+
+ tpl_gmutex_lock(&wl_vk_buffer->mutex);
+
+ wl_vk_buffer->status = ACQUIRED;
+
+ TPL_LOG_T("WL_VK", "[ACQ] wl_vk_buffer(%p) tbm_surface(%p) bo(%d)",
+ wl_vk_buffer, tbm_surface,
+ _get_tbm_surface_bo_name(tbm_surface));
+
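+		/* The wl_buffer is created once per tbm_surface and cached in
+		 * the wl_vk_buffer; it is destroyed in __cb_wl_vk_buffer_free(). */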
+ if (wl_vk_buffer->wl_buffer == NULL) {
+ wl_vk_buffer->wl_buffer =
+ (struct wl_proxy *)wayland_tbm_client_create_buffer(
+ wl_vk_display->wl_tbm_client, tbm_surface);
+
+ if (!wl_vk_buffer->wl_buffer) {
+ TPL_WARN("Failed to create wl_buffer. wl_tbm_client(%p) tbm_surface(%p)",
+ wl_vk_display->wl_tbm_client, tbm_surface);
+ } else {
+				TPL_LOG_T("WL_VK",
+ "[WL_BUFFER_CREATE] wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
+ wl_vk_buffer, wl_vk_buffer->wl_buffer, tbm_surface);
+ }
+ }
+
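+		/* Commit immediately unless vblank waiting is enabled and the
+		 * previous vblank has not arrived yet; parked buffers are
+		 * committed later, when the vblank event is handled. */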
+		if (!wl_vk_display->use_wait_vblank || wl_vk_surface->vblank_done) {
+			ready_to_commit = TPL_TRUE;
+		} else {
+ wl_vk_buffer->status = WAITING_VBLANK;
+ __tpl_list_push_back(wl_vk_surface->vblank_waiting_buffers, wl_vk_buffer);
+ ready_to_commit = TPL_FALSE;
+ }
+
+ tpl_gmutex_unlock(&wl_vk_buffer->mutex);
+
+ if (ready_to_commit)
+ _thread_wl_surface_commit(wl_vk_surface, wl_vk_buffer);
+ }
+
+ return TPL_ERROR_NONE;
+}
+
tpl_bool_t
__tpl_display_choose_backend_wayland_vk_wsi_thread(tpl_handle_t native_dpy)
{
backend->fini = __tpl_wl_vk_wsi_surface_fini;
backend->validate = __tpl_wl_vk_wsi_surface_validate;
backend->cancel_dequeued_buffer =
- __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer;
+ __tpl_wl_vk_wsi_surface_cancel_buffer;
backend->dequeue_buffer = __tpl_wl_vk_wsi_surface_dequeue_buffer;
backend->enqueue_buffer = __tpl_wl_vk_wsi_surface_enqueue_buffer;
backend->get_swapchain_buffers =
backend->destroy_swapchain = __tpl_wl_vk_wsi_surface_destroy_swapchain;
}
-static void
-__cb_wl_vk_buffer_free(tpl_wl_vk_buffer_t *wl_vk_buffer)
-{
- tpl_wl_vk_surface_t *wl_vk_surface = wl_vk_buffer->wl_vk_surface;
- tpl_wl_vk_display_t *wl_vk_display = wl_vk_surface->wl_vk_display;
-
- TPL_INFO("[BUFFER_FREE]", "wl_vk_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
- wl_vk_buffer, wl_vk_buffer->wl_buffer, wl_vk_buffer->tbm_surface);
-
- tpl_gmutex_lock(&wl_vk_surface->buffers_mutex);
- if (wl_vk_buffer->idx >= 0 && wl_vk_surface->buffers[wl_vk_buffer->idx]) {
- wl_vk_surface->buffers[wl_vk_buffer->idx] = NULL;
- wl_vk_surface->buffer_cnt--;
-
- wl_vk_buffer->idx = -1;
- }
- tpl_gmutex_unlock(&wl_vk_surface->buffers_mutex);
-
- wl_display_flush(wl_vk_display->wl_display);
-
- if (wl_vk_buffer->wl_buffer) {
- wayland_tbm_client_destroy_buffer(wl_vk_display->wl_tbm_client,
- (void *)wl_vk_buffer->wl_buffer);
- wl_vk_buffer->wl_buffer = NULL;
- }
-
- if (wl_vk_buffer->buffer_release) {
- zwp_linux_buffer_release_v1_destroy(wl_vk_buffer->buffer_release);
- wl_vk_buffer->buffer_release = NULL;
- }
-
- if (wl_vk_buffer->release_fence_fd != -1) {
- close(wl_vk_buffer->release_fence_fd);
- wl_vk_buffer->release_fence_fd = -1;
- }
-
- if (wl_vk_buffer->waiting_source) {
- tpl_gsource_destroy(wl_vk_buffer->waiting_source, TPL_FALSE);
- wl_vk_buffer->waiting_source = NULL;
- }
-
- if (wl_vk_buffer->commit_sync_fd != -1) {
- int ret = _write_to_eventfd(wl_vk_buffer->commit_sync_fd);
- if (ret == -1)
- TPL_ERR("Failed to send commit_sync signal to fd(%d)",
- wl_vk_buffer->commit_sync_fd);
- close(wl_vk_buffer->commit_sync_fd);
- wl_vk_buffer->commit_sync_fd = -1;
- }
-
- if (wl_vk_buffer->presentation_sync_fd != -1) {
- int ret = _write_to_eventfd(wl_vk_buffer->presentation_sync_fd);
- if (ret == -1)
- TPL_ERR("Failed to send presentation_sync signal to fd(%d)",
- wl_vk_buffer->presentation_sync_fd);
- close(wl_vk_buffer->presentation_sync_fd);
- wl_vk_buffer->presentation_sync_fd = -1;
- }
-
- if (wl_vk_buffer->rects) {
- free(wl_vk_buffer->rects);
- wl_vk_buffer->rects = NULL;
- wl_vk_buffer->num_rects = 0;
- }
-
- wl_vk_buffer->tbm_surface = NULL;
- wl_vk_buffer->bo_name = -1;
-
- free(wl_vk_buffer);
-}
-
static int
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
{