tpl_list_t *buffers;
int buffer_cnt; /* the number of wl_egl_buffers in use */
tpl_gmutex_rec buffers_mutex;
- tbm_surface_h last_enq_buffer;
tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
tpl_bool_t frontbuffer_activated;
tpl_bool_t buffers_finalize_done;
tpl_bool_t need_force_release;
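+ /* TPL_TRUE once no wl_egl_buffer is left waiting to be committed */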
+ tpl_bool_t buffers_commit_done;
/* To make sure that tpl_gsource has been successfully finalized. */
tpl_bool_t gsource_finalized;
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
static void
_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
-static tpl_bool_t
-_check_tbm_surface_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface);
static void
__cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
static tpl_wl_egl_buffer_t *
wl_egl_surface->presentation_sync.fd = -1;
wl_egl_surface->sent_message = NONE_MESSAGE;
- wl_egl_surface->last_enq_buffer = NULL;
wl_egl_surface->buffers = __tpl_list_alloc();
wl_egl_surface->wl_egl_window = NULL;
}
- wl_egl_surface->last_enq_buffer = NULL;
-
wl_egl_surface->wl_surface = NULL;
wl_egl_surface->wl_egl_display = NULL;
wl_egl_surface->tpl_surface = NULL;
wl_egl_buffer->w_transform = -1;
tpl_gmutex_init(&wl_egl_buffer->mutex);
- tpl_gcond_init(&wl_egl_buffer->cond);
tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
__tpl_list_push_back(wl_egl_surface->buffers, (void *)wl_egl_buffer);
return wl_egl_buffer;
}
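+/* Idle callback executed on the wl_egl thread.
+ * It walks wl_egl_surface->buffers and checks whether any buffer is still
+ * waiting to be committed (status >= ENQUEUED and < COMMITTED).
+ * When no buffer is waiting, buffers_commit_done is set and surf_cond is
+ * signaled so the waiter in __tpl_wl_egl_surface_dequeue_buffer can proceed.
+ * It returns TPL_TRUE while a commit is still pending (so the check runs
+ * again) and TPL_FALSE once the check is complete. */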
+static tpl_bool_t
+__idle_cb_check_buffers_commit(void *data)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+ tpl_bool_t is_waiting_commit = TPL_FALSE;
+ tpl_bool_t ret = TPL_FALSE;
+ tpl_list_node_t *node;
+
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+ if (wl_egl_surface->buffers_commit_done) {
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ return ret;
+ }
+
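+ /* Walk the buffer list under buffers_mutex and stop at the first buffer
+  * still waiting for its wl_surface commit. */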
+ tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
+ node = __tpl_list_get_front_node(wl_egl_surface->buffers);
+ do {
+ if (!node) break;
+
+ tpl_wl_egl_buffer_t *wl_egl_buffer =
+ (tpl_wl_egl_buffer_t *)__tpl_list_node_get_data(node);
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ is_waiting_commit = (wl_egl_buffer->status >= ENQUEUED &&
+ wl_egl_buffer->status < COMMITTED);
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ if (is_waiting_commit) break;
+ } while ((node = __tpl_list_node_next(node)));
+
+ tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
+
+ wl_egl_surface->buffers_commit_done = !is_waiting_commit;
+
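+ /* All buffers committed: wake the thread blocked in dequeue and stop this
+  * idle check. Otherwise keep the idle source alive and check again. */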
+ if (!is_waiting_commit) {
+ TPL_INFO("[ALL BUFFERS COMMITTED]",
+ "wl_egl_surface(%p) consumed all previous buffers",
+ wl_egl_surface);
+ ret = TPL_FALSE;
+ tpl_gcond_signal(&wl_egl_surface->surf_cond);
+ } else {
+ ret = TPL_TRUE;
+ }
+
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+ return ret;
+}
+
#define CAN_DEQUEUE_TIMEOUT_MS 10000
#define FORCE_FLUSH_TIMEOUT_MS 1000
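+/* timeout (ms) for waiting until all enqueued buffers are committed after a tbm_queue reset */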
+#define CHECK_COMMIT_TIMEOUT_MS 200
static tbm_surface_h
__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
TPL_OBJECT_UNLOCK(surface);
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
if (wl_egl_surface->reset == TPL_TRUE) {
- if (_check_tbm_surface_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer)) {
- tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer;
- tpl_wl_egl_buffer_t *enqueued_buffer =
- _get_wl_egl_buffer(last_enq_buffer);
-
- if (enqueued_buffer) {
- tbm_surface_internal_ref(last_enq_buffer);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- tpl_gmutex_lock(&enqueued_buffer->mutex);
- while (enqueued_buffer->status >= ENQUEUED &&
- enqueued_buffer->status < COMMITTED) {
- tpl_result_t wait_result;
- TPL_INFO("[DEQ_AFTER_RESET]",
- "wl_egl_surface(%p) waiting for previous wl_egl_buffer(%p) commit",
- wl_egl_surface, enqueued_buffer);
-
- wait_result = tpl_gcond_timed_wait(&enqueued_buffer->cond,
- &enqueued_buffer->mutex,
- 200); /* 200ms */
- if (wait_result == TPL_ERROR_TIME_OUT) {
- TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
- enqueued_buffer);
- break;
- }
- }
- tpl_gmutex_unlock(&enqueued_buffer->mutex);
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- tbm_surface_internal_unref(last_enq_buffer);
- }
+ tpl_result_t result_of_waiting = TPL_ERROR_NONE;
+ wl_egl_surface->buffers_commit_done = TPL_FALSE;
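+ /* Run __idle_cb_check_buffers_commit on the wl_egl thread, then wait
+  * (up to CHECK_COMMIT_TIMEOUT_MS) until it signals that every previously
+  * enqueued buffer has been committed. */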
+ tpl_gthread_add_idle(wl_egl_display->thread,
+ __idle_cb_check_buffers_commit, wl_egl_surface);
+ do {
+ result_of_waiting = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
+ &wl_egl_surface->surf_mutex,
+ CHECK_COMMIT_TIMEOUT_MS);
+ } while (result_of_waiting != TPL_ERROR_TIME_OUT &&
+ !wl_egl_surface->buffers_commit_done);
+
+ if (result_of_waiting == TPL_ERROR_TIME_OUT) {
+ TPL_WARN("wl_egl_surface(%p) timeout error occurred", wl_egl_surface);
+ _print_buffer_lists(wl_egl_surface);
}
- wl_egl_surface->last_enq_buffer = NULL;
+ wl_egl_surface->buffers_commit_done = TPL_TRUE;
}
+
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- wl_egl_surface->last_enq_buffer = tbm_surface;
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
-
tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
if (!wl_egl_surface->vblank_enable || wl_egl_surface->vblank_done) {
_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
- tpl_gcond_signal(&wl_egl_buffer->cond);
} else {
tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
__tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers,
if (ready_to_commit) {
_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
- tpl_gcond_signal(&wl_egl_buffer->cond);
}
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
tpl_gmutex_lock(&wl_egl_buffer->mutex);
_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
- tpl_gcond_signal(&wl_egl_buffer->cond);
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
/* If tdm error such as TIMEOUT occured,
wl_egl_buffer->need_to_commit = TPL_FALSE;
wl_egl_buffer->status = COMMITTED;
- if (wl_egl_surface->last_enq_buffer == wl_egl_buffer->tbm_surface)
- wl_egl_surface->last_enq_buffer = NULL;
TPL_LOG_T("WL_EGL",
"[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
tpl_gmutex_clear(&wl_egl_buffer->mutex);
- tpl_gcond_clear(&wl_egl_buffer->cond);
free(wl_egl_buffer);
}
tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
}
-static tpl_bool_t
-_check_tbm_surface_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface)
-{
- tpl_list_node_t *node = NULL;
- tpl_bool_t ret = TPL_FALSE;
-
- /* silent return */
- if (!wl_egl_surface || !tbm_surface)
- return ret;
-
- if (!tbm_surface_internal_is_valid(tbm_surface))
- return ret;
-
- tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
- node = __tpl_list_get_front_node(wl_egl_surface->buffers);
- do {
- if (!node) break;
- tpl_wl_egl_buffer_t *wl_egl_buffer =
- (tpl_wl_egl_buffer_t *)__tpl_list_node_get_data(node);
- if (wl_egl_buffer->tbm_surface == tbm_surface) {
- ret = TPL_TRUE;
- break;
- }
- } while ((node = __tpl_list_node_next(node)));
-
- if (ret == TPL_FALSE) {
- TPL_ERR("tbm_surface(%p) is not owned by wl_egl_surface(%p)",
- tbm_surface, wl_egl_surface);
- }
-
- tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
-
- return ret;
-}