tpl_bool_t set_serial_is_used;
tpl_bool_t initialized_in_thread;
tpl_bool_t frontbuffer_activated;
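+ /* for clearing buffers at surface fini */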
+ tpl_bool_t buffers_finalize_done;
+ tpl_bool_t need_force_release;
/* To make sure that tpl_gsource has been successfully finalized. */
tpl_bool_t gsource_finalized;
/* for checking draw done */
tpl_bool_t draw_done;
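+ /* set when the buffer release is deferred at surface fini */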
+ tpl_bool_t release_pending;
+
#if TIZEN_FEATURE_ENABLE
/* to get release event via zwp_linux_buffer_release_v1 */
struct zwp_linux_buffer_release_v1 *buffer_release;
tpl_wl_egl_buffer_t *wl_egl_buffer);
static void
__cb_surface_vblank_free(void *data);
+static void
+_buffers_force_release(tpl_wl_egl_surface_t *wl_egl_surface);
static struct tizen_private *
tizen_private_create()
#endif
if (wl_egl_surface->tbm_queue) {
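+ /* buffers which could not be cleared in surface fini are force released
+  * just before the tbm_queue is destroyed */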
+ if (wl_egl_surface->need_force_release)
+ _buffers_force_release(wl_egl_surface);
+
TPL_INFO("[TBM_QUEUE_DESTROY]",
"wl_egl_surface(%p) tbm_queue(%p)",
wl_egl_surface, wl_egl_surface->tbm_queue);
}
static void
-_tpl_wl_egl_surface_buffer_clear(tpl_wl_egl_surface_t *wl_egl_surface)
+_buffers_force_release(tpl_wl_egl_surface_t *wl_egl_surface)
{
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
tpl_bool_t need_to_release = TPL_FALSE;
tpl_bool_t need_to_cancel = TPL_FALSE;
- buffer_status_t status = RELEASED;
- int buffer_cnt = 0;
- int idx = 0;
-
- tpl_gthread_pause_in_idle(wl_egl_display->thread);
+ tpl_list_node_t *node = NULL;
- buffer_cnt = __tpl_list_get_count(wl_egl_surface->buffers);
+ TPL_INFO("[BUFFER_FORCE_RELEASE_BEGIN]", "wl_egl_surface(%p)", wl_egl_surface);
+ node = __tpl_list_get_front_node(wl_egl_surface->buffers);
+ do {
+ if (!node) break;
- while (!__tpl_list_is_empty(wl_egl_surface->buffers)) {
- tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(wl_egl_surface->buffers,
- NULL);
+ tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_node_get_data(node);
tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ buffer_status_t status = wl_egl_buffer->status;
- status = wl_egl_buffer->status;
-
- TPL_INFO("[BUFFER_CLEAR]",
- "[%d/%d] wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
- ++idx, buffer_cnt, wl_egl_surface, wl_egl_buffer,
- wl_egl_buffer->tbm_surface,
- status_to_string[status]);
-
- if (status >= ENQUEUED) {
- tpl_result_t wait_result = TPL_ERROR_NONE;
-
- while (status < COMMITTED && wait_result != TPL_ERROR_TIME_OUT) {
- tpl_gthread_continue(wl_egl_display->thread);
- wait_result = tpl_gcond_timed_wait(&wl_egl_buffer->cond,
- &wl_egl_buffer->mutex,
- 500); /* 500ms */
- tpl_gthread_pause_in_idle(wl_egl_display->thread);
- status = wl_egl_buffer->status; /* update status */
-
- if (wait_result == TPL_ERROR_TIME_OUT) {
- TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p) status(%s)",
- wl_egl_buffer, status_to_string[status]);
- }
- }
+ if (status == RELEASED) {
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ continue;
}
- /* ACQUIRED, WAITING_SIGNALED, WAITING_VBLANK, COMMITTED */
- /* It has been acquired but has not yet been released, so this
- * buffer must be released. */
need_to_release = (status >= ACQUIRED && status <= COMMITTED);
-
- /* After dequeue, it has not been enqueued yet
- * so cancel_dequeue must be performed. */
need_to_cancel = (status == DEQUEUED);
if (need_to_release) {
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
wl_egl_buffer->tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
wl_egl_buffer->tbm_surface, tsq_err);
}
if (need_to_cancel) {
tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
wl_egl_buffer->tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("Failed to release tbm_surface(%p) tsq_err(%d)",
+ TPL_ERR("Failed to cancel dequeue tbm_surface(%p) tsq_err(%d)",
wl_egl_buffer->tbm_surface, tsq_err);
}
wl_egl_buffer->status = RELEASED;
+ TPL_INFO("[FORCE_RELEASE]", "wl_egl_buffer(%p) status(%s -> %s)",
+ wl_egl_buffer,
+ status_to_string[status],
+ status_to_string[RELEASED]);
+
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
- if (need_to_release || need_to_cancel || status == ENQUEUED)
+ if (need_to_release || need_to_cancel)
tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+
+ } while ((node = __tpl_list_node_next(node)));
+
+ wl_egl_surface->need_force_release = TPL_FALSE;
+ TPL_INFO("[BUFFER_FORCE_RELEASE_END]", "wl_egl_surface(%p)", wl_egl_surface);
+}
+
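+/* Idle callback which runs on the wl_egl_display thread.
+ * It releases COMMITTED buffers, cancels DEQUEUED ones and marks in-flight
+ * buffers as release_pending. While any buffer is still pending it returns
+ * TPL_TRUE to be called again; once nothing is pending it signals surf_cond
+ * and returns TPL_FALSE. */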
+static int
+__idle_cb_buffers_finalize(void *data)
+{
+ tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)data;
+ TPL_CHECK_ON_NULL_RETURN_VAL(wl_egl_surface, TPL_FALSE);
+
+ int pending_cnt = 0;
+ tpl_list_node_t *node = NULL;
+ tpl_bool_t ret = TPL_TRUE;
+
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+
+ if (wl_egl_surface->buffers_finalize_done) {
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ return TPL_FALSE;
+ }
+
+ tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+
+ node = __tpl_list_get_front_node(wl_egl_surface->buffers);
+ do {
+ if (!node) break;
+
+ tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_node_get_data(node);
+
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ buffer_status_t status = wl_egl_buffer->status;
+
+ if (status == RELEASED) {
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ continue;
+ }
+
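+ /* in-flight buffers (enqueued but not yet committed) cannot be released here.
+  * mark them release_pending and retry on the next idle call. */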
+ if (status > DEQUEUED && status < COMMITTED) {
+ if (!wl_egl_buffer->release_pending) {
+ TPL_INFO("[RELEASE_PENDING]", "wl_egl_surface(%p) wl_egl_buffer(%p) status(%s)",
+ wl_egl_surface, wl_egl_buffer, status_to_string[status]);
+ TPL_INFO("[RELEASE_PENDING]", "tbm_surface(%p) bo(%d)",
+ wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name);
+ wl_egl_buffer->release_pending = TPL_TRUE;
+ }
+
+ pending_cnt++;
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ continue;
+ }
+
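+ /* COMMITTED buffers are returned to the tbm_queue and their wl_buffer destroyed.
+  * DEQUEUED buffers only need cancel_dequeue. */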
+ if (status == COMMITTED) {
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
+
+ tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+ wl_egl_buffer->tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+ wl_egl_buffer->tbm_surface, tsq_err);
+
+ if (wl_egl_display->wl_tbm_client && wl_egl_buffer->wl_buffer) {
+ wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
+ (void *)wl_egl_buffer->wl_buffer);
+ wl_egl_buffer->wl_buffer = NULL;
+ wl_display_flush(wl_egl_display->wl_display);
+ }
+
+ } else if (status == DEQUEUED) {
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+ tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+ wl_egl_buffer->tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("Failed to cancel dequeue. tbm_surface(%p) tsq_err(%d)",
+ wl_egl_buffer->tbm_surface, tsq_err);
+ }
+
+ TPL_INFO("[RELEASE]", "wl_egl_buffer(%p) status(%s -> %s)",
+ wl_egl_buffer,
+ status_to_string[status],
+ status_to_string[RELEASED]);
+
+ wl_egl_buffer->status = RELEASED;
+
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+ tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+
+ } while ((node = __tpl_list_node_next(node)));
+
+ tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
+ if (pending_cnt == 0) {
+ wl_egl_surface->buffers_finalize_done = TPL_TRUE;
+ tpl_gcond_signal(&wl_egl_surface->surf_cond);
+ TPL_INFO("[BUFFERS_FINALIZE DONE]", "wl_egl_surface(%p)", wl_egl_surface);
+ ret = TPL_FALSE;
}
- tpl_gthread_continue(wl_egl_display->thread);
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
+ return ret;
}
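+/* maximum time (ms) to wait for __idle_cb_buffers_finalize in surface fini */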
+#define BUFFER_CLEAR_WAITING_TIMEOUT_MS 1000
+
static void
__tpl_wl_egl_surface_fini(tpl_surface_t *surface)
{
wl_egl_display = wl_egl_surface->wl_egl_display;
TPL_CHECK_ON_NULL_RETURN(wl_egl_display);
+ tpl_result_t result_of_waiting = TPL_ERROR_NONE;
+
TPL_INFO("[SURFACE_FINI][BEGIN]",
"wl_egl_surface(%p) wl_surface(%p) tbm_queue(%p)",
wl_egl_surface,
wl_egl_surface->wl_surface, wl_egl_surface->tbm_queue);
- _tpl_wl_egl_surface_buffer_clear(wl_egl_surface);
+ tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+
+ _print_buffer_lists(wl_egl_surface);
+
+ wl_egl_surface->need_force_release = TPL_FALSE;
+ wl_egl_surface->buffers_finalize_done = TPL_FALSE;
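+ /* Clear buffers on the thread via the idle callback and wait for it.
+  * If waiting times out, need_force_release is set so that the remaining
+  * buffers are force released when the tbm_queue is destroyed. */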
+ tpl_gthread_add_idle(wl_egl_display->thread,
+ __idle_cb_buffers_finalize, wl_egl_surface);
+ while (!wl_egl_surface->buffers_finalize_done &&
+ result_of_waiting != TPL_ERROR_TIME_OUT) {
+ result_of_waiting = tpl_gcond_timed_wait(&wl_egl_surface->surf_cond,
+ &wl_egl_surface->surf_mutex,
+ BUFFER_CLEAR_WAITING_TIMEOUT_MS);
+ if (result_of_waiting == TPL_ERROR_TIME_OUT)
+ TPL_WARN("buffer clear timeout. wl_egl_surface(%p)", wl_egl_surface);
+
+ wl_egl_surface->buffers_finalize_done = TPL_TRUE;
+ wl_egl_surface->need_force_release = (result_of_waiting != TPL_ERROR_NONE);
+ }
if (wl_egl_surface->surf_source) {
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
// Send destroy message to thread
tpl_gsource_destroy(wl_egl_surface->surf_source, TPL_TRUE);
/* This is a protection to prevent problems that arise in unexpected situations. */
tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
}
wl_egl_surface->surf_source = NULL;
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
+ tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+
if (wl_egl_surface->wl_egl_window) {
struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
struct tizen_private *tizen_private = (struct tizen_private *)wl_egl_window->driver_private;
TPL_ASSERT(tizen_private);
wl_egl_buffer->draw_done = TPL_FALSE;
+ wl_egl_buffer->release_pending = TPL_FALSE;
wl_egl_buffer->need_to_commit = TPL_TRUE;
#if TIZEN_FEATURE_ENABLE
wl_egl_buffer->buffer_release = NULL;