/* After dequeue, the buffer has not been enqueued yet,
* so cancel_dequeue must be performed. */
need_to_cancel = wl_egl_buffer->status == DEQUEUED;
- if (wl_egl_buffer->status >= ENQUEUED &&
- wl_egl_buffer->status < WAITING_VBLANK) {
+ if (wl_egl_buffer->status >= ENQUEUED) {
+ tpl_bool_t need_to_wait = TPL_FALSE;
tpl_result_t wait_result = TPL_ERROR_NONE;
- tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
- wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
- &wl_egl_buffer->mutex,
- 16); /* 16ms */
- tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
- if (wait_result == TPL_ERROR_TIME_OUT)
- TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
- wl_egl_buffer);
+
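+ /* Which state to wait for depends on the sync model: without
+ * explicit sync the buffer appears safe to stop tracking once it
+ * reaches WAITING_VBLANK; with explicit sync, release is driven by
+ * the buffer_release events below, so wait until COMMITTED. */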
+ if (!wl_egl_display->use_explicit_sync &&
+ wl_egl_buffer->status < WAITING_VBLANK)
+ need_to_wait = TPL_TRUE;
+
+ if (wl_egl_display->use_explicit_sync &&
+ wl_egl_buffer->status < COMMITTED)
+ need_to_wait = TPL_TRUE;
+
+ if (need_to_wait) {
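+ /* Bounded wait: drop the display lock, then wait up to one 60Hz
+ * frame (16ms) for this buffer's cond to be signaled; on timeout,
+ * warn instead of hanging. */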
+ tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
+ wait_result = tpl_cond_timed_wait(&wl_egl_buffer->cond,
+ &wl_egl_buffer->mutex,
+ 16); /* 16ms */
+ tpl_gmutex_lock(&wl_egl_display->wl_event_mutex);
+ if (wait_result == TPL_ERROR_TIME_OUT)
+ TPL_WARN("timeout occurred waiting for signal. wl_egl_buffer(%p)",
+ wl_egl_buffer);
+ }
}
if (need_to_release) {
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, wl_egl_buffer->tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
wl_egl_buffer->tbm_surface, tsq_err);
}
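+ /* Marking RELEASED here means the release callbacks below, which
+ * only act on COMMITTED buffers, will skip this buffer. */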
+ wl_egl_buffer->status = RELEASED;
+
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
if (need_to_release || need_to_cancel)
tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);

tbm_surface = wl_egl_buffer->tbm_surface;
if (tbm_surface_internal_is_valid(tbm_surface)) {
+
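+ /* The buffer mutex is now taken before the status check and held
+ * across the queue release, presumably to close a race with status
+ * updates made elsewhere under the same mutex. */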
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
if (wl_egl_buffer->status == COMMITTED) {
tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
tbm_surface_queue_error_e tsq_err;
- tpl_gmutex_lock(&wl_egl_buffer->mutex);
-
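+ /* A zwp_linux_buffer_release_v1 object delivers exactly one
+ * release event, so the proxy can be destroyed once it fires. */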
zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
wl_egl_buffer->buffer_release = NULL;
TPL_LOG_T("WL_EGL", "[FENCED_RELEASE] tbm_surface(%p) bo(%d) fence(%d)",
tbm_surface,
_get_tbm_surface_bo_name(tbm_surface),
fence);
- tpl_gmutex_unlock(&wl_egl_buffer->mutex);
-
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

tbm_surface_internal_unref(tbm_surface);
}
+
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
} else {
TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
}
tbm_surface = wl_egl_buffer->tbm_surface;
if (tbm_surface_internal_is_valid(tbm_surface)) {
+
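+ /* Same locking pattern as the fenced release path above: take the
+ * buffer mutex before the status check and release. */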
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
if (wl_egl_buffer->status == COMMITTED) {
tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
tbm_surface_queue_error_e tsq_err;
- tpl_gmutex_lock(&wl_egl_buffer->mutex);
-
zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
wl_egl_buffer->buffer_release = NULL;
TPL_LOG_T("WL_EGL", "[IMMEDIATE_RELEASE] wl_buffer(%p) tbm_surface(%p) bo(%d)",
wl_egl_buffer->wl_buffer, tbm_surface,
_get_tbm_surface_bo_name(tbm_surface));
- tpl_gmutex_unlock(&wl_egl_buffer->mutex);
-
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);

tbm_surface_internal_unref(tbm_surface);
}
+
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
} else {
TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
}
TRACE_ASYNC_BEGIN((int)wl_egl_buffer->tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
wl_egl_buffer->bo_name);
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
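+ /* Update the commit state under the buffer mutex, then signal so
+ * a tpl_cond_timed_wait() on this buffer (see the clear path
+ * above) can return before its 16ms timeout. */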
wl_egl_buffer->need_to_commit = TPL_FALSE;
wl_egl_buffer->status = COMMITTED;
+ tpl_gcond_signal(&wl_egl_buffer->cond);
+
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
TPL_LOG_T("WL_EGL",
"[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",
wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface,