tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE];
int buffer_cnt; /* the number of using wl_egl_buffers */
tpl_gmutex buffers_mutex;
+ tpl_wl_egl_buffer_t *last_deq_buffer;
tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
wl_egl_surface->buffer_cnt = 0;
}
+ wl_egl_surface->last_deq_buffer = NULL;
+
{
struct tizen_private *tizen_private = NULL;
wl_egl_surface->wl_egl_window = NULL;
}
+ wl_egl_surface->last_deq_buffer = NULL;
+
wl_egl_surface->wl_surface = NULL;
wl_egl_surface->wl_egl_display = NULL;
wl_egl_surface->tpl_surface = NULL;
tbm_surface_h tbm_surface = NULL;
TPL_OBJECT_UNLOCK(surface);
+ if (wl_egl_surface->reset == TPL_TRUE && wl_egl_surface->last_deq_buffer) {
+ tpl_wl_egl_buffer_t *last_deq_buffer = wl_egl_surface->last_deq_buffer;
+
+ tpl_gmutex_lock(&last_deq_buffer->mutex);
+ if (last_deq_buffer->status > RELEASED &&
+ last_deq_buffer->status < COMMITTED) {
+ tpl_result_t wait_result;
+ wait_result = tpl_cond_timed_wait(&last_deq_buffer->cond,
+ &last_deq_buffer->mutex,
+ 200); /* 200ms */
+
+ if (wait_result == TPL_ERROR_TIME_OUT)
+ TPL_WARN("timeout occured waiting signaled. wl_egl_buffer(%p)",
+ last_deq_buffer);
+ }
+ tpl_gmutex_unlock(&last_deq_buffer->mutex);
+
+ wl_egl_surface->last_deq_buffer = NULL;
+ }
tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
wl_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
TPL_OBJECT_LOCK(surface);
tpl_gmutex_lock(&wl_egl_buffer->mutex);
wl_egl_buffer->status = DEQUEUED;
+ wl_egl_surface->last_deq_buffer = wl_egl_buffer;
/* If wl_egl_buffer->release_fence_fd is -1,
* the tbm_surface can be used immediately.
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
}
+ /* This buffer is about to be freed below; clear the surface's
+  * last_deq_buffer reference if it still points here, so the
+  * dequeue-wait path cannot touch a dangling pointer. */
+ if (wl_egl_buffer == wl_egl_surface->last_deq_buffer)
+ wl_egl_surface->last_deq_buffer = NULL;
+
tbm_surface_internal_unref(tbm_surface);
tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
wl_egl_buffer->tbm_surface = NULL;
wl_egl_buffer->bo_name = -1;
+ wl_egl_buffer->status = RELEASED;
free(wl_egl_buffer);
}