/* To make sure that tpl_gsource has been successfully finalized. */
tpl_bool_t gsource_finalized;
+
+ struct timespec last_commit;
+ int last_commit_bo;
+ struct timespec last_release;
+ int last_release_bo;
};
struct _surface_vblank {
tpl_gcond cond;
tpl_wl_egl_surface_t *wl_egl_surface;
+
+ struct timespec begin;
+ buffer_status_t begin_status;
};
#if TIZEN_FEATURE_ENABLE
static void
_buffers_force_release(tpl_wl_egl_surface_t *wl_egl_surface);
/* Per-transition latency thresholds, in milliseconds. When a buffer
 * takes longer than the threshold to move between the two named
 * states, _elapsed_between_status() emits a TPL_ERR log.
 * Abbreviations presumably map to buffer_status_t values:
 * REL=RELEASED, DEQ=DEQUEUED, ENQ=ENQUEUED, CAN=CANCELED,
 * ACQ=ACQUIRED, SIG=WAITING_SIGNALED, VBL=WAITING_VBLANK,
 * CMT=COMMITTED -- TODO(review): confirm against buffer_status_t. */
#define REL_TO_DEQ 30
#define DEQ_TO_ENQ 20
#define DEQ_TO_CAN 20
#define ENQ_TO_ACQ 20
#define ACQ_TO_SIG 10
#define ACQ_TO_VBL 10
#define SIG_TO_VBL 30
#define VBL_TO_CMT 20
#define CMT_TO_REL 50

/* Cross-buffer thresholds (ms): previous commit -> this release, and
 * previous release -> this dequeue. */
#define RELEASE_FROM_LAST_COMMIT 20
#define DEQUEUE_FROM_LAST_RELEASE 20
+
/* Return the elapsed time from 'begin' to 'end' in milliseconds.
 * Each timestamp is truncated to whole milliseconds independently
 * before subtracting, so sub-millisecond residue never contributes. */
static long
_cal_time_gap(struct timespec begin, struct timespec end)
{
	long begin_ms = (1000 * begin.tv_sec) + (begin.tv_nsec / 1000000);
	long end_ms = (1000 * end.tv_sec) + (end.tv_nsec / 1000000);

	return end_ms - begin_ms;
}
+
+static void
+_elapsed_between_status(tpl_wl_egl_buffer_t *wl_egl_buffer, long threshold)
+{
+ struct timespec end;
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ long gap = _cal_time_gap(wl_egl_buffer->begin, end);
+ if (wl_egl_buffer->begin_status != COMMITTED && gap > threshold) {
+ TPL_ERR("bo_name(%d) | %s ~ %s | takes too long time %ld > %ld",
+ wl_egl_buffer->bo_name,
+ status_to_string[wl_egl_buffer->begin_status],
+ status_to_string[wl_egl_buffer->status],
+ gap, threshold);
+ }
+ wl_egl_buffer->begin.tv_sec = end.tv_sec;
+ wl_egl_buffer->begin.tv_nsec = end.tv_nsec;
+ wl_egl_buffer->begin_status = wl_egl_buffer->status;
+}
+
+static void
+_elapsed_from_last_commit(tpl_wl_egl_surface_t *wl_egl_surface, int bo_name)
+{
+ struct timespec release;
+ clock_gettime(CLOCK_MONOTONIC, &release);
+ long gap = _cal_time_gap(wl_egl_surface->last_commit, release);
+ if (gap > RELEASE_FROM_LAST_COMMIT) {
+ TPL_ERR("last COMMIT(%d) ~ RELEASE(%d) | takes too long time %ld > %d",
+ wl_egl_surface->last_commit_bo, bo_name,
+ gap, RELEASE_FROM_LAST_COMMIT);
+ }
+}
+
+static void
+_update_last_commit_time(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+ struct timespec commit;
+ clock_gettime(CLOCK_MONOTONIC, &commit);
+ wl_egl_surface->last_commit.tv_sec = commit.tv_sec;
+ wl_egl_surface->last_commit.tv_nsec = commit.tv_nsec;
+}
+
+static void
+_elapsed_from_last_release(tpl_wl_egl_surface_t *wl_egl_surface, int bo_name)
+{
+ struct timespec dequeue;
+ clock_gettime(CLOCK_MONOTONIC, &dequeue);
+ long gap = _cal_time_gap(wl_egl_surface->last_release, dequeue);
+ if (gap > RELEASE_FROM_LAST_COMMIT) {
+ TPL_ERR("last RELEASE(%d) ~ DEQUEUE(%d) | takes too long time %ld > %d",
+ wl_egl_surface->last_release_bo, bo_name,
+ gap, DEQUEUE_FROM_LAST_RELEASE);
+ }
+}
+
+static void
+_update_last_release_time(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+ struct timespec release;
+ clock_gettime(CLOCK_MONOTONIC, &release);
+ wl_egl_surface->last_release.tv_sec = release.tv_sec;
+ wl_egl_surface->last_release.tv_nsec = release.tv_nsec;
+}
+
static struct tizen_private *
tizen_private_create()
{
wl_egl_surface->buffers = __tpl_list_alloc();
+ _update_last_release_time(wl_egl_surface);
+ wl_egl_surface->last_release_bo = -1;
+
{
struct tizen_private *tizen_private = NULL;
wl_egl_buffer->wl_egl_surface = wl_egl_surface;
wl_egl_buffer->status = RELEASED;
+ wl_egl_buffer->begin_status = wl_egl_buffer->status;
+ clock_gettime(CLOCK_MONOTONIC, &wl_egl_buffer->begin);
wl_egl_buffer->acquire_fence_fd = -1;
wl_egl_buffer->commit_sync_fd = -1;
tpl_gmutex_lock(&wl_egl_buffer->mutex);
wl_egl_buffer->status = DEQUEUED;
+ _elapsed_between_status(wl_egl_buffer, REL_TO_DEQ);
+ _elapsed_from_last_release(wl_egl_surface, wl_egl_buffer->bo_name);
/* If wl_egl_buffer->release_fence_fd is -1,
* the tbm_surface can be used immediately.
if (wl_egl_buffer) {
tpl_gmutex_lock(&wl_egl_buffer->mutex);
wl_egl_buffer->status = RELEASED;
+ _elapsed_between_status(wl_egl_buffer, DEQ_TO_ENQ);
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
}
tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
wl_egl_buffer->status = ENQUEUED;
+ _elapsed_between_status(wl_egl_buffer, DEQ_TO_ENQ);
TPL_LOG_T("WL_EGL",
"[ENQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
wl_egl_buffer, tbm_surface, bo_name, acquire_fence);
tpl_gmutex_lock(&wl_egl_buffer->mutex);
wl_egl_buffer->status = WAITING_VBLANK;
+ _elapsed_between_status(wl_egl_buffer, SIG_TO_VBL);
TPL_LOG_D("[FINALIZE]", "wl_egl_surface(%p) wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
wl_egl_surface, wl_egl_buffer, wl_egl_buffer->waiting_source,
.finalize = __thread_func_waiting_source_finalize,
};
+
+
static tpl_result_t
_thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface)
{
tpl_gmutex_lock(&wl_egl_buffer->mutex);
wl_egl_buffer->status = ACQUIRED;
+ _elapsed_between_status(wl_egl_buffer, ENQ_TO_ACQ);
TPL_LOG_T("WL_EGL", "[ACQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
wl_egl_buffer, tbm_surface,
FD_TYPE_FENCE, &buffer_funcs,
SOURCE_TYPE_DISPOSABLE);
wl_egl_buffer->status = WAITING_SIGNALED;
+ _elapsed_between_status(wl_egl_buffer, ACQ_TO_SIG);
TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
wl_egl_buffer->acquire_fence_fd);
ready_to_commit = TPL_TRUE;
else {
wl_egl_buffer->status = WAITING_VBLANK;
+ _elapsed_between_status(wl_egl_buffer, ACQ_TO_VBL);
tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
__tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers, wl_egl_buffer);
tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
wl_egl_buffer->release_fence_fd = fence;
wl_egl_buffer->status = RELEASED;
+ _elapsed_between_status(wl_egl_buffer, CMT_TO_REL);
+ wl_egl_surface->last_release_bo = wl_egl_buffer->bo_name;
+ _elapsed_from_last_commit(wl_egl_surface, wl_egl_buffer->bo_name);
+ _update_last_release_time(wl_egl_surface);
TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
_get_tbm_surface_bo_name(tbm_surface),
wl_egl_buffer->release_fence_fd = -1;
wl_egl_buffer->status = RELEASED;
+ _elapsed_between_status(wl_egl_buffer, CMT_TO_REL);
+ wl_egl_surface->last_release_bo = wl_egl_buffer->bo_name;
+ _elapsed_from_last_commit(wl_egl_surface, wl_egl_buffer->bo_name);
+ _update_last_release_time(wl_egl_surface);
TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
_get_tbm_surface_bo_name(tbm_surface));
TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
wl_egl_buffer->status = RELEASED;
+ _elapsed_between_status(wl_egl_buffer, CMT_TO_REL);
+ wl_egl_surface->last_release_bo = wl_egl_buffer->bo_name;
+ _elapsed_from_last_commit(wl_egl_surface, wl_egl_buffer->bo_name);
+ _update_last_release_time(wl_egl_surface);
TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
TRACE_ASYNC_END((intptr_t)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
wl_egl_buffer->need_to_commit = TPL_FALSE;
wl_egl_buffer->status = COMMITTED;
+ _elapsed_between_status(wl_egl_buffer, VBL_TO_CMT);
+ _update_last_commit_time(wl_egl_surface);
+
+ wl_egl_surface->last_commit_bo = wl_egl_buffer->bo_name;
TPL_LOG_T("WL_EGL",
"[COMMIT] wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p) bo(%d)",