ENQUEUED, // 2
ACQUIRED, // 3
WAITING_SIGNALED, // 4
- WAITING_VBLANK, // 5
- COMMITTED, // 6
+ SIGNALED, // 5
+ WAITING_VBLANK, // 6
+ VBLANK_DONE, // 7
+ COMMITTED, // 8
} buffer_status_t;
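/*
 * Note: RELEASED (0) and DEQUEUED (1) precede ENQUEUED and keep their values,
 * so they are not shown in this hunk; with the added SIGNALED and VBLANK_DONE
 * states the enum now has 9 values, matching buffer_status_info[9] below.
 */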
-static const char *status_to_string[7] = {
- "RELEASED", // 0
- "DEQUEUED", // 1
- "ENQUEUED", // 2
- "ACQUIRED", // 3
- "WAITING_SIGNALED", // 4
- "WAITING_VBLANK", // 5
- "COMMITTED", // 6
+static struct buffer_status_info {
+ const char status_str[20];
+ uint32_t threshold_ms; /* Time limit in ms to reach this status */
+} buffer_status_info[9] = {
+ { "RELEASED", UINT32_MAX }, /* COMMITTED ~ RELEASE will be not traced */
+ { "DEQUEUED", UINT32_MAX }, /* RELEASED ~ DEQUEUED will be not traced */
+ { "ENQEUEUD", 20 },
+ { "ACQUIRED", 10 },
+ { "WAIT_SIG", 10 },
+ { "SIGNALED", 20 },
+ { "WAIT_VBL", 10 },
+ { "DONE_VBL", 20 },
+ { "COMMITTED", 10 }
};
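/*
 * Sketch of the lookup pattern, drawn from the call sites below rather than a
 * new API: each transition stores the new status first and then passes that
 * status's own threshold_ms instead of a per-transition macro:
 *
 *     wl_egl_buffer->status = ENQUEUED;
 *     _elapsed_between_status(wl_egl_buffer,
 *                             buffer_status_info[wl_egl_buffer->status].threshold_ms);
 *
 * i.e. threshold_ms is the maximum time a buffer should take to reach that
 * status from the previously recorded status.
 */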
struct _tpl_wl_egl_buffer {
static void
_buffers_force_release(tpl_wl_egl_surface_t *wl_egl_surface);
-#define REL_TO_DEQ 30
-#define DEQ_TO_ENQ 20
-#define DEQ_TO_CAN 20
-#define ENQ_TO_ACQ 20
-#define ACQ_TO_SIG 10
-#define ACQ_TO_VBL 10
-#define SIG_TO_VBL 30
-#define VBL_TO_CMT 20
-#define CMT_TO_REL 50
-
#define RELEASE_FROM_LAST_COMMIT 20
#define DEQUEUE_FROM_LAST_RELEASE 20
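/*
 * These two limits are per-surface rather than per-buffer: judging from the
 * call sites below, _elapsed_from_last_commit() / _elapsed_from_last_release()
 * presumably check the time since the surface's last commit / last release
 * (see _update_last_commit_time() / _update_last_release_time()) against them.
 */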
if (wl_egl_buffer->begin_status != COMMITTED && gap > threshold) {
TPL_ERR("bo_name(%d) | %s ~ %s | takes too long time %ld > %ld",
wl_egl_buffer->bo_name,
- status_to_string[wl_egl_buffer->begin_status],
- status_to_string[wl_egl_buffer->status],
+ buffer_status_info[wl_egl_buffer->begin_status].status_str,
+ buffer_status_info[wl_egl_buffer->status].status_str,
gap, threshold);
}
wl_egl_buffer->begin.tv_sec = end.tv_sec;
TPL_INFO("[FORCE_RELEASE]", "wl_egl_buffer(%p) status(%s -> %s)",
wl_egl_buffer,
- status_to_string[status],
- status_to_string[RELEASED]);
+ buffer_status_info[status].status_str,
+ buffer_status_info[RELEASED].status_str);
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
if (status > DEQUEUED && status < COMMITTED) {
if (!wl_egl_buffer->release_pending) {
TPL_INFO("[RELEASE_PENDING]", "wl_egl_surface(%p) wl_egl_buffer(%p) status(%s)",
- wl_egl_surface, wl_egl_buffer, status_to_string[status]);
+ wl_egl_surface, wl_egl_buffer, buffer_status_info[status].status_str);
TPL_INFO("[RELEASE_PENDING]", "tbm_surface(%p) bo(%d)",
wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name);
wl_egl_buffer->release_pending = TPL_TRUE;
TPL_INFO("[RELEASE]", "wl_egl_buffer(%p) status(%s -> %s)",
wl_egl_buffer,
- status_to_string[status],
- status_to_string[RELEASED]);
+ buffer_status_info[status].status_str,
+ buffer_status_info[RELEASED].status_str);
wl_egl_buffer->status = RELEASED;
tpl_gmutex_lock(&wl_egl_buffer->mutex);
wl_egl_buffer->status = DEQUEUED;
- _elapsed_between_status(wl_egl_buffer, REL_TO_DEQ);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
_elapsed_from_last_release(wl_egl_surface, wl_egl_buffer->bo_name);
/* If wl_egl_buffer->release_fence_fd is -1,
if (wl_egl_buffer) {
tpl_gmutex_lock(&wl_egl_buffer->mutex);
wl_egl_buffer->status = RELEASED;
- _elapsed_between_status(wl_egl_buffer, DEQ_TO_ENQ);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
}
tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
wl_egl_buffer->status = ENQUEUED;
- _elapsed_between_status(wl_egl_buffer, DEQ_TO_ENQ);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
TPL_LOG_T("WL_EGL",
"[ENQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
wl_egl_buffer, tbm_surface, bo_name, acquire_fence);
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
tpl_gmutex_lock(&wl_egl_buffer->mutex);
- wl_egl_buffer->status = WAITING_VBLANK;
- _elapsed_between_status(wl_egl_buffer, SIG_TO_VBL);
+ wl_egl_buffer->status = SIGNALED;
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
TPL_LOG_D("[FINALIZE]", "wl_egl_surface(%p) wl_egl_buffer(%p) wait_source(%p) fence_fd(%d)",
wl_egl_surface, wl_egl_buffer, wl_egl_buffer->waiting_source,
if (!wl_egl_surface->vblank_enable || wl_egl_surface->vblank_done) {
_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
} else {
+ wl_egl_buffer->status = WAITING_VBLANK;
tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
__tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers,
wl_egl_buffer);
tpl_gmutex_lock(&wl_egl_buffer->mutex);
wl_egl_buffer->status = ACQUIRED;
- _elapsed_between_status(wl_egl_buffer, ENQ_TO_ACQ);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
TPL_LOG_T("WL_EGL", "[ACQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
wl_egl_buffer, tbm_surface,
FD_TYPE_FENCE, &buffer_funcs,
SOURCE_TYPE_DISPOSABLE);
wl_egl_buffer->status = WAITING_SIGNALED;
- _elapsed_between_status(wl_egl_buffer, ACQ_TO_SIG);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
wl_egl_buffer->acquire_fence_fd);
ready_to_commit = TPL_TRUE;
else {
wl_egl_buffer->status = WAITING_VBLANK;
- _elapsed_between_status(wl_egl_buffer, ACQ_TO_VBL);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
__tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers, wl_egl_buffer);
tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
if (!wl_egl_buffer) break;
tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ wl_egl_buffer->status = VBLANK_DONE;
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
tpl_gmutex_unlock(&wl_egl_buffer->mutex);
wl_egl_buffer->release_fence_fd = fence;
wl_egl_buffer->status = RELEASED;
- _elapsed_between_status(wl_egl_buffer, CMT_TO_REL);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
wl_egl_surface->last_release_bo = wl_egl_buffer->bo_name;
_elapsed_from_last_commit(wl_egl_surface, wl_egl_buffer->bo_name);
_update_last_release_time(wl_egl_surface);
wl_egl_buffer->release_fence_fd = -1;
wl_egl_buffer->status = RELEASED;
- _elapsed_between_status(wl_egl_buffer, CMT_TO_REL);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
wl_egl_surface->last_release_bo = wl_egl_buffer->bo_name;
_elapsed_from_last_commit(wl_egl_surface, wl_egl_buffer->bo_name);
_update_last_release_time(wl_egl_surface);
TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
wl_egl_buffer->status = RELEASED;
- _elapsed_between_status(wl_egl_buffer, CMT_TO_REL);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
wl_egl_surface->last_release_bo = wl_egl_buffer->bo_name;
_elapsed_from_last_commit(wl_egl_surface, wl_egl_buffer->bo_name);
_update_last_release_time(wl_egl_surface);
wl_egl_buffer->need_to_commit = TPL_FALSE;
wl_egl_buffer->status = COMMITTED;
- _elapsed_between_status(wl_egl_buffer, VBL_TO_CMT);
+ _elapsed_between_status(wl_egl_buffer, buffer_status_info[wl_egl_buffer->status].threshold_ms);
_update_last_commit_time(wl_egl_surface);
wl_egl_surface->last_commit_bo = wl_egl_buffer->bo_name;
"[%d/%d] wl_egl_surface(%p), wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
++idx, buffer_cnt, wl_egl_surface, wl_egl_buffer,
wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name,
- status_to_string[wl_egl_buffer->status]);
+ buffer_status_info[wl_egl_buffer->status].status_str);
} while ((node = __tpl_list_node_next(node)));
tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
}