tpl_wl_egl_display_t *wl_egl_display;
tpl_surface_t *tpl_surface;
- /* wl_egl_buffer array for buffer tracing */
- tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE];
+ /* wl_egl_buffer list for buffer tracing */
+ tpl_list_t *buffers;
- int buffer_cnt; /* the number of using wl_egl_buffers */
tpl_gmutex buffers_mutex;
tbm_surface_h last_enq_buffer;
int width, height; /* size to attach to wl_surface */
buffer_status_t status; /* for tracing buffer status */
- int idx; /* position index in buffers array of wl_egl_surface */
/* for damage region */
int num_rects;
wl_egl_surface->presentation_sync.fd = -1;
wl_egl_surface->sent_message = NONE_MESSAGE;
-
- {
- int i = 0;
- for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
- wl_egl_surface->buffers[i] = NULL;
- wl_egl_surface->buffer_cnt = 0;
- }
-
wl_egl_surface->last_enq_buffer = NULL;
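+ /* Track wl_egl_buffers in a growable list instead of the fixed-size
+  * array. Entries are appended on buffer creation and unlinked on free. */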
+ wl_egl_surface->buffers = __tpl_list_alloc();
+
{
struct tizen_private *tizen_private = NULL;
tpl_bool_t need_to_release = TPL_FALSE;
tpl_bool_t need_to_cancel = TPL_FALSE;
buffer_status_t status = RELEASED;
+ int buffer_cnt = 0;
int idx = 0;
tpl_gthread_pause_in_idle(wl_egl_display->thread);
- TPL_INFO("[BUFFER_CLEAR]", "BEGIN | wl_egl_surface(%p)", wl_egl_surface);
+ buffer_cnt = __tpl_list_get_count(wl_egl_surface->buffers);
+ TPL_INFO("[BUFFER_CLEAR]", "BEGIN | wl_egl_surface(%p) buffer_cnt(%d)",
+ wl_egl_surface, buffer_cnt);
- while (wl_egl_surface->buffer_cnt) {
- wl_egl_buffer = wl_egl_surface->buffers[idx];
-
- if (wl_egl_buffer) {
- wl_egl_surface->buffers[idx] = NULL;
- wl_egl_surface->buffer_cnt--;
- } else {
- idx++;
- continue;
- }
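+ /* Drain the tracking list. Popping with a NULL free_func only unlinks
+  * the entry; the wl_egl_buffer itself stays valid here. */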
+ while (!__tpl_list_is_empty(wl_egl_surface->buffers)) {
+ wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(wl_egl_surface->buffers,
+ NULL);
tpl_gmutex_lock(&wl_egl_buffer->mutex);
status = wl_egl_buffer->status;
- TPL_INFO("[BUFFER]","idx(%d)| wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
- idx, wl_egl_buffer,
+ TPL_INFO("[BUFFER]","[%d/%d]| wl_egl_buffer(%p) tbm_surface(%p) status(%s)",
+ ++idx, buffer_cnt, wl_egl_buffer,
wl_egl_buffer->tbm_surface,
status_to_string[status]);
if (need_to_release || need_to_cancel)
tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
-
- idx++;
}
TPL_INFO("[BUFFER_CLEAR]", "END | wl_egl_surface(%p)", wl_egl_surface);
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
- _print_buffer_lists(wl_egl_surface);
-
if (wl_egl_surface->wl_egl_window) {
struct tizen_private *tizen_private = NULL;
struct wl_egl_window *wl_egl_window = wl_egl_surface->wl_egl_window;
wl_egl_surface->wl_egl_display = NULL;
wl_egl_surface->tpl_surface = NULL;
+ tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
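+ /* Free only the list itself; any remaining wl_egl_buffers are owned by
+  * and freed with their tbm_surfaces. */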
+ __tpl_list_free(wl_egl_surface->buffers, NULL);
+ wl_egl_surface->buffers = NULL;
+ tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+ tpl_gmutex_clear(&wl_egl_surface->buffers_mutex);
+
tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
tpl_gmutex_clear(&wl_egl_surface->commit_sync.mutex);
return TPL_ERROR_INVALID_OPERATION;
}
- {
- int i;
- tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
- for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
- buffer_status_t status;
- wl_egl_buffer = wl_egl_surface->buffers[i];
- if (wl_egl_buffer) {
- status = wl_egl_buffer->status;
- } else {
- continue;
- }
+ while (!__tpl_list_is_empty(wl_egl_surface->buffers)) {
+ tpl_wl_egl_buffer_t *wl_egl_buffer =
+ (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(wl_egl_surface->buffers,
+ NULL);
+ tpl_bool_t need_to_release = (wl_egl_buffer->status >= ACQUIRED) &&
+ (wl_egl_buffer->status <= COMMITTED);
- if (status > ENQUEUED && status <= COMMITTED) {
- tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
- wl_egl_buffer->tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
- wl_egl_buffer->tbm_surface, tsq_err);
- tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
- }
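+ /* Buffers between ACQUIRED and COMMITTED still hold a queue reference
+  * and must be returned to the tbm_surface_queue. */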
+ if (need_to_release) {
+ tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+ wl_egl_buffer->tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+ TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+ wl_egl_buffer->tbm_surface, tsq_err);
+ tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
}
}
tpl_gcond_init(&wl_egl_buffer->cond);
tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
- {
- int i;
- for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
- if (wl_egl_surface->buffers[i] == NULL) break;
-
- /* If this exception is reached,
- * it may be a critical memory leak problem. */
- if (i == BUFFER_ARRAY_SIZE) {
- tpl_wl_egl_buffer_t *evicted_buffer = NULL;
- int evicted_idx = 0; /* evict the frontmost buffer */
-
- evicted_buffer = wl_egl_surface->buffers[evicted_idx];
-
- TPL_WARN("wl_egl_surface(%p) buffers array is full. evict one.",
- wl_egl_surface);
- TPL_WARN("evicted buffer (%p) tbm_surface(%p) status(%s)",
- evicted_buffer, evicted_buffer->tbm_surface,
- status_to_string[evicted_buffer->status]);
-
- /* [TODO] need to think about whether there will be
- * better modifications */
- wl_egl_surface->buffer_cnt--;
- wl_egl_surface->buffers[evicted_idx] = NULL;
-
- i = evicted_idx;
- }
-
- wl_egl_surface->buffer_cnt++;
- wl_egl_surface->buffers[i] = wl_egl_buffer;
- wl_egl_buffer->idx = i;
- }
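+ /* The list grows on demand, so no eviction is needed when appending. */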
+ __tpl_list_push_back(wl_egl_surface->buffers, (void *)wl_egl_buffer);
tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
TPL_INFO("[WL_EGL_BUFFER_CREATE]",
wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
- if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) {
- wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL;
- wl_egl_surface->buffer_cnt--;
-
- wl_egl_buffer->idx = -1;
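+ /* The surface may already be finalized; its tracking list is then NULL. */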
+ if (wl_egl_surface->buffers) {
+ __tpl_list_remove_data(wl_egl_surface->buffers, (void *)wl_egl_buffer,
+ TPL_FIRST, NULL);
}
tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
- if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
- tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
- __tpl_list_remove_data(wl_egl_surface->vblank->waiting_buffers,
- (void *)wl_egl_buffer,
- TPL_FIRST,
- NULL);
- tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
- }
-
if (wl_egl_display) {
if (wl_egl_buffer->wl_buffer) {
wayland_tbm_client_destroy_buffer(wl_egl_display->wl_tbm_client,
static void
_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
{
+ tpl_list_node_t *node = NULL;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+ int buffer_cnt = 0;
int idx = 0;
tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+ buffer_cnt = __tpl_list_get_count(wl_egl_surface->buffers);
TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)",
- wl_egl_surface, wl_egl_surface->buffer_cnt);
- for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
- tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
- if (wl_egl_buffer) {
- TPL_INFO("[INFO]",
- "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
- idx, wl_egl_buffer, wl_egl_buffer->tbm_surface,
- wl_egl_buffer->bo_name,
- status_to_string[wl_egl_buffer->status]);
- }
- }
+ wl_egl_surface, buffer_cnt);
+
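+ /* Traverse under buffers_mutex so entries cannot be unlinked mid-walk. */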
+ node = __tpl_list_get_front_node(wl_egl_surface->buffers);
+ while (node) {
+ wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_node_get_data(node);
+ idx++; /* increment outside the log macro */
+ TPL_INFO("[INFO]",
+ "[%d/%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%s)",
+ idx, buffer_cnt, wl_egl_buffer, wl_egl_buffer->tbm_surface,
+ wl_egl_buffer->bo_name,
+ status_to_string[wl_egl_buffer->status]);
+ node = __tpl_list_node_next(node);
+ }
tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
}
static tpl_bool_t
_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface)
{
- int idx = 0;
+ tpl_list_node_t *node = NULL;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
tpl_bool_t ret = TPL_FALSE;
/* silent return */
return ret;
tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
- for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
- tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
- if (wl_egl_buffer && wl_egl_buffer->tbm_surface == tbm_surface) {
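+ /* Linear search; the list holds only this surface's live wl_egl_buffers. */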
+ node = __tpl_list_get_front_node(wl_egl_surface->buffers);
+ while (node) {
+ wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_node_get_data(node);
+ if (wl_egl_buffer->tbm_surface == tbm_surface) {
ret = TPL_TRUE;
break;
}
- }
+ node = __tpl_list_node_next(node);
+ }
- if (ret == TPL_FALSE || idx == BUFFER_ARRAY_SIZE) {
+ if (ret == TPL_FALSE) {
TPL_ERR("tbm_surface(%p) is not owned by wl_egl_surface(%p)",
tbm_surface, wl_egl_surface);
}
+
tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
return ret;