/* In wayland, application and compositor create its own drawing buffers. Recommend size is more than 2. */
#define CLIENT_QUEUE_SIZE 3
+#define BUFFER_ARRAY_SIZE (CLIENT_QUEUE_SIZE * 2)
typedef struct _tpl_wl_egl_display tpl_wl_egl_display_t;
typedef struct _tpl_wl_egl_surface tpl_wl_egl_surface_t;
tpl_wl_egl_display_t *wl_egl_display;
tpl_surface_t *tpl_surface;
- /* the lists for buffer tracing */
- tpl_list_t *committed_buffers; /* Trace tbm_surface from wl_surface_commit() to RELEASE */
- tpl_list_t *in_use_buffers; /* Trace tbm_surface from DEQUEUE to ENQUEUE */
- tpl_list_t *fence_waiting_buffers; /* Trace buffers from ENQUEUE to fence signaled */
+ /* wl_egl_buffer array for buffer tracing */
+ tpl_wl_egl_buffer_t *buffers[BUFFER_ARRAY_SIZE];
+ int buffer_cnt; /* the number of using wl_egl_buffers */
+ tpl_gmutex buffers_mutex;
+
tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
tpl_bool_t set_serial_is_used;
};
+/* Lifecycle states of a wl_egl_buffer, in rough forward order:
+ * RELEASED -> DEQUEUED -> ENQUEUED -> ACQUIRED
+ *   -> (WAITING_SIGNALED) -> (WAITING_VBLANK) -> COMMITTED -> RELEASED
+ * WAITING_SIGNALED / WAITING_VBLANK are skipped when no acquire fence
+ * is pending / vblank is already done. */
+typedef enum buffer_status {
+	RELEASED = 0,      /* owned by tbm_queue; also set on cancel_dequeue */
+	DEQUEUED,          /* handed to the client for drawing */
+	ENQUEUED,          /* enqueued to tbm_queue after draw */
+	ACQUIRED,          /* acquired by the worker thread */
+	WAITING_SIGNALED,  /* waiting for the acquire fence to signal */
+	WAITING_VBLANK,    /* waiting for vblank (FIFO/FIFO_RELAXED) */
+	COMMITTED,         /* committed to wl_surface; awaiting release */
+} buffer_status_t;
+
struct _tpl_wl_egl_buffer {
	tbm_surface_h tbm_surface;
	int dx, dy; /* position to attach to wl_surface */
	int width, height; /* size to attach to wl_surface */
+	buffer_status_t status; /* for tracing buffer status */
+	int idx; /* position index in buffers array of wl_egl_surface */
+
	/* for damage region */
	int num_rects;
	int *rects;
	/* for checking need_to_commit (frontbuffer mode) */
	tpl_bool_t need_to_commit;
-	/* for checking need to release */
-	tpl_bool_t need_to_release;
-
	/* for checking draw done */
	tpl_bool_t draw_done;
	tpl_gsource *waiting_source;
+	tpl_gmutex mutex; /* guards status and the per-buffer fields above */
+	tpl_gcond cond; /* signaled when the acquire-fence wait completes */
+
	tpl_wl_egl_surface_t *wl_egl_surface;
};
-
-static void
-__cb_buffer_remove_from_list(void *data);
static int
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
static void
}
wl_egl_display->ev_queue = wl_display_create_queue(wl_egl_display->wl_display);
- if (wl_egl_display->ev_queue) {
+ if (!wl_egl_display->ev_queue) {
TPL_ERR("Failed to create wl_queue wl_display(%p)",
wl_egl_display->wl_display);
result = TPL_ERROR_INVALID_OPERATION;
tpl_gmutex_unlock(&wl_egl_surface->presentation_sync.mutex);
- if (wl_egl_surface->in_use_buffers) {
- __tpl_list_free(wl_egl_surface->in_use_buffers,
- (tpl_free_func_t)__cb_buffer_remove_from_list);
- wl_egl_surface->in_use_buffers = NULL;
- }
- if (wl_egl_surface->committed_buffers) {
- while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) {
- tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- tbm_surface_h tbm_surface =
- __tpl_list_pop_front(wl_egl_surface->committed_buffers,
- (tpl_free_func_t)__cb_buffer_remove_from_list);
+	{
+		tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+		tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+		int idx = 0;
+		tpl_bool_t need_to_release = TPL_FALSE;
+		tpl_bool_t need_to_cancel = TPL_FALSE;
+
+		/* Bound the scan by BUFFER_ARRAY_SIZE so an inconsistent
+		 * buffer_cnt can never drive idx past the end of the array
+		 * (buffers[idx] below would otherwise read out of bounds). */
+		while (wl_egl_surface->buffer_cnt && idx < BUFFER_ARRAY_SIZE) {
+			tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+			wl_egl_buffer = wl_egl_surface->buffers[idx];
+			if (wl_egl_buffer) {
+				TPL_DEBUG("[idx:%d] wl_egl_buffer(%p) tbm_surface(%p) status(%d)",
+						  idx, wl_egl_buffer,
+						  wl_egl_buffer->tbm_surface, wl_egl_buffer->status);
+
+				wl_egl_surface->buffers[idx] = NULL;
+				wl_egl_surface->buffer_cnt--;
+			} else {
+				tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+				idx++;
+				continue;
+			}
+			tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
-			TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
-							_get_tbm_surface_bo_name(tbm_surface));
-			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
-			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
-				TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
-						tbm_surface, tsq_err);
-		}
-		__tpl_list_free(wl_egl_surface->committed_buffers, NULL);
-		wl_egl_surface->committed_buffers = NULL;
-	}
+			tpl_gmutex_lock(&wl_egl_buffer->mutex);
-	if (wl_egl_surface->vblank_waiting_buffers) {
-		while (!__tpl_list_is_empty(wl_egl_surface->vblank_waiting_buffers)) {
-			tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
-			tbm_surface_h tbm_surface =
-				__tpl_list_pop_front(wl_egl_surface->vblank_waiting_buffers,
-									 (tpl_free_func_t)__cb_buffer_remove_from_list);
+			/* ACQUIRED..COMMITTED buffers still belong to the queue and
+			 * must be released back; DEQUEUED ones were never enqueued
+			 * and must be cancelled instead. */
+			need_to_release = (wl_egl_buffer->status == ACQUIRED ||
+							   wl_egl_buffer->status == WAITING_SIGNALED ||
+							   wl_egl_buffer->status == WAITING_VBLANK ||
+							   wl_egl_buffer->status == COMMITTED);
+
+			need_to_cancel = wl_egl_buffer->status == DEQUEUED;
-			tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
+			if (wl_egl_buffer->status == WAITING_SIGNALED)
+				tpl_gcond_wait(&wl_egl_buffer->cond, &wl_egl_buffer->mutex);
+
+			if (need_to_release) {
+				tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+													wl_egl_buffer->tbm_surface);
			if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
				TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
-						tbm_surface, tsq_err);
-		}
-		__tpl_list_free(wl_egl_surface->vblank_waiting_buffers, NULL);
-		wl_egl_surface->vblank_waiting_buffers = NULL;
-	}
+						wl_egl_buffer->tbm_surface, tsq_err);
+			}
-	if (wl_egl_surface->fence_waiting_buffers) {
-		while (!__tpl_list_is_empty(wl_egl_surface->fence_waiting_buffers)) {
-			tbm_surface_h tbm_surface =
-				__tpl_list_pop_front(wl_egl_surface->fence_waiting_buffers,
-									 NULL);
-			/* TODO */
+			if (need_to_cancel) {
+				tsq_err = tbm_surface_queue_cancel_dequeue(wl_egl_surface->tbm_queue,
+														   wl_egl_buffer->tbm_surface);
+				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+					TPL_ERR("Failed to cancel dequeue. tbm_surface(%p) tsq_err(%d)",
+							wl_egl_buffer->tbm_surface, tsq_err);
+			}
+
+			tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+			if (need_to_release || need_to_cancel)
+				tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+
+			idx++;
		}
	}
wl_egl_surface->presentation_sync.fd = -1;
{
+ int i = 0;
+ for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+ wl_egl_surface->buffers[i] = NULL;
+ wl_egl_surface->buffer_cnt = 0;
+ }
+
+ {
struct tizen_private *tizen_private = NULL;
if (wl_egl_window->driver_private)
tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
+ tpl_gmutex_init(&wl_egl_surface->buffers_mutex);
+
tpl_gmutex_init(&wl_egl_surface->surf_mutex);
tpl_gcond_init(&wl_egl_surface->surf_cond);
}
}
- wl_egl_surface->committed_buffers = __tpl_list_alloc();
- wl_egl_surface->in_use_buffers = __tpl_list_alloc();
- wl_egl_surface->fence_waiting_buffers = __tpl_list_alloc();
wl_egl_surface->vblank_waiting_buffers = __tpl_list_alloc();
wl_egl_surface->presentation_feedbacks = __tpl_list_alloc();
}
return TPL_ERROR_INVALID_OPERATION;
}
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- if (wl_egl_surface->committed_buffers) {
- while (!__tpl_list_is_empty(wl_egl_surface->committed_buffers)) {
- tbm_surface_h tbm_surface =
- __tpl_list_pop_front(wl_egl_surface->committed_buffers,
- (tpl_free_func_t)__cb_buffer_remove_from_list);
- TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
- _get_tbm_surface_bo_name(tbm_surface));
- tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue, tbm_surface);
- if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
- TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
- tbm_surface, tsq_err);
+	{
+		int i;
+		tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
+		for (i = 0; i < BUFFER_ARRAY_SIZE; i++) {
+			tpl_bool_t need_to_release = TPL_FALSE;
+
+			tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+			wl_egl_buffer = wl_egl_surface->buffers[i];
+			tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+			if (!wl_egl_buffer) continue;
+
+			/* NOTE(review): check/update status under the per-buffer
+			 * mutex so it cannot race the release callbacks, which
+			 * also take this mutex before touching status. */
+			tpl_gmutex_lock(&wl_egl_buffer->mutex);
+			if (wl_egl_buffer->status == COMMITTED) {
+				wl_egl_buffer->status = RELEASED;
+				need_to_release = TPL_TRUE;
+			}
+			tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+			if (need_to_release) {
+				tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
+													wl_egl_buffer->tbm_surface);
+				if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+					TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+							wl_egl_buffer->tbm_surface, tsq_err);
+				tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
+			}
}
}
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
TPL_INFO("[FORCE_FLUSH]",
"wl_egl_surface(%p) tbm_queue(%p)",
wl_egl_buffer->tbm_surface = tbm_surface;
wl_egl_buffer->wl_egl_surface = wl_egl_surface;
+ wl_egl_buffer->status = RELEASED;
+
wl_egl_buffer->dx = wl_egl_window->dx;
wl_egl_buffer->dy = wl_egl_window->dy;
wl_egl_buffer->width = tbm_surface_get_width(tbm_surface);
wl_egl_buffer->height = tbm_surface_get_height(tbm_surface);
+ tpl_gmutex_init(&wl_egl_buffer->mutex);
+ tpl_gcond_init(&wl_egl_buffer->cond);
+
+	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+	{
+		int i;
+		/* Find the first free slot. BUFFER_ARRAY_SIZE is twice the
+		 * queue size, so a free slot is expected, but guard against
+		 * falling through with i == BUFFER_ARRAY_SIZE, which would
+		 * write one past the end of the buffers array. */
+		for (i = 0; i < BUFFER_ARRAY_SIZE; i++)
+			if (wl_egl_surface->buffers[i] == NULL) break;
+
+		if (i < BUFFER_ARRAY_SIZE) {
+			wl_egl_surface->buffer_cnt++;
+			wl_egl_surface->buffers[i] = wl_egl_buffer;
+			wl_egl_buffer->idx = i;
+		} else {
+			TPL_ERR("buffers array is full. wl_egl_surface(%p) wl_egl_buffer(%p)",
+					wl_egl_surface, wl_egl_buffer);
+			wl_egl_buffer->idx = -1;
+		}
+	}
+	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
TPL_INFO("[WL_EGL_BUFFER_CREATE]",
"wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
wl_egl_surface, wl_egl_buffer, tbm_surface,
wl_egl_buffer = _wl_egl_buffer_create(wl_egl_surface, tbm_surface);
TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer, "Failed to create/get wl_egl_buffer.");
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ wl_egl_buffer->status = DEQUEUED;
+
/* If wl_egl_buffer->release_fence_fd is -1,
* the tbm_surface can be used immediately.
* If not, user(EGL) have to wait until signaled. */
TPL_LOG_T("WL_EGL", "[DEQ] wl_egl_buffer(%p) tbm_surface(%p) bo(%d) fence(%d)",
wl_egl_buffer, tbm_surface, bo_name, release_fence ? *release_fence : -1);
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
tpl_gmutex_unlock(&wl_egl_display->wl_event_mutex);
return tbm_surface;
tpl_wl_egl_surface_t *wl_egl_surface =
(tpl_wl_egl_surface_t *)surface->backend.data;
+ tpl_wl_egl_buffer_t *wl_egl_buffer = NULL;
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
if (!tbm_surface_internal_is_valid(tbm_surface)) {
return TPL_ERROR_INVALID_PARAMETER;
}
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- /* Stop tracking of this canceled tbm_surface */
- __tpl_list_remove_data(wl_egl_surface->in_use_buffers,
- (void *)tbm_surface, TPL_FIRST, NULL);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+ if (wl_egl_buffer) {
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ wl_egl_buffer->status = RELEASED;
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+ }
tbm_surface_internal_unref(tbm_surface);
wl_egl_surface, tbm_surface, bo_name, acquire_fence);
wl_egl_buffer = _get_wl_egl_buffer(tbm_surface);
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
/* If there are received region information, save it to wl_egl_buffer */
if (num_rects && rects) {
if (!wl_egl_buffer->rects) {
TPL_ERR("Failed to allocate memory fo damage rects info.");
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
return TPL_ERROR_OUT_OF_MEMORY;
}
TPL_WARN("[ENQ_SKIP][Frontbuffer:%s] tbm_surface(%p) need not to enqueue",
((surface->frontbuffer == tbm_surface) ? "ON" : "OFF"), tbm_surface);
TRACE_ASYNC_END((int)tbm_surface, "[DEQ]~[ENQ] BO_NAME:%d", bo_name);
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
return TPL_ERROR_NONE;
}
if (wl_egl_buffer->acquire_fence_fd != -1)
close(wl_egl_buffer->acquire_fence_fd);
-
+
wl_egl_buffer->acquire_fence_fd = acquire_fence;
+ wl_egl_buffer->status = ENQUEUED;
+
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
tsq_err = tbm_surface_queue_enqueue(wl_egl_surface->tbm_queue,
tbm_surface);
TRACE_ASYNC_END(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
wl_egl_buffer->acquire_fence_fd);
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+ tpl_gcond_signal(&wl_egl_buffer->cond);
+ wl_egl_buffer->status = WAITING_VBLANK;
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- __tpl_list_remove_data(wl_egl_surface->fence_waiting_buffers,
- (void *)tbm_surface, TPL_FIRST, NULL);
if (wl_egl_surface->vblank_done)
_thread_wl_surface_commit(wl_egl_surface, tbm_surface);
TPL_CHECK_ON_FALSE_ASSERT_FAIL(wl_egl_buffer != NULL,
"wl_egl_buffer sould be not NULL");
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
+ wl_egl_buffer->status = ACQUIRED;
+
if (wl_egl_buffer->wl_buffer == NULL) {
tpl_wl_egl_display_t *wl_egl_display = wl_egl_surface->wl_egl_display;
wl_egl_buffer->wl_buffer =
tpl_gsource_create(wl_egl_display->thread, wl_egl_buffer,
wl_egl_buffer->acquire_fence_fd, &buffer_funcs,
SOURCE_TYPE_DISPOSABLE);
-
- __tpl_list_push_back(wl_egl_surface->fence_waiting_buffers, tbm_surface);
+ wl_egl_buffer->status = WAITING_SIGNALED;
TRACE_ASYNC_BEGIN(wl_egl_buffer->acquire_fence_fd, "FENCE WAIT fd(%d)",
wl_egl_buffer->acquire_fence_fd);
if (wl_egl_surface->vblank_done)
ready_to_commit = TPL_TRUE;
else {
+ wl_egl_buffer->status = WAITING_VBLANK;
__tpl_list_push_back(wl_egl_surface->vblank_waiting_buffers, tbm_surface);
ready_to_commit = TPL_FALSE;
}
}
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
if (ready_to_commit)
_thread_wl_surface_commit(wl_egl_surface, tbm_surface);
}
tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
tbm_surface_h tbm_surface = NULL;
- if (wl_egl_buffer)
- tbm_surface = wl_egl_buffer->tbm_surface;
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
+
+ tbm_surface = wl_egl_buffer->tbm_surface;
if (tbm_surface_internal_is_valid(tbm_surface)) {
- if (wl_egl_buffer->need_to_release) {
+ if (wl_egl_buffer->status == COMMITTED) {
tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
tbm_surface_queue_error_e tsq_err;
- if (wl_egl_surface->committed_buffers) {
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- __tpl_list_remove_data(wl_egl_surface->committed_buffers,
- (void *)tbm_surface,
- TPL_FIRST, NULL);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- }
-
- wl_egl_buffer->need_to_release = TPL_FALSE;
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
wl_egl_buffer->buffer_release = NULL;
wl_egl_buffer->release_fence_fd = fence;
+ wl_egl_buffer->status = RELEASED;
TRACE_MARK("[FENCED_RELEASE] BO(%d) fence(%d)",
_get_tbm_surface_bo_name(tbm_surface),
_get_tbm_surface_bo_name(tbm_surface),
fence);
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
tbm_surface_h tbm_surface = NULL;
- if (wl_egl_buffer)
- tbm_surface = wl_egl_buffer->tbm_surface;
+ TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
+
+ tbm_surface = wl_egl_buffer->tbm_surface;
if (tbm_surface_internal_is_valid(tbm_surface)) {
- if (wl_egl_buffer->need_to_release) {
+ if (wl_egl_buffer->status == COMMITTED) {
tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
tbm_surface_queue_error_e tsq_err;
- if (wl_egl_surface->committed_buffers) {
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- __tpl_list_remove_data(wl_egl_surface->committed_buffers,
- (void *)tbm_surface,
- TPL_FIRST, NULL);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- }
-
- wl_egl_buffer->need_to_release = TPL_FALSE;
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
zwp_linux_buffer_release_v1_destroy(wl_egl_buffer->buffer_release);
wl_egl_buffer->buffer_release = NULL;
wl_egl_buffer->release_fence_fd = -1;
+ wl_egl_buffer->status = RELEASED;
TRACE_MARK("[IMMEDIATE_RELEASE] BO(%d)",
_get_tbm_surface_bo_name(tbm_surface));
wl_egl_buffer->wl_buffer, tbm_surface,
_get_tbm_surface_bo_name(tbm_surface));
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)data;
tbm_surface_h tbm_surface = NULL;
-	if (wl_egl_buffer)
-		tbm_surface = wl_egl_buffer->tbm_surface;
+	TPL_CHECK_ON_NULL_RETURN(wl_egl_buffer);
+
+	tbm_surface = wl_egl_buffer->tbm_surface;
if (tbm_surface_internal_is_valid(tbm_surface)) {
- if (wl_egl_buffer->need_to_release) {
- tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
- tbm_surface_queue_error_e tsq_err;
+ tpl_wl_egl_surface_t *wl_egl_surface = wl_egl_buffer->wl_egl_surface;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+ tpl_gmutex_lock(&wl_egl_buffer->mutex);
+
+ if (wl_egl_buffer->status == COMMITTED) {
tsq_err = tbm_surface_queue_release(wl_egl_surface->tbm_queue,
tbm_surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
TPL_ERR("tbm_surface(%p) tsq_err(%d)", tbm_surface, tsq_err);
- if (wl_egl_surface->committed_buffers) {
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- __tpl_list_remove_data(wl_egl_surface->committed_buffers,
- (void *)tbm_surface,
- TPL_FIRST, NULL);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
- }
-
- wl_egl_buffer->need_to_release = TPL_FALSE;
+ wl_egl_buffer->status = RELEASED;
TRACE_MARK("[RELEASE] BO(%d)", _get_tbm_surface_bo_name(tbm_surface));
TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
TPL_LOG_T("WL_EGL", "[REL] wl_buffer(%p) tbm_surface(%p) bo(%d)",
wl_egl_buffer->wl_buffer, tbm_surface,
_get_tbm_surface_bo_name(tbm_surface));
+ }
+ tpl_gmutex_unlock(&wl_egl_buffer->mutex);
+
+ if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE)
tbm_surface_internal_unref(tbm_surface);
- }
} else {
TPL_ERR("Invalid parameter | tbm_surface(%p)", tbm_surface);
}
(void *)wl_egl_buffer->wl_buffer,
wl_egl_buffer->serial);
- wl_egl_buffer->need_to_release = TPL_TRUE;
-
if (wl_egl_display->use_explicit_sync &&
wl_egl_surface->surface_sync) {
TRACE_ASYNC_BEGIN((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
_get_tbm_surface_bo_name(tbm_surface));
- wl_egl_buffer->need_to_commit = TPL_FALSE;
+ wl_egl_buffer->need_to_commit = TPL_FALSE;
+ wl_egl_buffer->status = COMMITTED;
TPL_LOG_T("WL_EGL", "[COMMIT] wl_buffer(%p) tbm_surface(%p) bo(%d)",
wl_egl_buffer->wl_buffer, tbm_surface,
_thread_surface_vblank_wait(wl_egl_surface) != TPL_ERROR_NONE)
TPL_ERR("Failed to set wait vblank.");
- if (wl_egl_surface->committed_buffers) {
- __tpl_list_push_back(wl_egl_surface->committed_buffers, tbm_surface);
- }
-
tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
if (wl_egl_buffer->commit_sync_fd != -1) {
TPL_INFO("[BUFFER_FREE]", "wl_egl_buffer(%p) wl_buffer(%p) tbm_surface(%p)",
wl_egl_buffer, wl_egl_buffer->wl_buffer, wl_egl_buffer->tbm_surface);
+	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+	/* idx 0 is a valid slot: compare with >= 0, otherwise the buffer
+	 * stored at index 0 is never removed, leaving a dangling pointer
+	 * in buffers[0] and a buffer_cnt that never reaches zero. */
+	if (wl_egl_buffer->idx >= 0 && wl_egl_surface->buffers[wl_egl_buffer->idx]) {
+		wl_egl_surface->buffers[wl_egl_buffer->idx] = NULL;
+		wl_egl_surface->buffer_cnt--;
+
+		wl_egl_buffer->idx = -1;
+	}
+	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+
wl_display_flush(wl_egl_display->wl_display);
if (wl_egl_buffer->wl_buffer)
free(wl_egl_buffer);
}
-static void
-__cb_buffer_remove_from_list(void *data)
-{
- tbm_surface_h tbm_surface = (tbm_surface_h)data;
-
- if (tbm_surface && tbm_surface_internal_is_valid(tbm_surface))
- tbm_surface_internal_unref(tbm_surface);
-}
-
static int
_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
{
static void
_print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
{
- int count = 0;
- int idx = 0;
- tpl_list_node_t *node = NULL;
- tbm_surface_h tbm_surface = NULL;
-
- /* vblank waiting list */
- count = __tpl_list_get_count(wl_egl_surface->vblank_waiting_buffers);
- TPL_DEBUG("VBLANK WAITING BUFFERS | wl_egl_surface(%p) list(%p) count(%d)",
- wl_egl_surface, wl_egl_surface->vblank_waiting_buffers, count);
-
- while ((!node &&
- (node = __tpl_list_get_front_node(wl_egl_surface->vblank_waiting_buffers))) ||
- (node && (node = __tpl_list_node_next(node)))) {
- tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
- TPL_DEBUG("VBLANK WAITING BUFFERS | %d | tbm_surface(%p) bo(%d)",
- idx, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
- idx++;
- }
-
- idx = 0;
- node = NULL;
-
- /* in use buffers list */
- count = __tpl_list_get_count(wl_egl_surface->in_use_buffers);
- TPL_DEBUG("DEQUEUED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)",
- wl_egl_surface, wl_egl_surface->in_use_buffers, count);
-
- while ((!node &&
- (node = __tpl_list_get_front_node(wl_egl_surface->in_use_buffers))) ||
- (node && (node = __tpl_list_node_next(node)))) {
- tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
- TPL_DEBUG("DEQUEUED BUFFERS | %d | tbm_surface(%p) bo(%d)",
- idx, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
- idx++;
- }
-
- idx = 0;
- node = NULL;
-
- /* committed buffers list */
- count = __tpl_list_get_count(wl_egl_surface->committed_buffers);
- TPL_DEBUG("COMMITTED BUFFERS | wl_egl_surface(%p) list(%p) count(%d)",
- wl_egl_surface, wl_egl_surface->committed_buffers, count);
-
- while ((!node &&
- (node = __tpl_list_get_front_node(wl_egl_surface->committed_buffers))) ||
- (node && (node = __tpl_list_node_next(node)))) {
- tbm_surface = (tbm_surface_h)__tpl_list_node_get_data(node);
- TPL_DEBUG("COMMITTED BUFFERS | %d | tbm_surface(%p) bo(%d)",
- idx, tbm_surface,
- _get_tbm_surface_bo_name(tbm_surface));
- idx++;
+ int idx = 0;
+
+ tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+ TPL_INFO("[BUFFERS_INFO]", "wl_egl_surface(%p) buffer_cnt(%d)",
+ wl_egl_surface, wl_egl_surface->buffer_cnt);
+ for (idx = 0; idx < BUFFER_ARRAY_SIZE; idx++) {
+ tpl_wl_egl_buffer_t *wl_egl_buffer = wl_egl_surface->buffers[idx];
+ if (wl_egl_buffer) {
+ TPL_INFO("[INFO]",
+ "INDEX[%d] | wl_egl_buffer(%p) tbm_surface(%p) bo(%d) | status(%d)",
+ idx, wl_egl_buffer, wl_egl_buffer->tbm_surface,
+ _get_tbm_surface_bo_name(wl_egl_buffer->tbm_surface),
+ wl_egl_buffer->status);
+ }
}
+ tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
}