tdm_client_vblank *tdm_vblank;
tpl_wl_egl_surface_t *wl_egl_surface;
tpl_list_t *waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
+ tpl_gmutex mutex;
};
typedef enum buffer_status {
wl_egl_surface->tss_flusher = NULL;
}
#endif
+
+ if (wl_egl_surface->tbm_queue) {
+ TPL_INFO("[TBM_QUEUE_DESTROY]",
+ "wl_egl_surface(%p) tbm_queue(%p)",
+ wl_egl_surface, wl_egl_surface->tbm_queue);
+ tbm_surface_queue_destroy(wl_egl_surface->tbm_queue);
+ wl_egl_surface->tbm_queue = NULL;
+ }
+
if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
__tpl_list_free(wl_egl_surface->vblank->waiting_buffers, NULL);
wl_egl_surface->vblank->waiting_buffers = NULL;
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
}
if (wl_egl_surface->vblank) {
wl_egl_surface->vblank = NULL;
}
- if (wl_egl_surface->tbm_queue) {
- TPL_INFO("[TBM_QUEUE_DESTROY]",
- "wl_egl_surface(%p) tbm_queue(%p)",
- wl_egl_surface, wl_egl_surface->tbm_queue);
- tbm_surface_queue_destroy(wl_egl_surface->tbm_queue);
- wl_egl_surface->tbm_queue = NULL;
- }
-
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
tdm_client_vblank_destroy(vblank->tdm_vblank);
vblank->tdm_vblank = NULL;
vblank->wl_egl_surface = NULL;
+ tpl_gmutex_clear(&vblank->mutex);
free(vblank);
} else {
vblank->waiting_buffers = __tpl_list_alloc();
vblank->wl_egl_surface = wl_egl_surface;
+ tpl_gmutex_init(&vblank->mutex);
__tpl_list_push_back(wl_egl_display->tdm.surface_vblanks,
(void *)vblank);
if (wl_egl_surface->vblank == NULL || wl_egl_surface->vblank_done)
_thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
- else
+ else {
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
__tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers,
wl_egl_buffer);
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+ }
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
ready_to_commit = TPL_TRUE;
else {
wl_egl_buffer->status = WAITING_VBLANK;
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
__tpl_list_push_back(wl_egl_surface->vblank->waiting_buffers, wl_egl_buffer);
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
ready_to_commit = TPL_FALSE;
}
}
tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
- while (!__tpl_list_is_empty(wl_egl_surface->vblank->waiting_buffers)) {
+ tpl_bool_t is_empty = TPL_TRUE;
+ do {
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
wl_egl_buffer = (tpl_wl_egl_buffer_t *)__tpl_list_pop_front(
wl_egl_surface->vblank->waiting_buffers,
NULL);
- if (wl_egl_buffer)
- _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
+ is_empty = __tpl_list_is_empty(wl_egl_surface->vblank->waiting_buffers);
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+
+ if (!wl_egl_buffer) break;
+
+ _thread_wl_surface_commit(wl_egl_surface, wl_egl_buffer);
- /* If tdm error such as TIEMOUT occured,
+ /* If tdm error such as TIMEOUT occurred,
* flush all vblank waiting buffers of its wl_egl_surface.
* Otherwise, only one wl_egl_buffer will be commited per one vblank event.
*/
if (error == TDM_ERROR_NONE) break;
- }
+ } while (!is_empty);
}
tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
}
}
tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
- tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
- __tpl_list_remove_data(wl_egl_surface->vblank->waiting_buffers,
- (void *)wl_egl_buffer,
- TPL_FIRST,
- NULL);
- tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+ if (wl_egl_surface->vblank && wl_egl_surface->vblank->waiting_buffers) {
+ tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
+ __tpl_list_remove_data(wl_egl_surface->vblank->waiting_buffers,
+ (void *)wl_egl_buffer,
+ TPL_FIRST,
+ NULL);
+ tpl_gmutex_unlock(&wl_egl_surface->vblank->mutex);
+ }
if (wl_egl_display) {
if (wl_egl_buffer->wl_buffer) {