X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=src%2Ftbm_surface_queue.c;h=1d8cc99b226adce590ddd2e13da5446a833ce3d3;hb=757968fc683975d882aadf1b45a54e214bbd4783;hp=90fc623906bf0379c91aaaac8d6ad132a31a10a8;hpb=889749e503f081970159b47de21e308a8367ec38;p=platform%2Fcore%2Fuifw%2Flibtbm.git diff --git a/src/tbm_surface_queue.c b/src/tbm_surface_queue.c index 90fc623..1d8cc99 100644 --- a/src/tbm_surface_queue.c +++ b/src/tbm_surface_queue.c @@ -38,22 +38,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define DIRTY_QUEUE 2 #define NODE_LIST 4 -#define TBM_QUEUE_DEBUG 0 - -#ifdef TRACE -#define TBM_QUEUE_TRACE(fmt, ...) { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); } -#else -#define TBM_QUEUE_TRACE(fmt, ...) -#endif /* TRACE */ - -#if TBM_QUEUE_DEBUG -#define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue) -#define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue) -#else -#define TBM_LOCK() -#define TBM_UNLOCK() -#endif - static tbm_bufmgr g_surf_queue_bufmgr; static pthread_mutex_t tbm_surf_queue_lock; void _tbm_surface_queue_mutex_unlock(void); @@ -61,7 +45,8 @@ void _tbm_surface_queue_mutex_unlock(void); /* check condition */ #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\ if (!(cond)) {\ - TBM_LOG_E("'%s' failed.\n", #cond);\ + TBM_ERR("'%s' failed.\n", #cond);\ + _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\ _tbm_surf_queue_mutex_unlock();\ return;\ } \ @@ -69,7 +54,8 @@ void _tbm_surface_queue_mutex_unlock(void); #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\ if (!(cond)) {\ - TBM_LOG_E("'%s' failed.\n", #cond);\ + TBM_ERR("'%s' failed.\n", #cond);\ + _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\ _tbm_surf_queue_mutex_unlock();\ return val;\ } \ @@ -97,6 +83,8 @@ typedef struct { Queue_Node_Type type; unsigned int priv_flags; /*for each queue*/ + + int delete_pending; } queue_node; typedef struct { @@ -106,6 +94,13 @@ typedef struct { void *data; } queue_notify; +typedef struct { + struct list_head link; + + tbm_surface_queue_trace_cb cb; + void *data; +} queue_trace; + typedef struct _tbm_surface_queue_interface { void (*init)(tbm_surface_queue_h queue); void (*reset)(tbm_surface_queue_h queue); @@ -133,8 +128,10 @@ struct _tbm_surface_queue { struct list_head destory_noti; struct list_head dequeuable_noti; struct list_head dequeue_noti; + struct list_head can_dequeue_noti; struct list_head acquirable_noti; struct list_head reset_noti; + struct list_head trace_noti; pthread_mutex_t lock; pthread_cond_t free_cond; @@ -149,9 +146,11 @@ struct _tbm_surface_queue { void *alloc_cb_data; struct list_head item_link; /* link of surface queue */ -}; -/* LCOV_EXCL_START */ + int modes; + unsigned int enqueue_sync_count; + unsigned int acquire_sync_count; +}; static bool _tbm_surf_queue_mutex_init(void) @@ -162,7 +161,7 @@ _tbm_surf_queue_mutex_init(void) return true; if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) { - TBM_LOG_E("fail: tbm_surf_queue mutex init\n"); + TBM_ERR("fail: pthread_mutex_init\n"); return false; } @@ -174,8 +173,10 @@ _tbm_surf_queue_mutex_init(void) static void _tbm_surf_queue_mutex_lock(void) { - if (!_tbm_surf_queue_mutex_init()) + if (!_tbm_surf_queue_mutex_init()) { + TBM_ERR("fail: _tbm_surf_queue_mutex_init\n"); return; + } pthread_mutex_lock(&tbm_surf_queue_lock); } @@ -205,27 +206,33 @@ _deinit_tbm_surf_queue_bufmgr(void) static int _tbm_surface_queue_is_valid(tbm_surface_queue_h 
surface_queue) { - tbm_surface_queue_h old_data; + tbm_surface_queue_h old_data = NULL; + + if (surface_queue == NULL) { + TBM_ERR("error: surface_queue is NULL.\n"); + return 0; + } - if (surface_queue == NULL || g_surf_queue_bufmgr == NULL) { - TBM_TRACE("error: surface_queue is NULL or not initialized\n"); + if (g_surf_queue_bufmgr == NULL) { + TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n"); return 0; } if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) { - TBM_TRACE("error: surf_queue_list is empty\n"); + TBM_ERR("error: surf_queue_list is empty\n"); return 0; } LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list, item_link) { if (old_data == surface_queue) { - TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); return 1; } } - TBM_TRACE("error: Invalid tbm_surface_queue(%p)\n", surface_queue); + TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue); + return 0; } @@ -275,9 +282,12 @@ _queue_node_pop_front(queue *queue) { queue_node *node; + if (!queue->head.next) return NULL; + if (!queue->count) return NULL; + node = LIST_ENTRY(queue_node, queue->head.next, item_link); - LIST_DEL(&node->item_link); + LIST_DELINIT(&node->item_link); queue->count--; return node; @@ -286,7 +296,7 @@ _queue_node_pop_front(queue *queue) static queue_node * _queue_node_pop(queue *queue, queue_node *node) { - LIST_DEL(&node->item_link); + LIST_DELINIT(&node->item_link); queue->count--; return node; @@ -296,7 +306,7 @@ static queue_node * _queue_get_node(tbm_surface_queue_h surface_queue, int type, tbm_surface_h surface, int *out_type) { - queue_node *node; + queue_node *node = NULL; if (type == 0) type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST; @@ -338,6 +348,8 @@ _queue_get_node(tbm_surface_queue_h surface_queue, int type, } } + TBM_ERR("fail to get the queue_node.\n"); + return NULL; } @@ -386,7 +398,7 @@ static void _notify_remove(struct list_head *list, tbm_surface_queue_notify_cb cb, void *data) { - queue_notify *item, *tmp; + queue_notify *item = NULL, *tmp; LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) { if (item->cb == cb && item->data == data) { @@ -396,13 +408,13 @@ _notify_remove(struct list_head *list, } } - TBM_LOG_E("Cannot find notifiy\n"); + TBM_ERR("Cannot find notifiy\n"); } static void _notify_remove_all(struct list_head *list) { - queue_notify *item, *tmp; + queue_notify *item = NULL, *tmp; LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) { LIST_DEL(&item->link); @@ -414,16 +426,81 @@ static void _notify_emit(tbm_surface_queue_h surface_queue, struct list_head *list) { - queue_notify *item; + queue_notify *item = NULL, *tmp;; - LIST_FOR_EACH_ENTRY(item, list, link) + /* + The item->cb is the outside function of the libtbm. + The tbm user may/can remove the item of the list, + so we have to use the LIST_FOR_EACH_ENTRY_SAFE. 
+ */ + LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) item->cb(surface_queue, item->data); } +static void +_trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb, + void *data) +{ + TBM_RETURN_IF_FAIL(cb != NULL); + + queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace)); + + TBM_RETURN_IF_FAIL(item != NULL); + + LIST_INITHEAD(&item->link); + item->cb = cb; + item->data = data; + + LIST_ADDTAIL(&item->link, list); +} + +static void +_trace_remove(struct list_head *list, + tbm_surface_queue_trace_cb cb, void *data) +{ + queue_trace *item = NULL, *tmp; + + LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) { + if (item->cb == cb && item->data == data) { + LIST_DEL(&item->link); + free(item); + return; + } + } + + TBM_ERR("Cannot find notifiy\n"); +} + +static void +_trace_remove_all(struct list_head *list) +{ + queue_trace *item = NULL, *tmp; + + LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) { + LIST_DEL(&item->link); + free(item); + } +} + +static void +_trace_emit(tbm_surface_queue_h surface_queue, + struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace) +{ + queue_trace *item = NULL, *tmp;; + + /* + The item->cb is the outside function of the libtbm. + The tbm user may/can remove the item of the list, + so we have to use the LIST_FOR_EACH_ENTRY_SAFE. + */ + LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) + item->cb(surface_queue, surface, trace, item->data); +} + static int _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type) { - queue_node *node; + queue_node *node = NULL; int count = 0; LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) { @@ -480,14 +557,6 @@ _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue) { queue_node *node; - if (_queue_is_empty(&surface_queue->free_queue)) { - if (surface_queue->impl && surface_queue->impl->need_attach) - surface_queue->impl->need_attach(surface_queue); - - if (_queue_is_empty(&surface_queue->free_queue)) - return NULL; - } - node = _queue_node_pop_front(&surface_queue->free_queue); return node; @@ -525,8 +594,6 @@ _tbm_surface_queue_init(tbm_surface_queue_h surface_queue, TBM_RETURN_IF_FAIL(surface_queue != NULL); TBM_RETURN_IF_FAIL(impl != NULL); - memset(surface_queue, 0x00, sizeof(struct _tbm_surface_queue)); - if (!g_surf_queue_bufmgr) _init_tbm_surf_queue_bufmgr(); @@ -540,16 +607,19 @@ _tbm_surface_queue_init(tbm_surface_queue_h surface_queue, surface_queue->format = format; surface_queue->impl = impl; surface_queue->impl_data = data; + surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE; _queue_init(&surface_queue->free_queue); _queue_init(&surface_queue->dirty_queue); LIST_INITHEAD(&surface_queue->list); LIST_INITHEAD(&surface_queue->destory_noti); - LIST_INITHEAD(&surface_queue->acquirable_noti); LIST_INITHEAD(&surface_queue->dequeuable_noti); LIST_INITHEAD(&surface_queue->dequeue_noti); + LIST_INITHEAD(&surface_queue->can_dequeue_noti); + LIST_INITHEAD(&surface_queue->acquirable_noti); LIST_INITHEAD(&surface_queue->reset_noti); + LIST_INITHEAD(&surface_queue->trace_noti); if (surface_queue->impl && surface_queue->impl->init) surface_queue->impl->init(surface_queue); @@ -563,13 +633,16 @@ tbm_surface_queue_add_destroy_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb, + TBM_ERROR_INVALID_PARAMETER); pthread_mutex_lock(&surface_queue->lock); - 
TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_add(&surface_queue->destory_noti, destroy_cb, data); @@ -586,13 +659,14 @@ tbm_surface_queue_remove_destroy_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_remove(&surface_queue->destory_noti, destroy_cb, data); @@ -609,13 +683,16 @@ tbm_surface_queue_add_dequeuable_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb, + TBM_ERROR_INVALID_PARAMETER); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data); @@ -632,13 +709,14 @@ tbm_surface_queue_remove_dequeuable_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data); @@ -655,13 +733,16 @@ tbm_surface_queue_add_dequeue_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb, + TBM_ERROR_INVALID_PARAMETER); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data); @@ -678,13 +759,14 @@ tbm_surface_queue_remove_dequeue_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data); @@ -696,18 +778,71 @@ tbm_surface_queue_remove_dequeue_cb( } tbm_surface_queue_error_e +tbm_surface_queue_add_can_dequeue_cb( + tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb, + void *data) +{ + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb, + TBM_ERROR_INVALID_PARAMETER); + + pthread_mutex_lock(&surface_queue->lock); + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); + + _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data); + + 
pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +tbm_surface_queue_error_e +tbm_surface_queue_remove_can_dequeue_cb( + tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb, + void *data) +{ + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + + pthread_mutex_lock(&surface_queue->lock); + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); + + _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data); + + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +tbm_surface_queue_error_e tbm_surface_queue_add_acquirable_cb( tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb, void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb, + TBM_ERROR_INVALID_PARAMETER); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data); @@ -724,13 +859,14 @@ tbm_surface_queue_remove_acquirable_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data); @@ -742,6 +878,56 @@ tbm_surface_queue_remove_acquirable_cb( } tbm_surface_queue_error_e +tbm_surface_queue_add_trace_cb( + tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb, + void *data) +{ + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb, + TBM_ERROR_INVALID_PARAMETER); + + pthread_mutex_lock(&surface_queue->lock); + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); + + _trace_add(&surface_queue->trace_noti, trace_cb, data); + + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +tbm_surface_queue_error_e +tbm_surface_queue_remove_trace_cb( + tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb, + void *data) +{ + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + + pthread_mutex_lock(&surface_queue->lock); + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); + + _trace_remove(&surface_queue->trace_noti, trace_cb, data); + + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +tbm_surface_queue_error_e tbm_surface_queue_set_alloc_cb( tbm_surface_queue_h surface_queue, tbm_surface_alloc_cb 
alloc_cb, @@ -749,13 +935,14 @@ tbm_surface_queue_set_alloc_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); surface_queue->alloc_cb = alloc_cb; surface_queue->free_cb = free_cb; @@ -774,10 +961,11 @@ tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue) int width; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); width = surface_queue->width; @@ -792,10 +980,11 @@ tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue) int height; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); height = surface_queue->height; @@ -810,10 +999,11 @@ tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue) int format; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); format = surface_queue->format; @@ -828,10 +1018,11 @@ tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue) int queue_size; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); queue_size = surface_queue->queue_size; @@ -846,13 +1037,16 @@ tbm_surface_queue_add_reset_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb, + TBM_ERROR_INVALID_PARAMETER); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_add(&surface_queue->reset_noti, reset_cb, data); @@ -869,13 +1063,14 @@ tbm_surface_queue_remove_reset_cb( void *data) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); _notify_remove(&surface_queue->reset_noti, reset_cb, data); @@ -894,6 +1089,7 @@ tbm_surface_queue_enqueue(tbm_surface_queue_h int queue_type; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); @@ -905,16 +1101,23 @@ 
tbm_surface_queue_enqueue(tbm_surface_queue_h pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface); node = _queue_get_node(surface_queue, 0, surface, &queue_type); if (node == NULL || queue_type != NODE_LIST) { - TBM_LOG_E("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n", + TBM_ERR("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n", node, queue_type); pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); - return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; + + if (!node) { + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE); + return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE; + } else { + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST); + return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST; + } } if (surface_queue->impl && surface_queue->impl->enqueue) @@ -922,33 +1125,49 @@ tbm_surface_queue_enqueue(tbm_surface_queue_h else _tbm_surface_queue_enqueue(surface_queue, node, 1); - if (_queue_is_empty(&surface_queue->dirty_queue)) { - TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node); + if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) { + TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface); + _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION); pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); - return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; + return TBM_SURFACE_ERROR_INVALID_OPERATION; } node->type = QUEUE_NODE_TYPE_ENQUEUE; + if (surface_queue->enqueue_sync_count == 1) { + tbm_surface_info_s info; + int ret; + + ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info); + if (ret == TBM_SURFACE_ERROR_NONE) + tbm_surface_unmap(surface); + } + + if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--; + pthread_mutex_unlock(&surface_queue->lock); pthread_cond_signal(&surface_queue->dirty_cond); _tbm_surf_queue_mutex_unlock(); + _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE); + _notify_emit(surface_queue, &surface_queue->acquirable_noti); return TBM_SURFACE_QUEUE_ERROR_NONE; } tbm_surface_queue_error_e -tbm_surface_queue_dequeue(tbm_surface_queue_h - surface_queue, tbm_surface_h *surface) +tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h + surface_queue, tbm_surface_h surface) { queue_node *node; + int queue_type; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); @@ -957,130 +1176,240 @@ tbm_surface_queue_dequeue(tbm_surface_queue_h pthread_mutex_lock(&surface_queue->lock); - if (surface_queue->impl && surface_queue->impl->dequeue) - node = surface_queue->impl->dequeue(surface_queue); - else - node = _tbm_surface_queue_dequeue(surface_queue); - - if (node == NULL) { - *surface = NULL; - pthread_mutex_unlock(&surface_queue->lock); - - _tbm_surf_queue_mutex_unlock(); - return TBM_SURFACE_QUEUE_ERROR_EMPTY; - } + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface); - if (node->surface == NULL) { - *surface = NULL; - TBM_LOG_E("_queue_node_pop_front failed\n"); + node = _queue_get_node(surface_queue, 0, surface, &queue_type); + if (node == NULL || queue_type != 
NODE_LIST) { + TBM_ERR("tbm_surface_queue_cancel_dequeue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n", + node, queue_type); + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE); pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); - return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE; + return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; } - node->type = QUEUE_NODE_TYPE_DEQUEUE; - *surface = node->surface; - - TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface); - - pthread_mutex_unlock(&surface_queue->lock); - - _tbm_surf_queue_mutex_unlock(); - - _notify_emit(surface_queue, &surface_queue->dequeue_noti); - - return TBM_SURFACE_QUEUE_ERROR_NONE; -} - -int -tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait) -{ - _tbm_surf_queue_mutex_lock(); + if (node->delete_pending) { + TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface); - TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0); + _queue_delete_node(surface_queue, node); - pthread_mutex_lock(&surface_queue->lock); + pthread_mutex_unlock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + _tbm_surf_queue_mutex_unlock(); - if (_queue_is_empty(&surface_queue->free_queue)) { - if (surface_queue->impl && surface_queue->impl->need_attach) - surface_queue->impl->need_attach(surface_queue); + _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE); - if (!_tbm_surface_queue_is_valid(surface_queue)) { - TBM_LOG_E("surface_queue:%p is invalid", surface_queue); - _tbm_surf_queue_mutex_unlock(); - return 0; - } + return TBM_SURFACE_QUEUE_ERROR_NONE; } - if (_queue_is_empty(&surface_queue->free_queue)) { - if (wait && - _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE)) { + if (surface_queue->queue_size < surface_queue->num_attached) { + TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface); - _tbm_surf_queue_mutex_unlock(); + if (surface_queue->impl && surface_queue->impl->need_detach) + surface_queue->impl->need_detach(surface_queue, node); + else + _tbm_surface_queue_detach(surface_queue, surface); - pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock); + pthread_mutex_unlock(&surface_queue->lock); - _tbm_surf_queue_mutex_lock(); + _tbm_surf_queue_mutex_unlock(); - if (!_tbm_surface_queue_is_valid(surface_queue)) { - TBM_LOG_E("surface_queue:%p is invalid", surface_queue); - _tbm_surf_queue_mutex_unlock(); - return 0; - } + _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE); - pthread_mutex_unlock(&surface_queue->lock); + return TBM_SURFACE_QUEUE_ERROR_NONE; + } - _tbm_surf_queue_mutex_unlock(); - return 1; - } + if (surface_queue->impl && surface_queue->impl->release) + surface_queue->impl->release(surface_queue, node); + else + _tbm_surface_queue_release(surface_queue, node, 1); + if (_queue_is_empty(&surface_queue->free_queue)) { + TBM_ERR("surface_queue->free_queue is empty.\n"); + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE); pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); - return 0; + return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; } + node->type = QUEUE_NODE_TYPE_RELEASE; + pthread_mutex_unlock(&surface_queue->lock); + pthread_cond_signal(&surface_queue->free_cond); _tbm_surf_queue_mutex_unlock(); 
- return 1; + _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE); + + return TBM_SURFACE_QUEUE_ERROR_NONE; } tbm_surface_queue_error_e -tbm_surface_queue_release(tbm_surface_queue_h - surface_queue, tbm_surface_h surface) +tbm_surface_queue_dequeue(tbm_surface_queue_h + surface_queue, tbm_surface_h *surface) { queue_node *node; - int queue_type; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL, TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE); + *surface = NULL; + pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface); + if (_queue_is_empty(&surface_queue->free_queue)) { + if (surface_queue->impl && surface_queue->impl->need_attach) + surface_queue->impl->need_attach(surface_queue); + + if (!_tbm_surface_queue_is_valid(surface_queue)) { + TBM_ERR("surface_queue:%p is invalid", surface_queue); + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + pthread_mutex_unlock(&surface_queue->lock); + _tbm_surf_queue_mutex_unlock(); + return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE; + } + } - node = _queue_get_node(surface_queue, 0, surface, &queue_type); - if (node == NULL || queue_type != NODE_LIST) { - TBM_LOG_E("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n", - node, queue_type); + if (surface_queue->impl && surface_queue->impl->dequeue) + node = surface_queue->impl->dequeue(surface_queue); + else + node = _tbm_surface_queue_dequeue(surface_queue); + + if (node == NULL || node->surface == NULL) { + TBM_ERR("_queue_node_pop_front failed\n"); + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY); pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); - return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE; + return TBM_SURFACE_QUEUE_ERROR_EMPTY; } - if (surface_queue->queue_size < surface_queue->num_attached) { + node->type = QUEUE_NODE_TYPE_DEQUEUE; + *surface = node->surface; + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface); + + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE); + + _notify_emit(surface_queue, &surface_queue->dequeue_noti); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +int +tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait) +{ + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0); + + _tbm_surf_queue_mutex_unlock(); + + _notify_emit(surface_queue, &surface_queue->can_dequeue_noti); + + _tbm_surf_queue_mutex_lock(); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0); + + pthread_mutex_lock(&surface_queue->lock); + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); + + if (_queue_is_empty(&surface_queue->free_queue)) { + if (surface_queue->impl && surface_queue->impl->need_attach) + surface_queue->impl->need_attach(surface_queue); + + if (!_tbm_surface_queue_is_valid(surface_queue)) { + TBM_ERR("surface_queue:%p is invalid", surface_queue); + pthread_mutex_unlock(&surface_queue->lock); + _tbm_surf_queue_mutex_unlock(); + return 0; + } + } + + if 
(!_queue_is_empty(&surface_queue->free_queue)) { + pthread_mutex_unlock(&surface_queue->lock); + _tbm_surf_queue_mutex_unlock(); + return 1; + } + + if (wait && _tbm_surface_queue_get_node_count(surface_queue, + QUEUE_NODE_TYPE_ACQUIRE)) { + _tbm_surf_queue_mutex_unlock(); + pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock); + pthread_mutex_unlock(&surface_queue->lock); + return 1; + } + + pthread_mutex_unlock(&surface_queue->lock); + _tbm_surf_queue_mutex_unlock(); + return 0; +} + +tbm_surface_queue_error_e +tbm_surface_queue_release(tbm_surface_queue_h + surface_queue, tbm_surface_h surface) +{ + queue_node *node; + int queue_type; + + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL, + TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE); + + pthread_mutex_lock(&surface_queue->lock); + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface); + + node = _queue_get_node(surface_queue, 0, surface, &queue_type); + if (node == NULL || queue_type != NODE_LIST) { + TBM_ERR("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n", + node, queue_type); + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + if (!node) { + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE); + return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE; + } else { + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST); + return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST; + } + } + + if (node->delete_pending) { + TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface); + + _queue_delete_node(surface_queue, node); + + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE); + + return TBM_SURFACE_QUEUE_ERROR_NONE; + } - TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface); + if (surface_queue->queue_size < surface_queue->num_attached) { + TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface); if (surface_queue->impl && surface_queue->impl->need_detach) surface_queue->impl->need_detach(surface_queue, node); @@ -1090,6 +1419,9 @@ tbm_surface_queue_release(tbm_surface_queue_h pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); + + _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE); + return TBM_SURFACE_QUEUE_ERROR_NONE; } @@ -1098,11 +1430,13 @@ tbm_surface_queue_release(tbm_surface_queue_h else _tbm_surface_queue_release(surface_queue, node, 1); - if (_queue_is_empty(&surface_queue->free_queue)) { + if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) { + TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface); + _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION); pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); - return TBM_SURFACE_QUEUE_ERROR_NONE; + return TBM_SURFACE_ERROR_INVALID_OPERATION; } node->type = QUEUE_NODE_TYPE_RELEASE; @@ -1112,18 +1446,81 @@ tbm_surface_queue_release(tbm_surface_queue_h _tbm_surf_queue_mutex_unlock(); + _trace_emit(surface_queue, &surface_queue->trace_noti, surface, 
TBM_SURFACE_QUEUE_TRACE_RELEASE); + _notify_emit(surface_queue, &surface_queue->dequeuable_noti); return TBM_SURFACE_QUEUE_ERROR_NONE; } tbm_surface_queue_error_e +tbm_surface_queue_cancel_acquire(tbm_surface_queue_h + surface_queue, tbm_surface_h surface) +{ + queue_node *node; + int queue_type; + + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL, + TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE); + + pthread_mutex_lock(&surface_queue->lock); + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface); + + node = _queue_get_node(surface_queue, 0, surface, &queue_type); + if (node == NULL || queue_type != NODE_LIST) { + TBM_ERR("tbm_surface_queue_cancel_acquire::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n", + node, queue_type); + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST); + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST; + } + + if (surface_queue->impl && surface_queue->impl->enqueue) + surface_queue->impl->enqueue(surface_queue, node); + else + _tbm_surface_queue_enqueue(surface_queue, node, 1); + + if (_queue_is_empty(&surface_queue->dirty_queue)) { + TBM_ERR("enqueue surface but queue is empty node:%p\n", node); + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE); + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE; + } + + node->type = QUEUE_NODE_TYPE_ENQUEUE; + + pthread_mutex_unlock(&surface_queue->lock); + pthread_cond_signal(&surface_queue->dirty_cond); + + _tbm_surf_queue_mutex_unlock(); + + _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE); + + _notify_emit(surface_queue, &surface_queue->acquirable_noti); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +tbm_surface_queue_error_e tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue, tbm_surface_h *surface) { queue_node *node; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + *surface = NULL; TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); @@ -1137,28 +1534,33 @@ tbm_surface_queue_acquire(tbm_surface_queue_h else node = _tbm_surface_queue_acquire(surface_queue); - if (node == NULL) { - *surface = NULL; + if (node == NULL || node->surface == NULL) { + TBM_ERR("_queue_node_pop_front failed\n"); + _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY); pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); return TBM_SURFACE_QUEUE_ERROR_EMPTY; } - if (node->surface == NULL) { - *surface = NULL; - TBM_LOG_E("_queue_node_pop_front failed\n"); - pthread_mutex_unlock(&surface_queue->lock); - - _tbm_surf_queue_mutex_unlock(); - return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE; - } - node->type = QUEUE_NODE_TYPE_ACQUIRE; *surface = node->surface; - TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface); + if (surface_queue->acquire_sync_count == 1) { + tbm_surface_info_s info; + int ret; + + TBM_ERR("start map surface:%p", *surface); + ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info); + TBM_ERR("end map surface:%p", *surface); + if (ret == TBM_SURFACE_ERROR_NONE) + tbm_surface_unmap(*surface); + } + + if 
(surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--; + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface); pthread_mutex_unlock(&surface_queue->lock); @@ -1167,6 +1569,8 @@ tbm_surface_queue_acquire(tbm_surface_queue_h if (b_dump_queue) tbm_surface_internal_dump_buffer(*surface, "acquire"); + _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE); + return TBM_SURFACE_QUEUE_ERROR_NONE; } @@ -1174,58 +1578,46 @@ int tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait) { _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0); pthread_mutex_lock(&surface_queue->lock); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); - - if (_queue_is_empty(&surface_queue->dirty_queue)) { - if (wait && - _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE)) { - - _tbm_surf_queue_mutex_unlock(); - - pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock); - - _tbm_surf_queue_mutex_lock(); - - if (!_tbm_surface_queue_is_valid(surface_queue)) { - TBM_LOG_E("surface_queue:%p is invalid", surface_queue); - _tbm_surf_queue_mutex_unlock(); - return 0; - } - - pthread_mutex_unlock(&surface_queue->lock); - - _tbm_surf_queue_mutex_unlock(); - return 1; - } + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); + if (!_queue_is_empty(&surface_queue->dirty_queue)) { pthread_mutex_unlock(&surface_queue->lock); + _tbm_surf_queue_mutex_unlock(); + return 1; + } + if (wait && _tbm_surface_queue_get_node_count(surface_queue, + QUEUE_NODE_TYPE_DEQUEUE)) { _tbm_surf_queue_mutex_unlock(); - return 0; + pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock); + pthread_mutex_unlock(&surface_queue->lock); + return 1; } pthread_mutex_unlock(&surface_queue->lock); - _tbm_surf_queue_mutex_unlock(); - - return 1; + return 0; } void tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue) { - queue_node *node, *tmp; + queue_node *node = NULL, *tmp; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue)); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); + + LIST_DEL(&surface_queue->item_link); LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) _queue_delete_node(surface_queue, node); @@ -1236,16 +1628,16 @@ tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue) _notify_emit(surface_queue, &surface_queue->destory_noti); _notify_remove_all(&surface_queue->destory_noti); - _notify_remove_all(&surface_queue->acquirable_noti); _notify_remove_all(&surface_queue->dequeuable_noti); + _notify_remove_all(&surface_queue->dequeue_noti); + _notify_remove_all(&surface_queue->can_dequeue_noti); + _notify_remove_all(&surface_queue->acquirable_noti); _notify_remove_all(&surface_queue->reset_noti); + _trace_remove_all(&surface_queue->trace_noti); pthread_mutex_destroy(&surface_queue->lock); - LIST_DEL(&surface_queue->item_link); - free(surface_queue); - surface_queue = NULL; if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) _deinit_tbm_surf_queue_bufmgr(); @@ -1257,14 +1649,15 @@ tbm_surface_queue_error_e tbm_surface_queue_reset(tbm_surface_queue_h surface_queue, int width, int height, int format) { - queue_node *node, *tmp; + queue_node *node = NULL, *tmp; 
_tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); if (width == surface_queue->width && height == surface_queue->height && format == surface_queue->format) { @@ -1278,14 +1671,23 @@ tbm_surface_queue_reset(tbm_surface_queue_h surface_queue->height = height; surface_queue->format = format; - /* Destory surface and Push to free_queue */ - LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) - _queue_delete_node(surface_queue, node); + if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) { + /* Destory surface and Push to free_queue */ + LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) + _queue_delete_node(surface_queue, node); + + LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) + node->delete_pending = 1; + } else { + LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) + _queue_delete_node(surface_queue, node); + + _queue_init(&surface_queue->dirty_queue); + LIST_INITHEAD(&surface_queue->list); + } /* Reset queue */ _queue_init(&surface_queue->free_queue); - _queue_init(&surface_queue->dirty_queue); - LIST_INITHEAD(&surface_queue->list); surface_queue->num_attached = 0; @@ -1303,19 +1705,52 @@ tbm_surface_queue_reset(tbm_surface_queue_h } tbm_surface_queue_error_e +tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue) +{ + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + + _tbm_surf_queue_mutex_unlock(); + + _notify_emit(surface_queue, &surface_queue->reset_noti); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +tbm_surface_queue_error_e +tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue) +{ + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + + _tbm_surf_queue_mutex_unlock(); + + _notify_emit(surface_queue, &surface_queue->dequeuable_noti); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +tbm_surface_queue_error_e tbm_surface_queue_set_size(tbm_surface_queue_h surface_queue, int queue_size, int flush) { - queue_node *node, *tmp; + queue_node *node = NULL, *tmp; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, - TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER); + TBM_ERROR_INVALID_PARAMETER); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); if ((surface_queue->queue_size == queue_size) && !flush) { _tbm_surf_queue_mutex_unlock(); @@ -1325,14 +1760,23 @@ tbm_surface_queue_set_size(tbm_surface_queue_h pthread_mutex_lock(&surface_queue->lock); if (flush) { - /* Destory surface and Push to free_queue */ - LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) - _queue_delete_node(surface_queue, node); + if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) { + /* Destory surface and Push to free_queue */ + LIST_FOR_EACH_ENTRY_SAFE(node, tmp, 
&surface_queue->free_queue.head, item_link) + _queue_delete_node(surface_queue, node); + + LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) + node->delete_pending = 1; + } else { + LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) + _queue_delete_node(surface_queue, node); + + _queue_init(&surface_queue->dirty_queue); + LIST_INITHEAD(&surface_queue->list); + } /* Reset queue */ _queue_init(&surface_queue->free_queue); - _queue_init(&surface_queue->dirty_queue); - LIST_INITHEAD(&surface_queue->list); surface_queue->num_attached = 0; surface_queue->queue_size = queue_size; @@ -1353,7 +1797,7 @@ tbm_surface_queue_set_size(tbm_surface_queue_h int need_del = surface_queue->queue_size - queue_size; LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) { - TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface); + TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface); if (surface_queue->impl && surface_queue->impl->need_detach) surface_queue->impl->need_detach(surface_queue, node); @@ -1377,16 +1821,54 @@ tbm_surface_queue_set_size(tbm_surface_queue_h } tbm_surface_queue_error_e +tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue) +{ + queue_node *node = NULL; + + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); + + if (surface_queue->num_attached == 0) { + _tbm_surf_queue_mutex_unlock(); + return TBM_SURFACE_QUEUE_ERROR_NONE; + } + + pthread_mutex_lock(&surface_queue->lock); + + /* Destory surface in free_queue */ + while ((node = _queue_node_pop_front(&surface_queue->free_queue))) { + if (surface_queue->impl && surface_queue->impl->need_detach) + surface_queue->impl->need_detach(surface_queue, node); + else + _tbm_surface_queue_detach(surface_queue, node->surface); + } + + /* Reset queue */ + _queue_init(&surface_queue->free_queue); + + pthread_mutex_unlock(&surface_queue->lock); + _tbm_surf_queue_mutex_unlock(); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +tbm_surface_queue_error_e tbm_surface_queue_flush(tbm_surface_queue_h surface_queue) { - queue_node *node, *tmp; + queue_node *node = NULL, *tmp; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); if (surface_queue->num_attached == 0) { _tbm_surf_queue_mutex_unlock(); @@ -1395,14 +1877,23 @@ tbm_surface_queue_flush(tbm_surface_queue_h surface_queue) pthread_mutex_lock(&surface_queue->lock); - /* Destory surface and Push to free_queue */ - LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) - _queue_delete_node(surface_queue, node); + if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) { + /* Destory surface and Push to free_queue */ + LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) + _queue_delete_node(surface_queue, node); + + LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) + node->delete_pending = 1; + } else { + LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) + _queue_delete_node(surface_queue, node); + + _queue_init(&surface_queue->dirty_queue); 
+ LIST_INITHEAD(&surface_queue->list); + } /* Reset queue */ _queue_init(&surface_queue->free_queue); - _queue_init(&surface_queue->dirty_queue); - LIST_INITHEAD(&surface_queue->list); surface_queue->num_attached = 0; @@ -1423,19 +1914,23 @@ tbm_surface_queue_error_e tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue, tbm_surface_h *surfaces, int *num) { - queue_node *node; + queue_node *node = NULL; _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL, - TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER); + TBM_ERROR_INVALID_PARAMETER); + + *num = 0; pthread_mutex_lock(&surface_queue->lock); - *num = 0; LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) { + if (node->delete_pending) continue; + if (surfaces) surfaces[*num] = node->surface; @@ -1449,6 +1944,49 @@ tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue, return TBM_SURFACE_QUEUE_ERROR_NONE; } +tbm_surface_queue_error_e +tbm_surface_queue_get_trace_surface_num( + tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num) +{ + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL, + TBM_ERROR_INVALID_PARAMETER); + + *num = 0; + + pthread_mutex_lock(&surface_queue->lock); + + switch (trace) { + case TBM_SURFACE_QUEUE_TRACE_NONE: + *num = 0; + break; + case TBM_SURFACE_QUEUE_TRACE_DEQUEUE: + *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE); + break; + case TBM_SURFACE_QUEUE_TRACE_ENQUEUE: + *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE); + break; + case TBM_SURFACE_QUEUE_TRACE_ACQUIRE: + *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE); + break; + case TBM_SURFACE_QUEUE_TRACE_RELEASE: + *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE); + break; + default: + break; + } + + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + typedef struct { int flags; } tbm_queue_default; @@ -1469,10 +2007,13 @@ __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue) return; if (surface_queue->alloc_cb) { + pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data); _tbm_surf_queue_mutex_lock(); + pthread_mutex_lock(&surface_queue->lock); + /* silent return */ if (!surface) return; @@ -1505,22 +2046,30 @@ tbm_surface_queue_h tbm_surface_queue_create(int queue_size, int width, int height, int format, int flags) { - TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL); - TBM_RETURN_VAL_IF_FAIL(width > 0, NULL); - TBM_RETURN_VAL_IF_FAIL(height > 0, NULL); - TBM_RETURN_VAL_IF_FAIL(format > 0, NULL); - _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL); tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1, sizeof(struct _tbm_surface_queue)); - TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL); 
+ if (!surface_queue) { + TBM_ERR("cannot allocate the surface_queue.\n"); + _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY); + _tbm_surf_queue_mutex_unlock(); + return NULL; + } - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); tbm_queue_default *data = (tbm_queue_default *) calloc(1, sizeof(tbm_queue_default)); if (data == NULL) { + TBM_ERR("cannot allocate the tbm_queue_default.\n"); + _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY); free(surface_queue); _tbm_surf_queue_mutex_unlock(); return NULL; @@ -1555,6 +2104,9 @@ __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue) { tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data; + if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) + return; + _queue_init(&data->dequeue_list); } @@ -1574,10 +2126,13 @@ __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue) return; if (surface_queue->alloc_cb) { + pthread_mutex_unlock(&surface_queue->lock); _tbm_surf_queue_mutex_unlock(); surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data); _tbm_surf_queue_mutex_lock(); + pthread_mutex_lock(&surface_queue->lock); + /* silent return */ if (!surface) return; @@ -1599,16 +2154,31 @@ __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue, queue_node *node) { tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data; - queue_node *next, *tmp; + queue_node *first = NULL; + + first = container_of(data->dequeue_list.head.next, first, item_link); + if (first != node) { + return; + } node->priv_flags = 0; - LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) { - if (next->priv_flags) - break; - _queue_node_pop(&data->dequeue_list, next); - _tbm_surface_queue_enqueue(surface_queue, next, 1); + _queue_node_pop(&data->dequeue_list, node); + _tbm_surface_queue_enqueue(surface_queue, node, 1); +} + +static void +__tbm_queue_sequence_release(tbm_surface_queue_h surface_queue, + queue_node *node) +{ + tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data; + + if (node->priv_flags) { + node->priv_flags = 0; + _queue_node_pop(&data->dequeue_list, node); } + + _tbm_surface_queue_release(surface_queue, node, 1); } static queue_node * @@ -1633,7 +2203,7 @@ static const tbm_surface_queue_interface tbm_queue_sequence_impl = { __tbm_queue_sequence_destroy, __tbm_queue_sequence_need_attach, __tbm_queue_sequence_enqueue, - NULL, /*__tbm_queue_sequence_release*/ + __tbm_queue_sequence_release, __tbm_queue_sequence_dequeue, NULL, /*__tbm_queue_sequence_acquire*/ NULL, /*__tbm_queue_sequence_need_dettach*/ @@ -1643,22 +2213,30 @@ tbm_surface_queue_h tbm_surface_queue_sequence_create(int queue_size, int width, int height, int format, int flags) { - TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL); - TBM_RETURN_VAL_IF_FAIL(width > 0, NULL); - TBM_RETURN_VAL_IF_FAIL(height > 0, NULL); - TBM_RETURN_VAL_IF_FAIL(format > 0, NULL); - _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL); + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL); tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1, sizeof(struct _tbm_surface_queue)); - TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL); + if (surface_queue == NULL) { + TBM_ERR("cannot allocate the surface_queue.\n"); + 
_tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY); + _tbm_surf_queue_mutex_unlock(); + return NULL; + } - TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue); + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue); tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1, sizeof(tbm_queue_sequence)); if (data == NULL) { + TBM_ERR("cannot allocate the tbm_queue_sequence.\n"); + _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY); free(surface_queue); _tbm_surf_queue_mutex_unlock(); return NULL; @@ -1674,4 +2252,59 @@ tbm_surface_queue_sequence_create(int queue_size, int width, return surface_queue; } -/* LCOV_EXCL_STOP */ + +tbm_surface_queue_error_e +tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue, + int modes) +{ + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + + pthread_mutex_lock(&surface_queue->lock); + + if (modes == TBM_SURFACE_QUEUE_MODE_NONE) + modes = TBM_SURFACE_QUEUE_MODE_NONE; + else + surface_queue->modes |= modes; + + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +} + +tbm_surface_queue_error_e +tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue, + unsigned int sync_count) +{ + int dequeue_num, enqueue_num; + + _tbm_surf_queue_mutex_lock(); + _tbm_set_last_result(TBM_ERROR_NONE); + + TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), + TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE); + + pthread_mutex_lock(&surface_queue->lock); + + dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE); + enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE); + + if (dequeue_num + sync_count == 0) + surface_queue->acquire_sync_count = enqueue_num; + else + surface_queue->enqueue_sync_count = dequeue_num + sync_count; + + TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n", + surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count); + + pthread_mutex_unlock(&surface_queue->lock); + + _tbm_surf_queue_mutex_unlock(); + + return TBM_SURFACE_QUEUE_ERROR_NONE; +}
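
A minimal usage sketch of the notification and trace hooks added by this patch (tbm_surface_queue_add_can_dequeue_cb, tbm_surface_queue_add_trace_cb) together with the dequeue/enqueue/acquire/release cycle that drives _notify_emit() and _trace_emit(). It is not part of the commit above and is based only on the signatures visible in this diff; the queue size, format, zero bo flags and the printf logging are illustrative assumptions.

/*
 * Sketch only: exercises the callback/trace APIs introduced by this patch.
 * Queue dimensions, TBM_FORMAT_ARGB8888 and the flags value 0 are assumptions
 * for illustration, not values taken from libtbm.
 */
#include <stdio.h>

#include <tbm_surface.h>
#include <tbm_surface_queue.h>

static void
trace_cb(tbm_surface_queue_h queue, tbm_surface_h surface,
	 tbm_surface_queue_trace trace, void *data)
{
	/* Invoked via _trace_emit() on every dequeue/enqueue/acquire/release
	 * (and their cancel variants) once registered with add_trace_cb(). */
	(void)data;
	printf("trace: queue=%p surface=%p event=%d\n",
	       (void *)queue, (void *)surface, (int)trace);
}

static void
can_dequeue_cb(tbm_surface_queue_h queue, void *data)
{
	/* Emitted from tbm_surface_queue_can_dequeue() before the free_queue
	 * is inspected, so a producer-side proxy can return pending buffers. */
	(void)data;
	printf("can_dequeue queried: queue=%p\n", (void *)queue);
}

int
main(void)
{
	tbm_surface_queue_h queue;
	tbm_surface_h surface = NULL;

	queue = tbm_surface_queue_create(3, 256, 256, TBM_FORMAT_ARGB8888, 0);
	if (!queue)
		return -1;

	tbm_surface_queue_add_trace_cb(queue, trace_cb, NULL);
	tbm_surface_queue_add_can_dequeue_cb(queue, can_dequeue_cb, NULL);

	/* Producer side: dequeue a free buffer, draw, hand it back. */
	if (tbm_surface_queue_can_dequeue(queue, 0) &&
	    tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
		/* render into the surface here (tbm_surface_map/unmap) */
		tbm_surface_queue_enqueue(queue, surface);
	}

	/* Consumer side: acquire the dirty buffer, display, release it. */
	if (tbm_surface_queue_can_acquire(queue, 0) &&
	    tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
		/* scan the buffer out / compose it here */
		tbm_surface_queue_release(queue, surface);
	}

	tbm_surface_queue_destroy(queue);

	return 0;
}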
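
The other behavioural change worth calling out is TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE: once set via tbm_surface_queue_set_modes(), reset/flush/set_size destroy only the nodes sitting in the free_queue and mark attached in-flight nodes as delete_pending, deferring their destruction until the owner returns them (see the delete_pending branches in tbm_surface_queue_release() and tbm_surface_queue_cancel_dequeue()). A short sketch of opting a queue into that mode before a resize follows; the helper name and its parameters are hypothetical.

/* Hypothetical helper illustrating TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE:
 * with the mode set, tbm_surface_queue_reset() frees only the free_queue
 * nodes and flags in-flight ones as delete_pending, so buffers still held
 * by the producer or consumer survive the resize and are destroyed when
 * they come back to the queue. */
static void
resize_keeping_inflight_buffers(tbm_surface_queue_h queue,
				int width, int height, int format)
{
	tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE);
	tbm_surface_queue_reset(queue, width, height, format);
}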