#define DIRTY_QUEUE 2
#define NODE_LIST 4
-#define TBM_QUEUE_DEBUG 0
-
-#ifdef TRACE
-#define TBM_QUEUE_TRACE(fmt, ...) { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
-#else
-#define TBM_QUEUE_TRACE(fmt, ...)
-#endif /* TRACE */
-
-#if TBM_QUEUE_DEBUG
-#define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
-#define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
-#else
-#define TBM_LOCK()
-#define TBM_UNLOCK()
-#endif
-
static tbm_bufmgr g_surf_queue_bufmgr;
static pthread_mutex_t tbm_surf_queue_lock;
void _tbm_surface_queue_mutex_unlock(void);
/* check condition */
#define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
if (!(cond)) {\
- TBM_LOG_E("'%s' failed.\n", #cond);\
+ TBM_ERR("'%s' failed.\n", #cond);\
+ _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
_tbm_surf_queue_mutex_unlock();\
return;\
} \
#define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
if (!(cond)) {\
- TBM_LOG_E("'%s' failed.\n", #cond);\
+ TBM_ERR("'%s' failed.\n", #cond);\
+ _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
_tbm_surf_queue_mutex_unlock();\
return val;\
} \
Queue_Node_Type type;
unsigned int priv_flags; /*for each queue*/
+
+ int delete_pending;
} queue_node;
typedef struct {
void *data;
} queue_notify;
+/* Entry of a surface-queue trace-callback list (surface_queue->trace_noti). */
+typedef struct {
+	struct list_head link;	/* link into the queue's trace_noti list */
+
+	tbm_surface_queue_trace_cb cb;	/* user-registered trace callback */
+	void *data;	/* opaque user data handed back to cb */
+} queue_trace;
+
typedef struct _tbm_surface_queue_interface {
void (*init)(tbm_surface_queue_h queue);
void (*reset)(tbm_surface_queue_h queue);
struct list_head destory_noti;
struct list_head dequeuable_noti;
struct list_head dequeue_noti;
+ struct list_head can_dequeue_noti;
struct list_head acquirable_noti;
struct list_head reset_noti;
+ struct list_head trace_noti;
pthread_mutex_t lock;
pthread_cond_t free_cond;
void *alloc_cb_data;
struct list_head item_link; /* link of surface queue */
-};
-/* LCOV_EXCL_START */
+ int modes;
+ unsigned int enqueue_sync_count;
+ unsigned int acquire_sync_count;
+};
static bool
_tbm_surf_queue_mutex_init(void)
return true;
if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
- TBM_LOG_E("fail: tbm_surf_queue mutex init\n");
+ TBM_ERR("fail: pthread_mutex_init\n");
return false;
}
+/* Acquire the process-global surface-queue mutex, lazily initializing
+ * it on first use.  Returns WITHOUT locking if initialization fails. */
static void
_tbm_surf_queue_mutex_lock(void)
{
-	if (!_tbm_surf_queue_mutex_init())
+	if (!_tbm_surf_queue_mutex_init()) {
+		TBM_ERR("fail: _tbm_surf_queue_mutex_init\n");
		return;
+	}
	pthread_mutex_lock(&tbm_surf_queue_lock);
}
{
tbm_surface_queue_h old_data = NULL;
- if (surface_queue == NULL || g_surf_queue_bufmgr == NULL) {
- TBM_TRACE("error: surface_queue is NULL or not initialized\n");
+ if (surface_queue == NULL) {
+ TBM_ERR("error: surface_queue is NULL.\n");
+ return 0;
+ }
+
+ if (g_surf_queue_bufmgr == NULL) {
+ TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n");
return 0;
}
if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
- TBM_TRACE("error: surf_queue_list is empty\n");
+ TBM_ERR("error: surf_queue_list is empty\n");
return 0;
}
LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
item_link) {
if (old_data == surface_queue) {
- TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
return 1;
}
}
- TBM_TRACE("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
+ TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
+
return 0;
}
{
queue_node *node;
+ if (!queue->head.next) return NULL;
+ if (!queue->count) return NULL;
+
node = LIST_ENTRY(queue_node, queue->head.next, item_link);
- LIST_DEL(&node->item_link);
+ LIST_DELINIT(&node->item_link);
queue->count--;
return node;
+/* Unlink @node from @queue and decrement the queue's count.
+ * LIST_DELINIT (vs LIST_DEL) leaves the node's link re-initialized so a
+ * later delete/unlink of the same node is safe.  Returns @node. */
static queue_node *
_queue_node_pop(queue *queue, queue_node *node)
{
-	LIST_DEL(&node->item_link);
+	LIST_DELINIT(&node->item_link);
	queue->count--;
	return node;
}
}
+ TBM_ERR("fail to get the queue_node.\n");
+
return NULL;
}
}
}
- TBM_LOG_E("Cannot find notifiy\n");
+ TBM_ERR("Cannot find notifiy\n");
}
static void
item->cb(surface_queue, item->data);
}
+/* Append a (cb, data) trace-callback entry to @list.
+ * Logs and returns early if cb is NULL or the allocation fails.
+ * Caller is expected to hold the queue lock. */
+static void
+_trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
+		void *data)
+{
+	TBM_RETURN_IF_FAIL(cb != NULL);
+
+	queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
+
+	TBM_RETURN_IF_FAIL(item != NULL);
+
+	LIST_INITHEAD(&item->link);
+	item->cb = cb;
+	item->data = data;
+
+	LIST_ADDTAIL(&item->link, list);
+}
+
+/* Remove the first trace-callback entry matching (cb, data) from @list
+ * and free it.  Logs an error when no matching entry exists.
+ * Caller is expected to hold the queue lock. */
+static void
+_trace_remove(struct list_head *list,
+		tbm_surface_queue_trace_cb cb, void *data)
+{
+	queue_trace *item = NULL, *tmp;
+
+	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
+		if (item->cb == cb && item->data == data) {
+			LIST_DEL(&item->link);
+			free(item);
+			return;
+		}
+	}
+
+	TBM_ERR("Cannot find notify\n");	/* fixed typo: was "notifiy" */
+}
+
+/* Drain @list, unlinking and freeing every trace-callback entry
+ * (used when tearing down a surface queue). */
+static void
+_trace_remove_all(struct list_head *list)
+{
+	queue_trace *item = NULL, *tmp;
+
+	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
+		LIST_DEL(&item->link);
+		free(item);
+	}
+}
+
+/* Invoke every trace callback registered on @list for the given
+ * surface and trace event.  Uses the SAFE iterator because a callback
+ * (user code outside libtbm) may remove its own list entry. */
+static void
+_trace_emit(tbm_surface_queue_h surface_queue,
+	struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
+{
+	queue_trace *item = NULL, *tmp;	/* fixed stray double semicolon */
+
+	/*
+		The item->cb is the outside function of the libtbm.
+		The tbm user may/can remove the item of the list,
+		so we have to use the LIST_FOR_EACH_ENTRY_SAFE.
+	*/
+	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
+		item->cb(surface_queue, surface, trace, item->data);
+}
+
static int
_tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
{
{
queue_node *node;
- if (_queue_is_empty(&surface_queue->free_queue)) {
- if (surface_queue->impl && surface_queue->impl->need_attach)
- surface_queue->impl->need_attach(surface_queue);
-
- if (_queue_is_empty(&surface_queue->free_queue))
- return NULL;
- }
-
node = _queue_node_pop_front(&surface_queue->free_queue);
return node;
surface_queue->format = format;
surface_queue->impl = impl;
surface_queue->impl_data = data;
+ surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
_queue_init(&surface_queue->free_queue);
_queue_init(&surface_queue->dirty_queue);
LIST_INITHEAD(&surface_queue->destory_noti);
LIST_INITHEAD(&surface_queue->dequeuable_noti);
LIST_INITHEAD(&surface_queue->dequeue_noti);
+ LIST_INITHEAD(&surface_queue->can_dequeue_noti);
LIST_INITHEAD(&surface_queue->acquirable_noti);
LIST_INITHEAD(&surface_queue->reset_noti);
+ LIST_INITHEAD(&surface_queue->trace_noti);
if (surface_queue->impl && surface_queue->impl->init)
surface_queue->impl->init(surface_queue);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->destory_noti, destroy_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->destory_noti, destroy_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
}
tbm_surface_queue_error_e
+/* Register @can_dequeue_cb, which tbm_surface_queue_can_dequeue()
+ * emits with all queue locks released, before inspecting the free
+ * queue.  Fails on an invalid queue or a NULL callback.
+ * NOTE(review): the NULL-cb branch returns TBM_ERROR_INVALID_PARAMETER
+ * (tbm_error_e) from a tbm_surface_queue_error_e function — confirm
+ * the enum values are interchangeable. */
+tbm_surface_queue_add_can_dequeue_cb(
+	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
+	void *data)
+{
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
+			TBM_ERROR_INVALID_PARAMETER);
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+	_notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
+
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
+/* Unregister a previously added can_dequeue callback matching
+ * (can_dequeue_cb, data).  Logs via _notify_remove if not found. */
+tbm_surface_queue_remove_can_dequeue_cb(
+	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
+	void *data)
+{
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+	_notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
+
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
tbm_surface_queue_add_acquirable_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
}
tbm_surface_queue_error_e
+/* Register @trace_cb on trace_noti; it is emitted (locks released) on
+ * every dequeue/enqueue/acquire/release and their cancel variants.
+ * Fails on an invalid queue or a NULL callback. */
+tbm_surface_queue_add_trace_cb(
+	tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
+	void *data)
+{
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
+			TBM_ERROR_INVALID_PARAMETER);
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+	_trace_add(&surface_queue->trace_noti, trace_cb, data);
+
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
+/* Unregister a previously added trace callback matching
+ * (trace_cb, data).  _trace_remove logs if no entry matches. */
+tbm_surface_queue_remove_trace_cb(
+	tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
+	void *data)
+{
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+	_trace_remove(&surface_queue->trace_noti, trace_cb, data);
+
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
tbm_surface_queue_set_alloc_cb(
tbm_surface_queue_h surface_queue,
tbm_surface_alloc_cb alloc_cb,
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
surface_queue->alloc_cb = alloc_cb;
surface_queue->free_cb = free_cb;
int width;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
width = surface_queue->width;
int height;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
height = surface_queue->height;
int format;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
format = surface_queue->format;
int queue_size;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
queue_size = surface_queue->queue_size;
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->reset_noti, reset_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->reset_noti, reset_cb, data);
int queue_type;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
node = _queue_get_node(surface_queue, 0, surface, &queue_type);
if (node == NULL || queue_type != NODE_LIST) {
- TBM_LOG_E("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+ TBM_ERR("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
node, queue_type);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+
+ if (!node) {
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
+ return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+ } else {
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
+ return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+ }
}
if (surface_queue->impl && surface_queue->impl->enqueue)
else
_tbm_surface_queue_enqueue(surface_queue, node, 1);
- if (_queue_is_empty(&surface_queue->dirty_queue)) {
- TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
+ if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
+ TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
+ _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+ return TBM_SURFACE_ERROR_INVALID_OPERATION;
}
node->type = QUEUE_NODE_TYPE_ENQUEUE;
+ if (surface_queue->enqueue_sync_count == 1) {
+ tbm_surface_info_s info;
+ int ret;
+
+ ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
+ if (ret == TBM_SURFACE_ERROR_NONE)
+ tbm_surface_unmap(surface);
+ }
+
+ if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
+
pthread_mutex_unlock(&surface_queue->lock);
pthread_cond_signal(&surface_queue->dirty_cond);
_tbm_surf_queue_mutex_unlock();
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
+
_notify_emit(surface_queue, &surface_queue->acquirable_noti);
return TBM_SURFACE_QUEUE_ERROR_NONE;
}
tbm_surface_queue_error_e
-tbm_surface_queue_dequeue(tbm_surface_queue_h
- surface_queue, tbm_surface_h *surface)
+tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
+ surface_queue, tbm_surface_h surface)
{
queue_node *node;
+ int queue_type;
_tbm_surf_queue_mutex_lock();
-
- *surface = NULL;
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- if (surface_queue->impl && surface_queue->impl->dequeue)
- node = surface_queue->impl->dequeue(surface_queue);
- else
- node = _tbm_surface_queue_dequeue(surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
- if (node == NULL || node->surface == NULL) {
- TBM_LOG_E("_queue_node_pop_front failed\n");
+ node = _queue_get_node(surface_queue, 0, surface, &queue_type);
+ if (node == NULL || queue_type != NODE_LIST) {
+ TBM_ERR("tbm_surface_queue_cancel_dequeue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+ node, queue_type);
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
+ return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
}
- node->type = QUEUE_NODE_TYPE_DEQUEUE;
- *surface = node->surface;
-
- TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
-
- pthread_mutex_unlock(&surface_queue->lock);
-
- _tbm_surf_queue_mutex_unlock();
-
- _notify_emit(surface_queue, &surface_queue->dequeue_noti);
-
- return TBM_SURFACE_QUEUE_ERROR_NONE;
-}
-
-int
-tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
-{
- _tbm_surf_queue_mutex_lock();
-
- TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
-
- pthread_mutex_lock(&surface_queue->lock);
-
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
-
- if (_queue_is_empty(&surface_queue->free_queue)) {
- if (surface_queue->impl && surface_queue->impl->need_attach)
- surface_queue->impl->need_attach(surface_queue);
+ if (node->delete_pending) {
+ TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
- if (!_tbm_surface_queue_is_valid(surface_queue)) {
- TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
- _tbm_surf_queue_mutex_unlock();
- return 0;
- }
- }
+ _queue_delete_node(surface_queue, node);
- if (!_queue_is_empty(&surface_queue->free_queue)) {
pthread_mutex_unlock(&surface_queue->lock);
- _tbm_surf_queue_mutex_unlock();
- return 1;
- }
-
- if (wait && _tbm_surface_queue_get_node_count(surface_queue,
- QUEUE_NODE_TYPE_ACQUIRE)) {
- _tbm_surf_queue_mutex_unlock();
- pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
- _tbm_surf_queue_mutex_lock();
-
- if (!_tbm_surface_queue_is_valid(surface_queue)) {
- TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
- pthread_mutex_unlock(&surface_queue->lock);
- _tbm_surf_queue_mutex_unlock();
- return 0;
- }
- pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return 1;
- }
-
- pthread_mutex_unlock(&surface_queue->lock);
- _tbm_surf_queue_mutex_unlock();
- return 0;
-}
-
-tbm_surface_queue_error_e
-tbm_surface_queue_release(tbm_surface_queue_h
- surface_queue, tbm_surface_h surface)
-{
- queue_node *node;
- int queue_type;
-
- _tbm_surf_queue_mutex_lock();
-
- TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
- TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
- TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
- TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
-
- pthread_mutex_lock(&surface_queue->lock);
-
- TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
- node = _queue_get_node(surface_queue, 0, surface, &queue_type);
- if (node == NULL || queue_type != NODE_LIST) {
- TBM_LOG_E("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
- node, queue_type);
- pthread_mutex_unlock(&surface_queue->lock);
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
- _tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
}
if (surface_queue->queue_size < surface_queue->num_attached) {
- TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+ TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
if (surface_queue->impl && surface_queue->impl->need_detach)
surface_queue->impl->need_detach(surface_queue, node);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
+
return TBM_SURFACE_QUEUE_ERROR_NONE;
}
_tbm_surface_queue_release(surface_queue, node, 1);
if (_queue_is_empty(&surface_queue->free_queue)) {
+ TBM_ERR("surface_queue->free_queue is empty.\n");
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
_tbm_surf_queue_mutex_unlock();
- _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
return TBM_SURFACE_QUEUE_ERROR_NONE;
}
tbm_surface_queue_error_e
-tbm_surface_queue_acquire(tbm_surface_queue_h
+/* Dequeue a free surface for the client to draw into.
+ * Triggers impl->need_attach when the free queue is empty, marks the
+ * node QUEUE_NODE_TYPE_DEQUEUE, stores the buffer in *surface, and —
+ * after releasing both locks — emits the DEQUEUE trace event and the
+ * dequeue notification.  *surface is NULL on every error path. */
+tbm_surface_queue_dequeue(tbm_surface_queue_h
	surface_queue, tbm_surface_h *surface)
{
	queue_node *node;

	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
+			TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+
+	*surface = NULL;
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	if (_queue_is_empty(&surface_queue->free_queue)) {
+		if (surface_queue->impl && surface_queue->impl->need_attach)
+			surface_queue->impl->need_attach(surface_queue);
+
+		/* need_attach may call back into user code; revalidate. */
+		if (!_tbm_surface_queue_is_valid(surface_queue)) {
+			TBM_ERR("surface_queue:%p is invalid", surface_queue);
+			_tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+			pthread_mutex_unlock(&surface_queue->lock);
+			_tbm_surf_queue_mutex_unlock();
+			return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
+		}
+	}
+
+	if (surface_queue->impl && surface_queue->impl->dequeue)
+		node = surface_queue->impl->dequeue(surface_queue);
+	else
+		node = _tbm_surface_queue_dequeue(surface_queue);
+
+	if (node == NULL || node->surface == NULL) {
+		TBM_ERR("_queue_node_pop_front failed\n");
+		_tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
+		pthread_mutex_unlock(&surface_queue->lock);
+
+		_tbm_surf_queue_mutex_unlock();
+		return TBM_SURFACE_QUEUE_ERROR_EMPTY;
+	}
+
+	node->type = QUEUE_NODE_TYPE_DEQUEUE;
+	*surface = node->surface;
+
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
+
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	/* callbacks run unlocked: they may call back into this API */
+	_trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
+
+	_notify_emit(surface_queue, &surface_queue->dequeue_noti);
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+/* Return 1 when a surface can be dequeued, 0 otherwise.
+ * First emits can_dequeue_noti with ALL locks released (callbacks may
+ * re-enter the queue API), then revalidates the queue, lets the impl
+ * attach buffers if the free queue is empty, and checks again.  With
+ * wait != 0 and buffers still held acquired, blocks on free_cond until
+ * one is released and then reports 1. */
+int
+tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
+{
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	/* emitted unlocked on purpose — see note above */
+	_notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
+
+	_tbm_surf_queue_mutex_lock();
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+	if (_queue_is_empty(&surface_queue->free_queue)) {
+		if (surface_queue->impl && surface_queue->impl->need_attach)
+			surface_queue->impl->need_attach(surface_queue);
+
+		if (!_tbm_surface_queue_is_valid(surface_queue)) {
+			TBM_ERR("surface_queue:%p is invalid", surface_queue);
+			pthread_mutex_unlock(&surface_queue->lock);
+			_tbm_surf_queue_mutex_unlock();
+			return 0;
+		}
+	}
+
+	if (!_queue_is_empty(&surface_queue->free_queue)) {
+		pthread_mutex_unlock(&surface_queue->lock);
+		_tbm_surf_queue_mutex_unlock();
+		return 1;
+	}
+
+	if (wait && _tbm_surface_queue_get_node_count(surface_queue,
+				QUEUE_NODE_TYPE_ACQUIRE)) {
+		_tbm_surf_queue_mutex_unlock();
+		pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
+		/* NOTE(review): queue validity is not rechecked after the wait —
+		 * confirm the queue cannot be destroyed while blocked here. */
+		pthread_mutex_unlock(&surface_queue->lock);
+		return 1;
+	}
+
+	pthread_mutex_unlock(&surface_queue->lock);
+	_tbm_surf_queue_mutex_unlock();
+	return 0;
+}
+
+tbm_surface_queue_error_e
+/* Return an acquired/owned @surface to the free queue.
+ * Nodes flagged delete_pending are destroyed instead of recycled; when
+ * more buffers are attached than queue_size allows, the surface is
+ * detached rather than released.  On success the node becomes
+ * QUEUE_NODE_TYPE_RELEASE, free_cond is signalled, and the RELEASE
+ * trace plus dequeuable notification are emitted unlocked. */
+tbm_surface_queue_release(tbm_surface_queue_h
+	surface_queue, tbm_surface_h surface)
+{
+	queue_node *node;
+	int queue_type;
+
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
+			TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+
+	/* the surface must currently be owned by the caller (NODE_LIST) */
+	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
+	if (node == NULL || queue_type != NODE_LIST) {
+		TBM_ERR("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+			node, queue_type);
+		pthread_mutex_unlock(&surface_queue->lock);
+
+		_tbm_surf_queue_mutex_unlock();
+
+		if (!node) {
+			_tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
+			return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+		} else {
+			_tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
+			return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+		}
+	}
+
+	if (node->delete_pending) {
+		TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+
+		_queue_delete_node(surface_queue, node);
+
+		pthread_mutex_unlock(&surface_queue->lock);
+
+		_tbm_surf_queue_mutex_unlock();
+
+		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
+		return TBM_SURFACE_QUEUE_ERROR_NONE;
+	}
+
+	/* over-attached: shrink back toward queue_size by detaching */
+	if (surface_queue->queue_size < surface_queue->num_attached) {
+		TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+
+		if (surface_queue->impl && surface_queue->impl->need_detach)
+			surface_queue->impl->need_detach(surface_queue, node);
+		else
+			_tbm_surface_queue_detach(surface_queue, surface);
+
+		pthread_mutex_unlock(&surface_queue->lock);
+
+		_tbm_surf_queue_mutex_unlock();
+
+		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
+		return TBM_SURFACE_QUEUE_ERROR_NONE;
+	}
+
+	if (surface_queue->impl && surface_queue->impl->release)
+		surface_queue->impl->release(surface_queue, node);
+	else
+		_tbm_surface_queue_release(surface_queue, node, 1);
+
+	/* sanity: the impl release must have moved it into the free queue */
+	if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
+		TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
+		_tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
+		pthread_mutex_unlock(&surface_queue->lock);
+
+		_tbm_surf_queue_mutex_unlock();
+		return TBM_SURFACE_ERROR_INVALID_OPERATION;
+	}
+
+	node->type = QUEUE_NODE_TYPE_RELEASE;
+
+	pthread_mutex_unlock(&surface_queue->lock);
+	pthread_cond_signal(&surface_queue->free_cond);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
+	_notify_emit(surface_queue, &surface_queue->dequeuable_noti);
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
+/* Undo an acquire: put @surface back onto the dirty queue (via the
+ * impl enqueue or the default path) so it can be acquired again.
+ * Marks the node QUEUE_NODE_TYPE_ENQUEUE, signals dirty_cond, then
+ * emits the CANCEL_ACQUIRE trace and acquirable notification. */
+tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
+	surface_queue, tbm_surface_h surface)
+{
+	queue_node *node;
+	int queue_type;
+
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
+			TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+
+	/* the surface must currently be owned by the caller (NODE_LIST) */
+	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
+	if (node == NULL || queue_type != NODE_LIST) {
+		TBM_ERR("tbm_surface_queue_cancel_acquire::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+			node, queue_type);
+		_tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
+		pthread_mutex_unlock(&surface_queue->lock);
+
+		_tbm_surf_queue_mutex_unlock();
+		return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+	}
+
+	if (surface_queue->impl && surface_queue->impl->enqueue)
+		surface_queue->impl->enqueue(surface_queue, node);
+	else
+		_tbm_surface_queue_enqueue(surface_queue, node, 1);
+
+	/* sanity: the enqueue must have populated the dirty queue */
+	if (_queue_is_empty(&surface_queue->dirty_queue)) {
+		TBM_ERR("enqueue surface but queue is empty node:%p\n", node);
+		_tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
+		pthread_mutex_unlock(&surface_queue->lock);
+
+		_tbm_surf_queue_mutex_unlock();
+		return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+	}
+
+	node->type = QUEUE_NODE_TYPE_ENQUEUE;
+
+	pthread_mutex_unlock(&surface_queue->lock);
+	pthread_cond_signal(&surface_queue->dirty_cond);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
+
+	_notify_emit(surface_queue, &surface_queue->acquirable_noti);
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
+tbm_surface_queue_acquire(tbm_surface_queue_h
+ surface_queue, tbm_surface_h *surface)
+{
+ queue_node *node;
+
+ _tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
*surface = NULL;
node = _tbm_surface_queue_acquire(surface_queue);
if (node == NULL || node->surface == NULL) {
- TBM_LOG_E("_queue_node_pop_front failed\n");
+ TBM_ERR("_queue_node_pop_front failed\n");
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
+ return TBM_SURFACE_QUEUE_ERROR_EMPTY;
}
node->type = QUEUE_NODE_TYPE_ACQUIRE;
*surface = node->surface;
- TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
+ if (surface_queue->acquire_sync_count == 1) {
+ tbm_surface_info_s info;
+ int ret;
+
+ TBM_ERR("start map surface:%p", *surface);
+ ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
+ TBM_ERR("end map surface:%p", *surface);
+ if (ret == TBM_SURFACE_ERROR_NONE)
+ tbm_surface_unmap(*surface);
+ }
+
+ if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
+
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
pthread_mutex_unlock(&surface_queue->lock);
if (b_dump_queue)
tbm_surface_internal_dump_buffer(*surface, "acquire");
+ _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
+
return TBM_SURFACE_QUEUE_ERROR_NONE;
}
tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
if (!_queue_is_empty(&surface_queue->dirty_queue)) {
pthread_mutex_unlock(&surface_queue->lock);
QUEUE_NODE_TYPE_DEQUEUE)) {
_tbm_surf_queue_mutex_unlock();
pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
- _tbm_surf_queue_mutex_lock();
-
- if (!_tbm_surface_queue_is_valid(surface_queue)) {
- TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
- pthread_mutex_unlock(&surface_queue->lock);
- _tbm_surf_queue_mutex_unlock();
- return 0;
- }
-
pthread_mutex_unlock(&surface_queue->lock);
- _tbm_surf_queue_mutex_unlock();
return 1;
}
queue_node *node = NULL, *tmp;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
LIST_DEL(&surface_queue->item_link);
_notify_remove_all(&surface_queue->destory_noti);
_notify_remove_all(&surface_queue->dequeuable_noti);
_notify_remove_all(&surface_queue->dequeue_noti);
+ _notify_remove_all(&surface_queue->can_dequeue_noti);
_notify_remove_all(&surface_queue->acquirable_noti);
_notify_remove_all(&surface_queue->reset_noti);
+ _trace_remove_all(&surface_queue->trace_noti);
pthread_mutex_destroy(&surface_queue->lock);
queue_node *node = NULL, *tmp;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
if (width == surface_queue->width && height == surface_queue->height &&
format == surface_queue->format) {
surface_queue->height = height;
surface_queue->format = format;
- /* Destory surface and Push to free_queue */
- LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
- _queue_delete_node(surface_queue, node);
+ if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
+ /* Destory surface and Push to free_queue */
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
+ _queue_delete_node(surface_queue, node);
+
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+ node->delete_pending = 1;
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+ _queue_delete_node(surface_queue, node);
+
+ _queue_init(&surface_queue->dirty_queue);
+ LIST_INITHEAD(&surface_queue->list);
+ }
/* Reset queue */
_queue_init(&surface_queue->free_queue);
- _queue_init(&surface_queue->dirty_queue);
- LIST_INITHEAD(&surface_queue->list);
surface_queue->num_attached = 0;
}
/**
 * Explicitly fire the reset-notification callbacks registered on the queue.
 * The queue handle is only validated; no queue state is modified here.
 * The callbacks run after the global surface-queue mutex is released.
 */
tbm_surface_queue_error_e
tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
{
	_tbm_surf_queue_mutex_lock();
	_tbm_set_last_result(TBM_ERROR_NONE);

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	_tbm_surf_queue_mutex_unlock();

	/* Emit outside the lock so callbacks may call back into the queue API. */
	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
+
/**
 * Explicitly fire the dequeuable-notification callbacks registered on the
 * queue.  The queue handle is only validated; no queue state is modified.
 * The callbacks run after the global surface-queue mutex is released.
 */
tbm_surface_queue_error_e
tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
{
	_tbm_surf_queue_mutex_lock();
	_tbm_set_last_result(TBM_ERROR_NONE);

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	_tbm_surf_queue_mutex_unlock();

	/* Emit outside the lock so callbacks may call back into the queue API. */
	_notify_emit(surface_queue, &surface_queue->dequeuable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
+
+tbm_surface_queue_error_e
tbm_surface_queue_set_size(tbm_surface_queue_h
surface_queue, int queue_size, int flush)
{
queue_node *node = NULL, *tmp;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
- TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
+ TBM_ERROR_INVALID_PARAMETER);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
if ((surface_queue->queue_size == queue_size) && !flush) {
_tbm_surf_queue_mutex_unlock();
pthread_mutex_lock(&surface_queue->lock);
if (flush) {
- /* Destory surface and Push to free_queue */
- LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
- _queue_delete_node(surface_queue, node);
+ if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
+ /* Destory surface and Push to free_queue */
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
+ _queue_delete_node(surface_queue, node);
+
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+ node->delete_pending = 1;
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+ _queue_delete_node(surface_queue, node);
+
+ _queue_init(&surface_queue->dirty_queue);
+ LIST_INITHEAD(&surface_queue->list);
+ }
/* Reset queue */
_queue_init(&surface_queue->free_queue);
- _queue_init(&surface_queue->dirty_queue);
- LIST_INITHEAD(&surface_queue->list);
surface_queue->num_attached = 0;
surface_queue->queue_size = queue_size;
int need_del = surface_queue->queue_size - queue_size;
LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
- TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+ TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
if (surface_queue->impl && surface_queue->impl->need_detach)
surface_queue->impl->need_detach(surface_queue, node);
}
/**
 * Flush only the free_queue: detach (and thereby destroy) every surface
 * currently idle in the free list.  Surfaces that are dequeued, enqueued or
 * acquired are left untouched.  No-op when nothing was ever attached.
 */
tbm_surface_queue_error_e
tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL;

	_tbm_surf_queue_mutex_lock();
	_tbm_set_last_result(TBM_ERROR_NONE);

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);

	/* Nothing was ever attached, so there is nothing to flush. */
	if (surface_queue->num_attached == 0) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	/* Destroy every surface in the free_queue; prefer the backend's
	 * need_detach hook when one is provided. */
	while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
		if (surface_queue->impl && surface_queue->impl->need_detach)
			surface_queue->impl->need_detach(surface_queue, node);
		else
			_tbm_surface_queue_detach(surface_queue, node->surface);
	}

	/* Reset queue */
	_queue_init(&surface_queue->free_queue);

	pthread_mutex_unlock(&surface_queue->lock);
	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
+
+tbm_surface_queue_error_e
tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
{
queue_node *node = NULL, *tmp;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
if (surface_queue->num_attached == 0) {
_tbm_surf_queue_mutex_unlock();
pthread_mutex_lock(&surface_queue->lock);
- /* Destory surface and Push to free_queue */
- LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
- _queue_delete_node(surface_queue, node);
+ if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
+ /* Destory surface and Push to free_queue */
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
+ _queue_delete_node(surface_queue, node);
+
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+ node->delete_pending = 1;
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+ _queue_delete_node(surface_queue, node);
+
+ _queue_init(&surface_queue->dirty_queue);
+ LIST_INITHEAD(&surface_queue->list);
+ }
/* Reset queue */
_queue_init(&surface_queue->free_queue);
- _queue_init(&surface_queue->dirty_queue);
- LIST_INITHEAD(&surface_queue->list);
surface_queue->num_attached = 0;
queue_node *node = NULL;
_tbm_surf_queue_mutex_lock();
-
- *num = 0;
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
- TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
+ TBM_ERROR_INVALID_PARAMETER);
+
+ *num = 0;
pthread_mutex_lock(&surface_queue->lock);
LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
+ if (node->delete_pending) continue;
+
if (surfaces)
surfaces[*num] = node->surface;
return TBM_SURFACE_QUEUE_ERROR_NONE;
}
+tbm_surface_queue_error_e
+tbm_surface_queue_get_trace_surface_num(
+ tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
+{
+ _tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
+ TBM_ERROR_INVALID_PARAMETER);
+
+ *num = 0;
+
+ pthread_mutex_lock(&surface_queue->lock);
+
+ switch (trace) {
+ case TBM_SURFACE_QUEUE_TRACE_NONE:
+ *num = 0;
+ break;
+ case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
+ *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
+ break;
+ case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
+ *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
+ break;
+ case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
+ *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
+ break;
+ case TBM_SURFACE_QUEUE_TRACE_RELEASE:
+ *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
+ break;
+ default:
+ break;
+ }
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
typedef struct {
int flags;
} tbm_queue_default;
_tbm_surf_queue_mutex_lock();
pthread_mutex_lock(&surface_queue->lock);
+ /* silent return */
if (!surface)
return;
tbm_surface_queue_create(int queue_size, int width,
int height, int format, int flags)
{
- TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
-
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
sizeof(struct _tbm_surface_queue));
- TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
+ if (!surface_queue) {
+ TBM_ERR("cannot allocate the surface_queue.\n");
+ _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
+ _tbm_surf_queue_mutex_unlock();
+ return NULL;
+ }
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
tbm_queue_default *data = (tbm_queue_default *) calloc(1,
sizeof(tbm_queue_default));
if (data == NULL) {
+ TBM_ERR("cannot allocate the tbm_queue_default.\n");
+ _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
free(surface_queue);
_tbm_surf_queue_mutex_unlock();
return NULL;
{
tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
+ if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
+ return;
+
_queue_init(&data->dequeue_list);
}
_tbm_surf_queue_mutex_lock();
pthread_mutex_lock(&surface_queue->lock);
+ /* silent return */
if (!surface)
return;
queue_node *node)
{
tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
- queue_node *next = NULL, *tmp;
+ queue_node *first = NULL;
+
+ first = container_of(data->dequeue_list.head.next, first, item_link);
+ if (first != node) {
+ return;
+ }
node->priv_flags = 0;
- LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
- if (next->priv_flags)
- break;
- _queue_node_pop(&data->dequeue_list, next);
- _tbm_surface_queue_enqueue(surface_queue, next, 1);
+ _queue_node_pop(&data->dequeue_list, node);
+ _tbm_surface_queue_enqueue(surface_queue, node, 1);
+}
+
/* Sequence-mode release hook: if the node is still parked in the pending
 * dequeue_list (priv_flags was set by the sequence dequeue path), pop it
 * out first, then perform the normal release back to the free_queue. */
static void
__tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
			queue_node *node)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;

	if (node->priv_flags) {
		node->priv_flags = 0;
		_queue_node_pop(&data->dequeue_list, node);
	}

	_tbm_surface_queue_release(surface_queue, node, 1);
}
static queue_node *
__tbm_queue_sequence_destroy,
__tbm_queue_sequence_need_attach,
__tbm_queue_sequence_enqueue,
- NULL, /*__tbm_queue_sequence_release*/
+ __tbm_queue_sequence_release,
__tbm_queue_sequence_dequeue,
NULL, /*__tbm_queue_sequence_acquire*/
NULL, /*__tbm_queue_sequence_need_dettach*/
tbm_surface_queue_sequence_create(int queue_size, int width,
int height, int format, int flags)
{
- TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
-
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
sizeof(struct _tbm_surface_queue));
- TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
+ if (surface_queue == NULL) {
+ TBM_ERR("cannot allocate the surface_queue.\n");
+ _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
+ _tbm_surf_queue_mutex_unlock();
+ return NULL;
+ }
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
sizeof(tbm_queue_sequence));
if (data == NULL) {
+ TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
+ _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
free(surface_queue);
_tbm_surf_queue_mutex_unlock();
return NULL;
return surface_queue;
}
-/* LCOV_EXCL_STOP */
+
+tbm_surface_queue_error_e
+tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
+ int modes)
+{
+ _tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+ pthread_mutex_lock(&surface_queue->lock);
+
+ if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
+ modes = TBM_SURFACE_QUEUE_MODE_NONE;
+ else
+ surface_queue->modes |= modes;
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
/**
 * Configure forced synchronization counters for enqueue/acquire.
 *
 * With the queue lock held, counts the currently dequeued and enqueued
 * buffers, then:
 *  - if dequeue_num + sync_count == 0, arms acquire-side sync for the
 *    buffers already enqueued (acquire_sync_count = enqueue_num);
 *  - otherwise arms enqueue-side sync
 *    (enqueue_sync_count = dequeue_num + sync_count).
 *
 * NOTE(review): sync_count is unsigned while dequeue_num is int, so the
 * sum and comparison are evaluated in unsigned arithmetic — confirm
 * callers never pass values that could wrap.
 */
tbm_surface_queue_error_e
tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
			unsigned int sync_count)
{
	int dequeue_num, enqueue_num;

	_tbm_surf_queue_mutex_lock();
	_tbm_set_last_result(TBM_ERROR_NONE);

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
	enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);

	if (dequeue_num + sync_count == 0)
		surface_queue->acquire_sync_count = enqueue_num;
	else
		surface_queue->enqueue_sync_count = dequeue_num + sync_count;

	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
			surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}