#include "tbm_bufmgr_int.h"
#include "list.h"
+#include <time.h>
#define FREE_QUEUE 1
#define DIRTY_QUEUE 2
#define NODE_LIST 4
-#define TBM_QUEUE_DEBUG 0
-
-#ifdef TRACE
-#define TBM_QUEUE_TRACE(fmt, ...) { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
-#else
-#define TBM_QUEUE_TRACE(fmt, ...)
-#endif /* TRACE */
-
-#if TBM_QUEUE_DEBUG
-#define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
-#define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
-#else
-#define TBM_LOCK()
-#define TBM_UNLOCK()
-#endif
-
static tbm_bufmgr g_surf_queue_bufmgr;
static pthread_mutex_t tbm_surf_queue_lock;
void _tbm_surface_queue_mutex_unlock(void);
/* check condition */
#define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
if (!(cond)) {\
- TBM_LOG_E("'%s' failed.\n", #cond);\
+ TBM_ERR("'%s' failed.\n", #cond);\
+ _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
_tbm_surf_queue_mutex_unlock();\
return;\
} \
#define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
if (!(cond)) {\
- TBM_LOG_E("'%s' failed.\n", #cond);\
+ TBM_ERR("'%s' failed.\n", #cond);\
+ _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
_tbm_surf_queue_mutex_unlock();\
return val;\
} \
Queue_Node_Type type;
unsigned int priv_flags; /*for each queue*/
+
+ int delete_pending;
} queue_node;
typedef struct {
void *alloc_cb_data;
struct list_head item_link; /* link of surface queue */
-};
-/* LCOV_EXCL_START */
+ int modes;
+ unsigned int enqueue_sync_count;
+ unsigned int acquire_sync_count;
+};
static bool
_tbm_surf_queue_mutex_init(void)
return true;
if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
- TBM_LOG_E("fail: pthread_mutex_init\n");
+ TBM_ERR("fail: pthread_mutex_init\n");
return false;
}
_tbm_surf_queue_mutex_lock(void)
{
if (!_tbm_surf_queue_mutex_init()) {
- TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
+ TBM_ERR("fail: _tbm_surf_queue_mutex_init\n");
return;
}
tbm_surface_queue_h old_data = NULL;
if (surface_queue == NULL) {
- TBM_LOG_E("error: surface_queue is NULL.\n");
+ TBM_ERR("error: surface_queue is NULL.\n");
return 0;
}
if (g_surf_queue_bufmgr == NULL) {
- TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
+ TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n");
return 0;
}
if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
- TBM_LOG_E("error: surf_queue_list is empty\n");
+ TBM_ERR("error: surf_queue_list is empty\n");
return 0;
}
LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
item_link) {
if (old_data == surface_queue) {
- TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
return 1;
}
}
- TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
+ TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
return 0;
}
}
}
- TBM_LOG_E("fail to get the queue_node.\n");
+ TBM_ERR("fail to get the queue_node.\n");
return NULL;
}
}
}
- TBM_LOG_E("Cannot find notifiy\n");
+ TBM_ERR("Cannot find notifiy\n");
}
static void
}
}
- TBM_LOG_E("Cannot find notifiy\n");
+ TBM_ERR("Cannot find notifiy\n");
}
static void
{
queue_node *node;
- if (_queue_is_empty(&surface_queue->free_queue)) {
- if (surface_queue->impl && surface_queue->impl->need_attach)
- surface_queue->impl->need_attach(surface_queue);
-
- if (_queue_is_empty(&surface_queue->free_queue)) {
- TBM_LOG_E("surface_queue->free_queue is empty.\n");
- return NULL;
- }
- }
-
node = _queue_node_pop_front(&surface_queue->free_queue);
return node;
int width, int height, int format,
const tbm_surface_queue_interface *impl, void *data)
{
+ pthread_condattr_t attr;
+
TBM_RETURN_IF_FAIL(surface_queue != NULL);
TBM_RETURN_IF_FAIL(impl != NULL);
_init_tbm_surf_queue_bufmgr();
pthread_mutex_init(&surface_queue->lock, NULL);
- pthread_cond_init(&surface_queue->free_cond, NULL);
- pthread_cond_init(&surface_queue->dirty_cond, NULL);
+
+ pthread_condattr_init(&attr);
+ pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+ pthread_cond_init(&surface_queue->free_cond, &attr);
+ pthread_cond_init(&surface_queue->dirty_cond, &attr);
+ pthread_condattr_destroy(&attr);
surface_queue->queue_size = queue_size;
surface_queue->width = width;
surface_queue->format = format;
surface_queue->impl = impl;
surface_queue->impl_data = data;
+ surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
_queue_init(&surface_queue->free_queue);
_queue_init(&surface_queue->dirty_queue);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->destory_noti, destroy_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->destory_noti, destroy_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_trace_add(&surface_queue->trace_noti, trace_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_trace_remove(&surface_queue->trace_noti, trace_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
surface_queue->alloc_cb = alloc_cb;
surface_queue->free_cb = free_cb;
int width;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
width = surface_queue->width;
int height;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
height = surface_queue->height;
int format;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
format = surface_queue->format;
int queue_size;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
queue_size = surface_queue->queue_size;
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_add(&surface_queue->reset_noti, reset_cb, data);
void *data)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
_notify_remove(&surface_queue->reset_noti, reset_cb, data);
int queue_type;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
node = _queue_get_node(surface_queue, 0, surface, &queue_type);
if (node == NULL || queue_type != NODE_LIST) {
- TBM_LOG_E("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+ TBM_ERR("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
node, queue_type);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+
+ if (!node) {
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
+ return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+ } else {
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
+ return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+ }
}
if (surface_queue->impl && surface_queue->impl->enqueue)
else
_tbm_surface_queue_enqueue(surface_queue, node, 1);
- if (_queue_is_empty(&surface_queue->dirty_queue)) {
- TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
+ if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
+ TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+ return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
}
node->type = QUEUE_NODE_TYPE_ENQUEUE;
+ if (surface_queue->enqueue_sync_count == 1) {
+ tbm_surface_info_s info;
+ int ret;
+
+ ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
+ if (ret == TBM_SURFACE_ERROR_NONE)
+ tbm_surface_unmap(surface);
+ }
+
+ if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
+
pthread_mutex_unlock(&surface_queue->lock);
pthread_cond_signal(&surface_queue->dirty_cond);
}
+/**
+ * tbm_surface_queue_cancel_dequeue - give a surface obtained with
+ * tbm_surface_queue_dequeue() back to the queue without enqueueing it.
+ *
+ * The surface must currently be client-owned: it is looked up in the
+ * queue's node list only, and it is an error if it is found in the
+ * free_queue or dirty_queue instead.
+ *
+ * Returns TBM_SURFACE_QUEUE_ERROR_NONE on success, or
+ * TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE / _INVALID_SURFACE on failure;
+ * the same code is mirrored into the thread-local last-result.
+ * Emits a TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE trace event on success.
+ *
+ * Lock order: global registry lock (_tbm_surf_queue_mutex_lock) first,
+ * then the per-queue lock; both are dropped before _trace_emit is called.
+ */
tbm_surface_queue_error_e
+tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
+ surface_queue, tbm_surface_h surface)
+{
+ queue_node *node;
+ int queue_type;
+
+ _tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
+ TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+
+ pthread_mutex_lock(&surface_queue->lock);
+
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+
+ /* queue_type must be NODE_LIST: the surface has to be held by the
+ * client, i.e. present in the node list but in neither sub-queue. */
+ node = _queue_get_node(surface_queue, 0, surface, &queue_type);
+ if (node == NULL || queue_type != NODE_LIST) {
+ TBM_ERR("tbm_surface_queue_cancel_dequeue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+ node, queue_type);
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+ return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+ }
+
+ /* The node was flagged for deletion (queue reset/resize in
+ * GUARANTEE_CYCLE mode) while the client held it: destroy it now
+ * instead of recycling it into the free_queue. */
+ if (node->delete_pending) {
+ TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+
+ _queue_delete_node(surface_queue, node);
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+ }
+
+ /* The queue shrank while this surface was dequeued: detach the
+ * surplus surface rather than returning it to the free_queue. */
+ if (surface_queue->queue_size < surface_queue->num_attached) {
+ TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+
+ if (surface_queue->impl && surface_queue->impl->need_detach)
+ surface_queue->impl->need_detach(surface_queue, node);
+ else
+ _tbm_surface_queue_detach(surface_queue, surface);
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+ }
+
+ /* Normal path: push the node back onto the free_queue via the
+ * backend's release hook (or the default release). */
+ if (surface_queue->impl && surface_queue->impl->release)
+ surface_queue->impl->release(surface_queue, node);
+ else
+ _tbm_surface_queue_release(surface_queue, node, 1);
+
+ /* Sanity check: the release above must have repopulated free_queue. */
+ if (_queue_is_empty(&surface_queue->free_queue)) {
+ TBM_ERR("surface_queue->free_queue is empty.\n");
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+ return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+ }
+
+ node->type = QUEUE_NODE_TYPE_RELEASE;
+
+ /* Signal outside the per-queue lock: wakes one waiter blocked in
+ * can_dequeue / can_dequeue_wait_timeout on free_cond. */
+ pthread_mutex_unlock(&surface_queue->lock);
+ pthread_cond_signal(&surface_queue->free_cond);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
tbm_surface_queue_dequeue(tbm_surface_queue_h
surface_queue, tbm_surface_h *surface)
{
queue_node *node;
_tbm_surf_queue_mutex_lock();
-
- *surface = NULL;
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+ *surface = NULL;
+
pthread_mutex_lock(&surface_queue->lock);
+ if (_queue_is_empty(&surface_queue->free_queue)) {
+ if (surface_queue->impl && surface_queue->impl->need_attach)
+ surface_queue->impl->need_attach(surface_queue);
+
+ if (!_tbm_surface_queue_is_valid(surface_queue)) {
+ TBM_ERR("surface_queue:%p is invalid", surface_queue);
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ pthread_mutex_unlock(&surface_queue->lock);
+ _tbm_surf_queue_mutex_unlock();
+ return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
+ }
+ }
+
if (surface_queue->impl && surface_queue->impl->dequeue)
node = surface_queue->impl->dequeue(surface_queue);
else
node = _tbm_surface_queue_dequeue(surface_queue);
if (node == NULL || node->surface == NULL) {
- TBM_LOG_E("_queue_node_pop_front failed\n");
+ TBM_ERR("_queue_node_pop_front failed\n");
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
+ return TBM_SURFACE_QUEUE_ERROR_EMPTY;
}
node->type = QUEUE_NODE_TYPE_DEQUEUE;
*surface = node->surface;
- TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
pthread_mutex_unlock(&surface_queue->lock);
return TBM_SURFACE_QUEUE_ERROR_NONE;
}
-int
-tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
+/**
+ * tbm_surface_queue_can_dequeue_wait_timeout - block until a surface can
+ * be dequeued from the free_queue, or until ms_timeout milliseconds pass.
+ *
+ * Returns TBM_SURFACE_QUEUE_ERROR_NONE when a surface is dequeueable,
+ * TBM_SURFACE_QUEUE_ERROR_TIMEOUT on timeout, or
+ * TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE if the queue is/becomes invalid.
+ *
+ * The deadline is computed on CLOCK_MONOTONIC; free_cond is created with
+ * a CLOCK_MONOTONIC condattr in the queue create path above, so the
+ * clocks of clock_gettime() and pthread_cond_timedwait() agree.
+ */
+tbm_surface_queue_error_e
+tbm_surface_queue_can_dequeue_wait_timeout(tbm_surface_queue_h surface_queue, int ms_timeout)
{
+ int ret;
+ struct timespec tp;
+
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
- TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ /* The registry lock is dropped and re-taken around code omitted from
+ * this hunk; the queue must be revalidated after re-acquiring it. */
_tbm_surf_queue_mutex_unlock();
_tbm_surf_queue_mutex_lock();
- TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+ /* Give the backend a chance to attach buffers before deciding. */
if (_queue_is_empty(&surface_queue->free_queue)) {
if (surface_queue->impl && surface_queue->impl->need_attach)
surface_queue->impl->need_attach(surface_queue);
if (!_tbm_surface_queue_is_valid(surface_queue)) {
- TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
+ TBM_ERR("surface_queue:%p is invalid", surface_queue);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return 0;
+ return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
}
}
if (!_queue_is_empty(&surface_queue->free_queue)) {
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return 1;
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
}
- if (wait && _tbm_surface_queue_get_node_count(surface_queue,
- QUEUE_NODE_TYPE_ACQUIRE)) {
- _tbm_surf_queue_mutex_unlock();
- pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
- _tbm_surf_queue_mutex_lock();
+ _tbm_surf_queue_mutex_unlock();
+
+ /* free_queue is empty: wait (holding only the per-queue lock) for a
+ * release/cancel_dequeue to signal free_cond, up to the deadline.
+ * NOTE(review): the deadline is recomputed each loop iteration, so a
+ * retried wait (e.g. after EINTR) restarts the full timeout. */
+ while (1) {
+ clock_gettime(CLOCK_MONOTONIC, &tp);
+
+ /* NOTE(review): for ms_timeout == 1000 exactly, '>' skips the
+ * tv_sec bump while (1000 % 1000) adds 0 ns, leaving the deadline
+ * at "now" and timing out immediately — this looks like it should
+ * be 'ms_timeout >= 1000'. TODO confirm intent. */
+ if (ms_timeout > 1000)
+ tp.tv_sec += ms_timeout / 1000;
+
+ tp.tv_nsec += (ms_timeout % 1000) * 1000000;
+
+ /* NOTE(review): '>' should presumably be '>=': tv_nsec equal to
+ * 1000000000L is an invalid timespec, which makes
+ * pthread_cond_timedwait fail with EINVAL and the loop below
+ * retry forever. TODO confirm. */
+ if (tp.tv_nsec > 1000000000L) {
+ tp.tv_sec++;
+ tp.tv_nsec -= 1000000000L;
+ }
+
+ ret = pthread_cond_timedwait(&surface_queue->free_cond, &surface_queue->lock, &tp);
+ if (ret) {
+ if (ret == ETIMEDOUT) {
+ TBM_ERR("surface_queue:%p can dequeue wait timeout", surface_queue);
+ pthread_mutex_unlock(&surface_queue->lock);
+ return TBM_SURFACE_QUEUE_ERROR_TIMEOUT;
+ } else {
+ /* Any error other than timeout: log and wait again. */
+ TBM_INFO("surface_queue:%p timedwait error retry wait", surface_queue);
+ }
+ } else {
+ /* Woken by a signal: report dequeueable. */
+ pthread_mutex_unlock(&surface_queue->lock);
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+ }
+ }
+}
+
+/**
+ * tbm_surface_queue_can_dequeue - report whether a surface can be
+ * dequeued; if 'wait' is non-zero, block on free_cond until one can be.
+ *
+ * Returns 1 when dequeueable, 0 on failure or (wait == 0) when the
+ * free_queue is empty. Emits the can_dequeue notification before
+ * inspecting the queue, with the registry lock dropped so callbacks may
+ * call back into the queue API.
+ */
+int
+tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
+{
+ _tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
+
+ /* Revalidate: a can_dequeue callback may have destroyed the queue. */
+ _tbm_surf_queue_mutex_lock();
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
+
+ pthread_mutex_lock(&surface_queue->lock);
+
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+ /* Let the backend attach buffers before deciding emptiness. */
+ if (_queue_is_empty(&surface_queue->free_queue)) {
+ if (surface_queue->impl && surface_queue->impl->need_attach)
+ surface_queue->impl->need_attach(surface_queue);
if (!_tbm_surface_queue_is_valid(surface_queue)) {
- TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
+ TBM_ERR("surface_queue:%p is invalid", surface_queue);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
return 0;
}
+ }
+ if (!_queue_is_empty(&surface_queue->free_queue)) {
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
return 1;
}
+ /* NOTE(review): after pthread_cond_wait returns this reports 1
+ * without rechecking that the free_queue is non-empty (spurious
+ * wakeups) and without revalidating the queue, unlike the code it
+ * replaced — TODO confirm this is intended. */
+ if (wait) {
+ _tbm_surf_queue_mutex_unlock();
+ pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
+ pthread_mutex_unlock(&surface_queue->lock);
+ return 1;
+ }
+
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
return 0;
int queue_type;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
node = _queue_get_node(surface_queue, 0, surface, &queue_type);
if (node == NULL || queue_type != NODE_LIST) {
- TBM_LOG_E("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+ TBM_ERR("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
node, queue_type);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+ if (!node) {
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
+ return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+ } else {
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
+ return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+ }
+ }
+
+ if (node->delete_pending) {
+ TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+
+ _queue_delete_node(surface_queue, node);
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
}
if (surface_queue->queue_size < surface_queue->num_attached) {
- TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+ TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
if (surface_queue->impl && surface_queue->impl->need_detach)
surface_queue->impl->need_detach(surface_queue, node);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
return TBM_SURFACE_QUEUE_ERROR_NONE;
}
else
_tbm_surface_queue_release(surface_queue, node, 1);
- if (_queue_is_empty(&surface_queue->free_queue)) {
+ if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
+ TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
+ _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
pthread_mutex_unlock(&surface_queue->lock);
- TBM_LOG_E("surface_queue->free_queue is empty.\n");
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+ return TBM_SURFACE_ERROR_INVALID_OPERATION;
}
node->type = QUEUE_NODE_TYPE_RELEASE;
}
+/**
+ * tbm_surface_queue_cancel_acquire - give a surface obtained with
+ * tbm_surface_queue_acquire() back to the queue, undoing the acquire.
+ *
+ * The node is pushed back onto the dirty_queue through the backend's
+ * enqueue hook (hence the enqueue wording in the error log below), so a
+ * consumer can acquire it again later.
+ *
+ * Returns TBM_SURFACE_QUEUE_ERROR_NONE on success, or an error code that
+ * is also mirrored into the thread-local last-result. On success it
+ * emits a TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE trace event and the
+ * acquirable notification (both with all locks dropped).
+ */
tbm_surface_queue_error_e
+tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
+ surface_queue, tbm_surface_h surface)
+{
+ queue_node *node;
+ int queue_type;
+
+ _tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
+ TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+
+ pthread_mutex_lock(&surface_queue->lock);
+
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+
+ /* The surface must be consumer-owned: found in the node list only,
+ * not sitting in the free_queue or dirty_queue. */
+ node = _queue_get_node(surface_queue, 0, surface, &queue_type);
+ if (node == NULL || queue_type != NODE_LIST) {
+ TBM_ERR("tbm_surface_queue_cancel_acquire::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+ node, queue_type);
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+ return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+ }
+
+ /* Re-enqueue the node onto the dirty_queue (backend hook first). */
+ if (surface_queue->impl && surface_queue->impl->enqueue)
+ surface_queue->impl->enqueue(surface_queue, node);
+ else
+ _tbm_surface_queue_enqueue(surface_queue, node, 1);
+
+ /* Sanity check: the enqueue above must have populated dirty_queue. */
+ if (_queue_is_empty(&surface_queue->dirty_queue)) {
+ TBM_ERR("enqueue surface but queue is empty node:%p\n", node);
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+ return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+ }
+
+ node->type = QUEUE_NODE_TYPE_ENQUEUE;
+
+ /* Signal outside the per-queue lock: wakes a can_acquire waiter. */
+ pthread_mutex_unlock(&surface_queue->lock);
+ pthread_cond_signal(&surface_queue->dirty_cond);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
+
+ _notify_emit(surface_queue, &surface_queue->acquirable_noti);
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
tbm_surface_queue_acquire(tbm_surface_queue_h
surface_queue, tbm_surface_h *surface)
{
queue_node *node;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
*surface = NULL;
node = _tbm_surface_queue_acquire(surface_queue);
if (node == NULL || node->surface == NULL) {
- TBM_LOG_E("_queue_node_pop_front failed\n");
+ TBM_ERR("_queue_node_pop_front failed\n");
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
+ return TBM_SURFACE_QUEUE_ERROR_EMPTY;
}
node->type = QUEUE_NODE_TYPE_ACQUIRE;
*surface = node->surface;
- TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
+ if (surface_queue->acquire_sync_count == 1) {
+ tbm_surface_info_s info;
+ int ret;
+
+ TBM_ERR("start map surface:%p", *surface);
+ ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
+ TBM_ERR("end map surface:%p", *surface);
+ if (ret == TBM_SURFACE_ERROR_NONE)
+ tbm_surface_unmap(*surface);
+ }
+
+ if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
+
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
pthread_mutex_unlock(&surface_queue->lock);
tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
pthread_mutex_lock(&surface_queue->lock);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
if (!_queue_is_empty(&surface_queue->dirty_queue)) {
pthread_mutex_unlock(&surface_queue->lock);
QUEUE_NODE_TYPE_DEQUEUE)) {
_tbm_surf_queue_mutex_unlock();
pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
- _tbm_surf_queue_mutex_lock();
-
- if (!_tbm_surface_queue_is_valid(surface_queue)) {
- TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
- pthread_mutex_unlock(&surface_queue->lock);
- _tbm_surf_queue_mutex_unlock();
- return 0;
- }
-
pthread_mutex_unlock(&surface_queue->lock);
- _tbm_surf_queue_mutex_unlock();
return 1;
}
queue_node *node = NULL, *tmp;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
LIST_DEL(&surface_queue->item_link);
queue_node *node = NULL, *tmp;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
if (width == surface_queue->width && height == surface_queue->height &&
format == surface_queue->format) {
surface_queue->height = height;
surface_queue->format = format;
- /* Destory surface and Push to free_queue */
- LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
- _queue_delete_node(surface_queue, node);
+ if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
+ /* Destory surface and Push to free_queue */
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
+ _queue_delete_node(surface_queue, node);
+
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+ node->delete_pending = 1;
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+ _queue_delete_node(surface_queue, node);
+
+ _queue_init(&surface_queue->dirty_queue);
+ LIST_INITHEAD(&surface_queue->list);
+ }
/* Reset queue */
_queue_init(&surface_queue->free_queue);
- _queue_init(&surface_queue->dirty_queue);
- LIST_INITHEAD(&surface_queue->list);
surface_queue->num_attached = 0;
tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
{
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
}
+/**
+ * tbm_surface_queue_notify_dequeuable - wake dequeue waiters and emit the
+ * dequeuable notification, without changing any queue state.
+ *
+ * Returns TBM_SURFACE_QUEUE_ERROR_NONE, or
+ * TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE for an invalid queue.
+ */
tbm_surface_queue_error_e
+tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
+{
+ _tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+ /* NOTE(review): this empty lock/unlock pair presumably serializes with
+ * threads between their free_queue check and pthread_cond_wait so the
+ * following signal is not lost — TODO confirm; the signal itself is
+ * issued without holding the mutex. */
+ pthread_mutex_lock(&surface_queue->lock);
+ pthread_mutex_unlock(&surface_queue->lock);
+ pthread_cond_signal(&surface_queue->free_cond);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
tbm_surface_queue_set_size(tbm_surface_queue_h
surface_queue, int queue_size, int flush)
{
queue_node *node = NULL, *tmp;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
- TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
+ TBM_ERROR_INVALID_PARAMETER);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
if ((surface_queue->queue_size == queue_size) && !flush) {
_tbm_surf_queue_mutex_unlock();
pthread_mutex_lock(&surface_queue->lock);
if (flush) {
- /* Destory surface and Push to free_queue */
- LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
- _queue_delete_node(surface_queue, node);
+ surface_queue->queue_size = queue_size;
+
+ if (surface_queue->num_attached == 0) {
+ pthread_mutex_unlock(&surface_queue->lock);
+ _tbm_surf_queue_mutex_unlock();
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+ }
+
+ if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
+ /* Destory surface and Push to free_queue */
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
+ _queue_delete_node(surface_queue, node);
+
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+ node->delete_pending = 1;
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+ _queue_delete_node(surface_queue, node);
+
+ _queue_init(&surface_queue->dirty_queue);
+ LIST_INITHEAD(&surface_queue->list);
+ }
/* Reset queue */
_queue_init(&surface_queue->free_queue);
- _queue_init(&surface_queue->dirty_queue);
- LIST_INITHEAD(&surface_queue->list);
surface_queue->num_attached = 0;
- surface_queue->queue_size = queue_size;
if (surface_queue->impl && surface_queue->impl->reset)
surface_queue->impl->reset(surface_queue);
int need_del = surface_queue->queue_size - queue_size;
LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
- TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+ TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
if (surface_queue->impl && surface_queue->impl->need_detach)
surface_queue->impl->need_detach(surface_queue, node);
queue_node *node = NULL;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
if (surface_queue->num_attached == 0) {
_tbm_surf_queue_mutex_unlock();
queue_node *node = NULL, *tmp;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
if (surface_queue->num_attached == 0) {
_tbm_surf_queue_mutex_unlock();
pthread_mutex_lock(&surface_queue->lock);
- /* Destory surface and Push to free_queue */
- LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
- _queue_delete_node(surface_queue, node);
+ if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
+ /* Destory surface and Push to free_queue */
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
+ _queue_delete_node(surface_queue, node);
+
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+ node->delete_pending = 1;
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+ _queue_delete_node(surface_queue, node);
+
+ _queue_init(&surface_queue->dirty_queue);
+ LIST_INITHEAD(&surface_queue->list);
+ }
/* Reset queue */
_queue_init(&surface_queue->free_queue);
- _queue_init(&surface_queue->dirty_queue);
- LIST_INITHEAD(&surface_queue->list);
surface_queue->num_attached = 0;
queue_node *node = NULL;
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
+ TBM_ERROR_INVALID_PARAMETER);
+
+ *num = 0;
+
+ pthread_mutex_lock(&surface_queue->lock);
+
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
+ if (node->delete_pending) continue;
+
+ if (surfaces)
+ surfaces[*num] = node->surface;
+
+ *num = *num + 1;
+ }
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
+tbm_surface_queue_get_acquirable_surfaces(tbm_surface_queue_h surface_queue,
+ tbm_surface_h *surfaces, int *num)
+{
+ queue_node *node = NULL;
+
+ _tbm_surf_queue_mutex_lock();
*num = 0;
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
- TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
+ TBM_ERROR_INVALID_PARAMETER);
pthread_mutex_lock(&surface_queue->lock);
- LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head, item_link) {
if (surfaces)
surfaces[*num] = node->surface;
tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
{
_tbm_surf_queue_mutex_lock();
-
- *num = 0;
+ _tbm_set_last_result(TBM_ERROR_NONE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
- TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
+ TBM_ERROR_INVALID_PARAMETER);
+
+ *num = 0;
pthread_mutex_lock(&surface_queue->lock);
case TBM_SURFACE_QUEUE_TRACE_RELEASE:
*num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
break;
+ default:
+ break;
}
pthread_mutex_unlock(&surface_queue->lock);
return;
if (surface_queue->alloc_cb) {
- pthread_mutex_unlock(&surface_queue->lock);
- _tbm_surf_queue_mutex_unlock();
surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
- _tbm_surf_queue_mutex_lock();
- pthread_mutex_lock(&surface_queue->lock);
/* silent return */
if (!surface)
tbm_surface_queue_create(int queue_size, int width,
int height, int format, int flags)
{
- TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
-
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
sizeof(struct _tbm_surface_queue));
- TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
+ if (!surface_queue) {
+ TBM_ERR("cannot allocate the surface_queue.\n");
+ _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
+ _tbm_surf_queue_mutex_unlock();
+ return NULL;
+ }
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
tbm_queue_default *data = (tbm_queue_default *) calloc(1,
sizeof(tbm_queue_default));
if (data == NULL) {
- TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
+ TBM_ERR("cannot allocate the tbm_queue_default.\n");
+ _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
free(surface_queue);
_tbm_surf_queue_mutex_unlock();
return NULL;
{
tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
+ if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
+ return;
+
_queue_init(&data->dequeue_list);
}
return;
if (surface_queue->alloc_cb) {
- pthread_mutex_unlock(&surface_queue->lock);
- _tbm_surf_queue_mutex_unlock();
surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
- _tbm_surf_queue_mutex_lock();
- pthread_mutex_lock(&surface_queue->lock);
/* silent return */
if (!surface)
queue_node *node)
{
tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
- queue_node *next = NULL, *tmp;
+ queue_node *first = NULL;
+
+ first = container_of(data->dequeue_list.head.next, first, item_link);
+ if (first != node) {
+ return;
+ }
node->priv_flags = 0;
- LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
- if (next->priv_flags)
- break;
- _queue_node_pop(&data->dequeue_list, next);
- _tbm_surface_queue_enqueue(surface_queue, next, 1);
+ _queue_node_pop(&data->dequeue_list, node);
+ _tbm_surface_queue_enqueue(surface_queue, node, 1);
+}
+
+static void
+__tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
+ queue_node *node)
+{
+ tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
+
+ if (node->priv_flags) {
+ node->priv_flags = 0;
+ _queue_node_pop(&data->dequeue_list, node);
}
+
+ _tbm_surface_queue_release(surface_queue, node, 1);
}
static queue_node *
__tbm_queue_sequence_destroy,
__tbm_queue_sequence_need_attach,
__tbm_queue_sequence_enqueue,
- NULL, /*__tbm_queue_sequence_release*/
+ __tbm_queue_sequence_release,
__tbm_queue_sequence_dequeue,
NULL, /*__tbm_queue_sequence_acquire*/
NULL, /*__tbm_queue_sequence_need_dettach*/
tbm_surface_queue_sequence_create(int queue_size, int width,
int height, int format, int flags)
{
- TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
- TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
-
_tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
sizeof(struct _tbm_surface_queue));
- TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
+ if (surface_queue == NULL) {
+ TBM_ERR("cannot allocate the surface_queue.\n");
+ _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
+ _tbm_surf_queue_mutex_unlock();
+ return NULL;
+ }
- TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
sizeof(tbm_queue_sequence));
if (data == NULL) {
- TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
+ TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
+ _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
free(surface_queue);
_tbm_surf_queue_mutex_unlock();
return NULL;
return surface_queue;
}
-/* LCOV_EXCL_STOP */
+
+tbm_surface_queue_error_e
+tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
+ int modes)
+{
+ _tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+ pthread_mutex_lock(&surface_queue->lock);
+
+ if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
+ modes = TBM_SURFACE_QUEUE_MODE_NONE;
+ else
+ surface_queue->modes |= modes;
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
+tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
+ unsigned int sync_count)
+{
+ int dequeue_num, enqueue_num;
+
+ _tbm_surf_queue_mutex_lock();
+ _tbm_set_last_result(TBM_ERROR_NONE);
+
+ TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+ TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+ pthread_mutex_lock(&surface_queue->lock);
+
+ dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
+ enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
+
+ if (dequeue_num + sync_count == 0)
+ surface_queue->acquire_sync_count = enqueue_num;
+ else
+ surface_queue->enqueue_sync_count = dequeue_num + sync_count;
+
+ TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
+ surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+}