tbm_bufmgr: lock/unlock tbm_bufmgr_mutex at tbm_bufmgr function
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
index 314bbe0..ee93127 100644 (file)
@@ -33,35 +33,22 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #include "tbm_bufmgr_int.h"
 #include "list.h"
+#include <time.h>
 
 #define FREE_QUEUE     1
 #define DIRTY_QUEUE    2
 #define NODE_LIST      4
 
-#define TBM_QUEUE_DEBUG 0
+#define TBM_SURFACE_QUEUE_MAGIC 0xBF031234
 
-#ifdef TRACE
-#define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
-#else
-#define TBM_QUEUE_TRACE(fmt, ...)
-#endif /* TRACE */
-
-#if TBM_QUEUE_DEBUG
-#define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
-#define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
-#else
-#define TBM_LOCK()
-#define TBM_UNLOCK()
-#endif
-
-static tbm_bufmgr g_surf_queue_bufmgr;
-static pthread_mutex_t tbm_surf_queue_lock;
+static pthread_mutex_t tbm_surf_queue_lock = PTHREAD_MUTEX_INITIALIZER;
 void _tbm_surface_queue_mutex_unlock(void);
 
 /* check condition */
 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
        if (!(cond)) {\
-               TBM_LOG_E("'%s' failed.\n", #cond);\
+               TBM_ERR("'%s' failed.\n", #cond);\
+               _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
                _tbm_surf_queue_mutex_unlock();\
                return;\
        } \
@@ -69,7 +56,8 @@ void _tbm_surface_queue_mutex_unlock(void);
 
 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
        if (!(cond)) {\
-               TBM_LOG_E("'%s' failed.\n", #cond);\
+               TBM_ERR("'%s' failed.\n", #cond);\
+               _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
                _tbm_surf_queue_mutex_unlock();\
                return val;\
        } \
@@ -97,6 +85,8 @@ typedef struct {
        Queue_Node_Type type;
 
        unsigned int priv_flags;        /*for each queue*/
+
+       int delete_pending;
 } queue_node;
 
 typedef struct {
@@ -106,6 +96,13 @@ typedef struct {
        void *data;
 } queue_notify;
 
+typedef struct {
+       struct list_head link;
+
+       tbm_surface_queue_trace_cb cb;
+       void *data;
+} queue_trace;
+
 typedef struct _tbm_surface_queue_interface {
        void (*init)(tbm_surface_queue_h queue);
        void (*reset)(tbm_surface_queue_h queue);
@@ -120,10 +117,12 @@ typedef struct _tbm_surface_queue_interface {
 } tbm_surface_queue_interface;
 
 struct _tbm_surface_queue {
+       unsigned int magic;
        int width;
        int height;
        int format;
        int queue_size;
+       int flags;
        int num_attached;
 
        queue free_queue;
@@ -133,8 +132,10 @@ struct _tbm_surface_queue {
        struct list_head destory_noti;
        struct list_head dequeuable_noti;
        struct list_head dequeue_noti;
+       struct list_head can_dequeue_noti;
        struct list_head acquirable_noti;
        struct list_head reset_noti;
+       struct list_head trace_noti;
 
        pthread_mutex_t lock;
        pthread_cond_t free_cond;
@@ -148,35 +149,14 @@ struct _tbm_surface_queue {
        tbm_surface_free_cb free_cb;
        void *alloc_cb_data;
 
-       struct list_head item_link; /* link of surface queue */
+       int modes;
+       unsigned int enqueue_sync_count;
+       unsigned int acquire_sync_count;
 };
 
-/* LCOV_EXCL_START */
-
-static bool
-_tbm_surf_queue_mutex_init(void)
-{
-       static bool tbm_surf_queue_mutex_init = false;
-
-       if (tbm_surf_queue_mutex_init)
-               return true;
-
-       if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
-               TBM_LOG_E("fail: tbm_surf_queue mutex init\n");
-               return false;
-       }
-
-       tbm_surf_queue_mutex_init = true;
-
-       return true;
-}
-
 static void
 _tbm_surf_queue_mutex_lock(void)
 {
-       if (!_tbm_surf_queue_mutex_init())
-               return;
-
        pthread_mutex_lock(&tbm_surf_queue_lock);
 }
 
@@ -186,47 +166,29 @@ _tbm_surf_queue_mutex_unlock(void)
        pthread_mutex_unlock(&tbm_surf_queue_lock);
 }
 
-static void
-_init_tbm_surf_queue_bufmgr(void)
-{
-       g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
-}
-
-static void
-_deinit_tbm_surf_queue_bufmgr(void)
+static int
+_tbm_surface_queue_magic_check(tbm_surface_queue_h surface_queue)
 {
-       if (!g_surf_queue_bufmgr)
-               return;
+       if (surface_queue->magic != TBM_SURFACE_QUEUE_MAGIC)
+               return 0;
 
-       tbm_bufmgr_deinit(g_surf_queue_bufmgr);
-       g_surf_queue_bufmgr = NULL;
+       return 1;
 }
 
 static int
 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
 {
-       tbm_surface_queue_h old_data = NULL;
-
-       if (surface_queue == NULL || g_surf_queue_bufmgr == NULL) {
-               TBM_TRACE("error: surface_queue is NULL or not initialized\n");
+       if (!surface_queue) {
+               TBM_ERR("error: surface_queue is NULL.\n");
                return 0;
        }
 
-       if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
-               TBM_TRACE("error: surf_queue_list is empty\n");
+       if (!_tbm_surface_queue_magic_check(surface_queue)) {
+               TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
                return 0;
        }
 
-       LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
-                               item_link) {
-               if (old_data == surface_queue) {
-                       TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
-                       return 1;
-               }
-       }
-
-       TBM_TRACE("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
-       return 0;
+       return 1;
 }
 
 static queue_node *
@@ -275,9 +237,12 @@ _queue_node_pop_front(queue *queue)
 {
        queue_node *node;
 
+       if (!queue->head.next) return NULL;
+       if (!queue->count) return NULL;
+
        node = LIST_ENTRY(queue_node, queue->head.next, item_link);
 
-       LIST_DEL(&node->item_link);
+       LIST_DELINIT(&node->item_link);
        queue->count--;
 
        return node;
@@ -286,7 +251,7 @@ _queue_node_pop_front(queue *queue)
 static queue_node *
 _queue_node_pop(queue *queue, queue_node *node)
 {
-       LIST_DEL(&node->item_link);
+       LIST_DELINIT(&node->item_link);
        queue->count--;
 
        return node;
@@ -338,6 +303,8 @@ _queue_get_node(tbm_surface_queue_h surface_queue, int type,
                }
        }
 
+       TBM_ERR("fail to get the queue_node.\n");
+
        return NULL;
 }
 
@@ -396,7 +363,7 @@ _notify_remove(struct list_head *list,
                }
        }
 
-       TBM_LOG_E("Cannot find notifiy\n");
+       TBM_ERR("Cannot find notifiy\n");
 }
 
 static void
@@ -425,6 +392,66 @@ _notify_emit(tbm_surface_queue_h surface_queue,
                item->cb(surface_queue, item->data);
 }
 
+static void
+_trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
+           void *data)
+{
+       TBM_RETURN_IF_FAIL(cb != NULL);
+
+       queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
+
+       TBM_RETURN_IF_FAIL(item != NULL);
+
+       LIST_INITHEAD(&item->link);
+       item->cb = cb;
+       item->data = data;
+
+       LIST_ADDTAIL(&item->link, list);
+}
+
+static void
+_trace_remove(struct list_head *list,
+              tbm_surface_queue_trace_cb cb, void *data)
+{
+       queue_trace *item = NULL, *tmp;
+
+       LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
+               if (item->cb == cb && item->data == data) {
+                       LIST_DEL(&item->link);
+                       free(item);
+                       return;
+               }
+       }
+
+       TBM_ERR("Cannot find notifiy\n");
+}
+
+static void
+_trace_remove_all(struct list_head *list)
+{
+       queue_trace *item = NULL, *tmp;
+
+       LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
+               LIST_DEL(&item->link);
+               free(item);
+       }
+}
+
+static void
+_trace_emit(tbm_surface_queue_h surface_queue,
+            struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
+{
+       queue_trace *item = NULL, *tmp;
+
+       /*
+               item->cb is a callback implemented outside libtbm.
+               That callback may remove entries from this list while we
+               iterate, so LIST_FOR_EACH_ENTRY_SAFE must be used here.
+       */
+       LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
+               item->cb(surface_queue, surface, trace, item->data);
+}
+
 static int
 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
 {
@@ -457,6 +484,34 @@ _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
 }
 
 static void
+_tbm_surface_queue_need_attach(tbm_surface_queue_h surface_queue)
+{
+       tbm_surface_h surface;
+
+       if (surface_queue->queue_size == surface_queue->num_attached)
+               return;
+
+       if (surface_queue->alloc_cb) {
+               surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
+
+               /* silent return */
+               if (!surface)
+                       return;
+
+               tbm_surface_internal_ref(surface);
+       } else {
+               surface = tbm_surface_internal_create_with_flags(surface_queue->width,
+                               surface_queue->height,
+                               surface_queue->format,
+                               surface_queue->flags);
+               TBM_RETURN_IF_FAIL(surface != NULL);
+       }
+
+       _tbm_surface_queue_attach(surface_queue, surface);
+       tbm_surface_internal_unref(surface);
+}
+
+static void
 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
                          tbm_surface_h surface)
 {
@@ -485,14 +540,6 @@ _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
 {
        queue_node *node;
 
-       if (_queue_is_empty(&surface_queue->free_queue)) {
-               if (surface_queue->impl && surface_queue->impl->need_attach)
-                       surface_queue->impl->need_attach(surface_queue);
-
-               if (_queue_is_empty(&surface_queue->free_queue))
-                       return NULL;
-       }
-
        node = _queue_node_pop_front(&surface_queue->free_queue);
 
        return node;
@@ -524,25 +571,35 @@ _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
 static void
 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
                        int queue_size,
-                       int width, int height, int format,
+                       int width, int height, int format, int flags,
                        const tbm_surface_queue_interface *impl, void *data)
 {
+       pthread_condattr_t free_attr, dirty_attr;
+
        TBM_RETURN_IF_FAIL(surface_queue != NULL);
        TBM_RETURN_IF_FAIL(impl != NULL);
 
-       if (!g_surf_queue_bufmgr)
-               _init_tbm_surf_queue_bufmgr();
-
        pthread_mutex_init(&surface_queue->lock, NULL);
-       pthread_cond_init(&surface_queue->free_cond, NULL);
-       pthread_cond_init(&surface_queue->dirty_cond, NULL);
 
+       pthread_condattr_init(&free_attr);
+       pthread_condattr_setclock(&free_attr, CLOCK_MONOTONIC);
+       pthread_cond_init(&surface_queue->free_cond, &free_attr);
+       pthread_condattr_destroy(&free_attr);
+
+       pthread_condattr_init(&dirty_attr);
+       pthread_condattr_setclock(&dirty_attr, CLOCK_MONOTONIC);
+       pthread_cond_init(&surface_queue->dirty_cond, &dirty_attr);
+       pthread_condattr_destroy(&dirty_attr);
+
+       surface_queue->magic = TBM_SURFACE_QUEUE_MAGIC;
        surface_queue->queue_size = queue_size;
        surface_queue->width = width;
        surface_queue->height = height;
        surface_queue->format = format;
+       surface_queue->flags = flags;
        surface_queue->impl = impl;
        surface_queue->impl_data = data;
+       surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
 
        _queue_init(&surface_queue->free_queue);
        _queue_init(&surface_queue->dirty_queue);
@@ -551,13 +608,13 @@ _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
        LIST_INITHEAD(&surface_queue->destory_noti);
        LIST_INITHEAD(&surface_queue->dequeuable_noti);
        LIST_INITHEAD(&surface_queue->dequeue_noti);
+       LIST_INITHEAD(&surface_queue->can_dequeue_noti);
        LIST_INITHEAD(&surface_queue->acquirable_noti);
        LIST_INITHEAD(&surface_queue->reset_noti);
+       LIST_INITHEAD(&surface_queue->trace_noti);
 
        if (surface_queue->impl && surface_queue->impl->init)
                surface_queue->impl->init(surface_queue);
-
-       LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
 }
 
 tbm_surface_queue_error_e
@@ -566,13 +623,16 @@ tbm_surface_queue_add_destroy_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
+                              TBM_ERROR_INVALID_PARAMETER);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_add(&surface_queue->destory_noti, destroy_cb, data);
 
@@ -589,13 +649,14 @@ tbm_surface_queue_remove_destroy_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
 
@@ -612,13 +673,16 @@ tbm_surface_queue_add_dequeuable_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
+                              TBM_ERROR_INVALID_PARAMETER);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
 
@@ -635,13 +699,14 @@ tbm_surface_queue_remove_dequeuable_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
 
@@ -658,13 +723,16 @@ tbm_surface_queue_add_dequeue_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
+                              TBM_ERROR_INVALID_PARAMETER);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
 
@@ -681,13 +749,14 @@ tbm_surface_queue_remove_dequeue_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
 
@@ -699,18 +768,71 @@ tbm_surface_queue_remove_dequeue_cb(
 }
 
 tbm_surface_queue_error_e
+tbm_surface_queue_add_can_dequeue_cb(
+       tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
+       void *data)
+{
+       _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+                              TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
+                              TBM_ERROR_INVALID_PARAMETER);
+
+       pthread_mutex_lock(&surface_queue->lock);
+
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+       _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
+
+       pthread_mutex_unlock(&surface_queue->lock);
+
+       _tbm_surf_queue_mutex_unlock();
+
+       return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
+tbm_surface_queue_remove_can_dequeue_cb(
+       tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
+       void *data)
+{
+       _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+                              TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+       pthread_mutex_lock(&surface_queue->lock);
+
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+       _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
+
+       pthread_mutex_unlock(&surface_queue->lock);
+
+       _tbm_surf_queue_mutex_unlock();
+
+       return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
 tbm_surface_queue_add_acquirable_cb(
        tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
+                              TBM_ERROR_INVALID_PARAMETER);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
 
@@ -727,13 +849,14 @@ tbm_surface_queue_remove_acquirable_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
 
@@ -745,6 +868,56 @@ tbm_surface_queue_remove_acquirable_cb(
 }
 
 tbm_surface_queue_error_e
+tbm_surface_queue_add_trace_cb(
+       tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
+       void *data)
+{
+       _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+                              TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
+                              TBM_ERROR_INVALID_PARAMETER);
+
+       pthread_mutex_lock(&surface_queue->lock);
+
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+       _trace_add(&surface_queue->trace_noti, trace_cb, data);
+
+       pthread_mutex_unlock(&surface_queue->lock);
+
+       _tbm_surf_queue_mutex_unlock();
+
+       return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
+tbm_surface_queue_remove_trace_cb(
+       tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
+       void *data)
+{
+       _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+                              TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+       pthread_mutex_lock(&surface_queue->lock);
+
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+       _trace_remove(&surface_queue->trace_noti, trace_cb, data);
+
+       pthread_mutex_unlock(&surface_queue->lock);
+
+       _tbm_surf_queue_mutex_unlock();
+
+       return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
 tbm_surface_queue_set_alloc_cb(
        tbm_surface_queue_h surface_queue,
        tbm_surface_alloc_cb alloc_cb,
@@ -752,13 +925,14 @@ tbm_surface_queue_set_alloc_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        surface_queue->alloc_cb = alloc_cb;
        surface_queue->free_cb = free_cb;
@@ -777,10 +951,11 @@ tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
        int width;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        width = surface_queue->width;
 
@@ -795,10 +970,11 @@ tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
        int height;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        height = surface_queue->height;
 
@@ -813,10 +989,11 @@ tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
        int format;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        format = surface_queue->format;
 
@@ -831,10 +1008,11 @@ tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
        int queue_size;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        queue_size = surface_queue->queue_size;
 
@@ -849,13 +1027,16 @@ tbm_surface_queue_add_reset_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
+                              TBM_ERROR_INVALID_PARAMETER);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_add(&surface_queue->reset_noti, reset_cb, data);
 
@@ -872,13 +1053,14 @@ tbm_surface_queue_remove_reset_cb(
        void *data)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        _notify_remove(&surface_queue->reset_noti, reset_cb, data);
 
@@ -897,6 +1079,7 @@ tbm_surface_queue_enqueue(tbm_surface_queue_h
        int queue_type;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
@@ -908,16 +1091,23 @@ tbm_surface_queue_enqueue(tbm_surface_queue_h
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
 
        node = _queue_get_node(surface_queue, 0, surface, &queue_type);
        if (node == NULL || queue_type != NODE_LIST) {
-               TBM_LOG_E("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+               TBM_ERR("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
                        node, queue_type);
                pthread_mutex_unlock(&surface_queue->lock);
 
                _tbm_surf_queue_mutex_unlock();
-               return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+               if (!node) {
+                       _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
+                       return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+               } else {
+                       _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
+                       return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+               }
        }
 
        if (surface_queue->impl && surface_queue->impl->enqueue)
@@ -925,35 +1115,49 @@ tbm_surface_queue_enqueue(tbm_surface_queue_h
        else
                _tbm_surface_queue_enqueue(surface_queue, node, 1);
 
-       if (_queue_is_empty(&surface_queue->dirty_queue)) {
-               TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
+       if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
+               TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
+               _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
                pthread_mutex_unlock(&surface_queue->lock);
 
                _tbm_surf_queue_mutex_unlock();
-               return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+               return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
        }
 
        node->type = QUEUE_NODE_TYPE_ENQUEUE;
 
+       if (surface_queue->enqueue_sync_count == 1) {
+               tbm_surface_info_s info;
+               int ret;
+
+               ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
+               if (ret == TBM_SURFACE_ERROR_NONE)
+                       tbm_surface_unmap(surface);
+       }
+
+       if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
+
        pthread_mutex_unlock(&surface_queue->lock);
        pthread_cond_signal(&surface_queue->dirty_cond);
 
        _tbm_surf_queue_mutex_unlock();
 
+       _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
+
        _notify_emit(surface_queue, &surface_queue->acquirable_noti);
 
        return TBM_SURFACE_QUEUE_ERROR_NONE;
 }
 
 tbm_surface_queue_error_e
-tbm_surface_queue_dequeue(tbm_surface_queue_h
-                         surface_queue, tbm_surface_h *surface)
+tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
+                         surface_queue, tbm_surface_h surface)
 {
        queue_node *node;
+       int queue_type;
 
        _tbm_surf_queue_mutex_lock();
-
-       *surface = NULL;
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
@@ -962,79 +1166,257 @@ tbm_surface_queue_dequeue(tbm_surface_queue_h
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       if (surface_queue->impl && surface_queue->impl->dequeue)
-               node = surface_queue->impl->dequeue(surface_queue);
-       else
-               node = _tbm_surface_queue_dequeue(surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
 
-       if (node == NULL || node->surface == NULL) {
-               TBM_LOG_E("_queue_node_pop_front failed\n");
+       node = _queue_get_node(surface_queue, 0, surface, &queue_type);
+       if (node == NULL || queue_type != NODE_LIST) {
+               TBM_ERR("tbm_surface_queue_cancel_dequeue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+                       node, queue_type);
+               _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
                pthread_mutex_unlock(&surface_queue->lock);
 
                _tbm_surf_queue_mutex_unlock();
-               return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
+               return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
        }
 
-       node->type = QUEUE_NODE_TYPE_DEQUEUE;
-       *surface = node->surface;
+       if (node->delete_pending) {
+               TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
+               _queue_delete_node(surface_queue, node);
 
-       pthread_mutex_unlock(&surface_queue->lock);
+               pthread_mutex_unlock(&surface_queue->lock);
 
-       _tbm_surf_queue_mutex_unlock();
+               _tbm_surf_queue_mutex_unlock();
 
-       _notify_emit(surface_queue, &surface_queue->dequeue_noti);
+               _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
 
-       return TBM_SURFACE_QUEUE_ERROR_NONE;
-}
+               return TBM_SURFACE_QUEUE_ERROR_NONE;
+       }
 
-int
-tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
-{
-       _tbm_surf_queue_mutex_lock();
+       if (surface_queue->queue_size < surface_queue->num_attached) {
+               TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
 
-       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
+               if (surface_queue->impl && surface_queue->impl->need_detach)
+                       surface_queue->impl->need_detach(surface_queue, node);
+               else
+                       _tbm_surface_queue_detach(surface_queue, surface);
 
-       pthread_mutex_lock(&surface_queue->lock);
+               pthread_mutex_unlock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+               _tbm_surf_queue_mutex_unlock();
 
-       if (_queue_is_empty(&surface_queue->free_queue)) {
-               if (surface_queue->impl && surface_queue->impl->need_attach)
-                       surface_queue->impl->need_attach(surface_queue);
+               _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
 
-               if (!_tbm_surface_queue_is_valid(surface_queue)) {
-                       TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
-                       _tbm_surf_queue_mutex_unlock();
-                       return 0;
-               }
+               return TBM_SURFACE_QUEUE_ERROR_NONE;
        }
 
-       if (!_queue_is_empty(&surface_queue->free_queue)) {
-               pthread_mutex_unlock(&surface_queue->lock);
-               _tbm_surf_queue_mutex_unlock();
-               return 1;
+       if (surface_queue->impl && surface_queue->impl->release)
+               surface_queue->impl->release(surface_queue, node);
+       else
+               _tbm_surface_queue_release(surface_queue, node, 1);
+
+       if (_queue_is_empty(&surface_queue->free_queue)) {
+               TBM_ERR("surface_queue->free_queue is empty.\n");
+               _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+               pthread_mutex_unlock(&surface_queue->lock);
+
+               _tbm_surf_queue_mutex_unlock();
+               return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
        }
 
-       if (wait && _tbm_surface_queue_get_node_count(surface_queue,
-                                               QUEUE_NODE_TYPE_ACQUIRE)) {
+       node->type = QUEUE_NODE_TYPE_RELEASE;
+
+       pthread_mutex_unlock(&surface_queue->lock);
+       pthread_cond_signal(&surface_queue->free_cond);
+
+       _tbm_surf_queue_mutex_unlock();
+
+       _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
+
+       return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
tbm_surface_queue_error_e
tbm_surface_queue_dequeue(tbm_surface_queue_h
			  surface_queue, tbm_surface_h *surface)
{
	queue_node *node;

	/* Serialize against all other queue API calls, then clear the
	 * per-thread last-result before doing any work. */
	_tbm_surf_queue_mutex_lock();
	_tbm_set_last_result(TBM_ERROR_NONE);

	/* Validate arguments; the macros unlock the global mutex, record
	 * TBM_ERROR_INVALID_PARAMETER and return on failure. */
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	/* Ensure the out-parameter is well-defined on every error path. */
	*surface = NULL;

	pthread_mutex_lock(&surface_queue->lock);

	/* If no free buffer is available, give the backend (or the default
	 * implementation) a chance to attach a new one before dequeuing.
	 * NOTE(review): need_attach may run user/backend code; assumes it
	 * cannot destroy this queue while the global mutex is held — the
	 * pre-refactor code revalidated the queue after this callback; confirm. */
	if (_queue_is_empty(&surface_queue->free_queue)) {
		if (surface_queue->impl && surface_queue->impl->need_attach)
			surface_queue->impl->need_attach(surface_queue);
		else
			_tbm_surface_queue_need_attach(surface_queue);
	}

	/* Pop a node from the free queue via the backend hook when present. */
	if (surface_queue->impl && surface_queue->impl->dequeue)
		node = surface_queue->impl->dequeue(surface_queue);
	else
		node = _tbm_surface_queue_dequeue(surface_queue);

	/* Still nothing dequeuable: report EMPTY (not a fatal queue error). */
	if (node == NULL || node->surface == NULL) {
		TBM_ERR("_queue_node_pop_front failed\n");
		_tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_EMPTY;
	}

	/* Hand the buffer to the caller; it is now in the DEQUEUE state. */
	node->type = QUEUE_NODE_TYPE_DEQUEUE;
	*surface = node->surface;

	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	/* Emit trace and dequeue notifications only after both locks are
	 * dropped, so callbacks may re-enter the queue API. */
	_trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);

	_notify_emit(surface_queue, &surface_queue->dequeue_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
+
+tbm_surface_queue_error_e
+tbm_surface_queue_can_dequeue_wait_timeout(tbm_surface_queue_h surface_queue, int ms_timeout)
+{
+       int ret;
+       struct timespec tp;
+
+       _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+                                                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+       _tbm_surf_queue_mutex_unlock();
+
+       _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
+
+       _tbm_surf_queue_mutex_lock();
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+                                                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+       pthread_mutex_lock(&surface_queue->lock);
+
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+       if (_queue_is_empty(&surface_queue->free_queue)) {
+               if (surface_queue->impl && surface_queue->impl->need_attach)
+                       surface_queue->impl->need_attach(surface_queue);
+               else
+                       _tbm_surface_queue_need_attach(surface_queue);
+       }
+
+       if (!_queue_is_empty(&surface_queue->free_queue)) {
+               pthread_mutex_unlock(&surface_queue->lock);
+               _tbm_surf_queue_mutex_unlock();
+               return TBM_SURFACE_QUEUE_ERROR_NONE;
+       }
+
+       _tbm_surf_queue_mutex_unlock();
+
+       while (1) {
+               clock_gettime(CLOCK_MONOTONIC, &tp);
+
+               if (ms_timeout > 1000)
+                       tp.tv_sec += ms_timeout / 1000;
+
+               tp.tv_nsec += (ms_timeout % 1000) * 1000000;
+
+               if (tp.tv_nsec > 1000000000L) {
+                       tp.tv_sec++;
+                       tp.tv_nsec -= 1000000000L;
                }
 
+               ret = pthread_cond_timedwait(&surface_queue->free_cond, &surface_queue->lock, &tp);
+               if (ret) {
+                       if (ret == ETIMEDOUT) {
+                               TBM_ERR("surface_queue:%p can dequeue wait timeout", surface_queue);
+                               pthread_mutex_unlock(&surface_queue->lock);
+                               return TBM_SURFACE_QUEUE_ERROR_TIMEOUT;
+                       } else {
+                               TBM_INFO("surface_queue:%p timedwait error retry wait", surface_queue);
+                       }
+               } else {
+                       if (surface_queue->impl && surface_queue->impl->need_attach)
+                               surface_queue->impl->need_attach(surface_queue);
+                       else
+                               _tbm_surface_queue_need_attach(surface_queue);
+
+                       if (!_queue_is_empty(&surface_queue->free_queue)) {
+                               pthread_mutex_unlock(&surface_queue->lock);
+                               return TBM_SURFACE_QUEUE_ERROR_NONE;
+                       }
+               }
+       }
+}
+
+int
+tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
+{
+       _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
+
+       _tbm_surf_queue_mutex_unlock();
+
+       _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
+
+       _tbm_surf_queue_mutex_lock();
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
+
+       pthread_mutex_lock(&surface_queue->lock);
+
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+       if (_queue_is_empty(&surface_queue->free_queue)) {
+               if (surface_queue->impl && surface_queue->impl->need_attach)
+                       surface_queue->impl->need_attach(surface_queue);
+               else
+                       _tbm_surface_queue_need_attach(surface_queue);
+       }
+
+       if (!_queue_is_empty(&surface_queue->free_queue)) {
                pthread_mutex_unlock(&surface_queue->lock);
                _tbm_surf_queue_mutex_unlock();
                return 1;
        }
 
+       if (wait) {
+               _tbm_surf_queue_mutex_unlock();
+               while (1) {
+                       pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
+
+                       if (surface_queue->impl && surface_queue->impl->need_attach)
+                               surface_queue->impl->need_attach(surface_queue);
+                       else
+                               _tbm_surface_queue_need_attach(surface_queue);
+
+                       if (!_queue_is_empty(&surface_queue->free_queue)) {
+                               pthread_mutex_unlock(&surface_queue->lock);
+                               return 1;
+                       }
+               }
+       }
+
        pthread_mutex_unlock(&surface_queue->lock);
        _tbm_surf_queue_mutex_unlock();
        return 0;
@@ -1048,6 +1430,7 @@ tbm_surface_queue_release(tbm_surface_queue_h
        int queue_type;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
@@ -1056,20 +1439,41 @@ tbm_surface_queue_release(tbm_surface_queue_h
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
 
        node = _queue_get_node(surface_queue, 0, surface, &queue_type);
        if (node == NULL || queue_type != NODE_LIST) {
-               TBM_LOG_E("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+               TBM_ERR("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
                        node, queue_type);
                pthread_mutex_unlock(&surface_queue->lock);
 
                _tbm_surf_queue_mutex_unlock();
-               return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+
+               if (!node) {
+                       _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
+                       return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+               } else {
+                       _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
+                       return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+               }
+       }
+
+       if (node->delete_pending) {
+               TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+
+               _queue_delete_node(surface_queue, node);
+
+               pthread_mutex_unlock(&surface_queue->lock);
+
+               _tbm_surf_queue_mutex_unlock();
+
+               _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
+               return TBM_SURFACE_QUEUE_ERROR_NONE;
        }
 
        if (surface_queue->queue_size < surface_queue->num_attached) {
-               TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+               TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
 
                if (surface_queue->impl && surface_queue->impl->need_detach)
                        surface_queue->impl->need_detach(surface_queue, node);
@@ -1079,6 +1483,9 @@ tbm_surface_queue_release(tbm_surface_queue_h
                pthread_mutex_unlock(&surface_queue->lock);
 
                _tbm_surf_queue_mutex_unlock();
+
+               _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
                return TBM_SURFACE_QUEUE_ERROR_NONE;
        }
 
@@ -1087,11 +1494,13 @@ tbm_surface_queue_release(tbm_surface_queue_h
        else
                _tbm_surface_queue_release(surface_queue, node, 1);
 
-       if (_queue_is_empty(&surface_queue->free_queue)) {
+       if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
+               TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
+               _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
                pthread_mutex_unlock(&surface_queue->lock);
 
                _tbm_surf_queue_mutex_unlock();
-               return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
+               return TBM_SURFACE_ERROR_INVALID_OPERATION;
        }
 
        node->type = QUEUE_NODE_TYPE_RELEASE;
@@ -1101,18 +1510,79 @@ tbm_surface_queue_release(tbm_surface_queue_h
 
        _tbm_surf_queue_mutex_unlock();
 
+       _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
        _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
 
        return TBM_SURFACE_QUEUE_ERROR_NONE;
 }
 
 tbm_surface_queue_error_e
+tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
+                       surface_queue, tbm_surface_h surface)
+{
+       queue_node *node;
+       int queue_type;
+
+       _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+                              TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
+                              TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
+
+       pthread_mutex_lock(&surface_queue->lock);
+
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
+
+       node = _queue_get_node(surface_queue, 0, surface, &queue_type);
+       if (node == NULL || queue_type != NODE_LIST) {
+               TBM_ERR("tbm_surface_queue_cancel_acquire::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
+                       node, queue_type);
+               _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
+               pthread_mutex_unlock(&surface_queue->lock);
+
+               _tbm_surf_queue_mutex_unlock();
+               return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
+       }
+
+       if (surface_queue->impl && surface_queue->impl->enqueue)
+               surface_queue->impl->enqueue(surface_queue, node);
+       else
+               _tbm_surface_queue_enqueue(surface_queue, node, 1);
+
+       if (_queue_is_empty(&surface_queue->dirty_queue)) {
+               TBM_ERR("enqueue surface but queue is empty node:%p\n", node);
+               _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
+               pthread_mutex_unlock(&surface_queue->lock);
+
+               _tbm_surf_queue_mutex_unlock();
+               return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
+       }
+
+       node->type = QUEUE_NODE_TYPE_ENQUEUE;
+
+       pthread_mutex_unlock(&surface_queue->lock);
+       pthread_cond_signal(&surface_queue->dirty_cond);
+
+       _tbm_surf_queue_mutex_unlock();
+
+       _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
+
+       _notify_emit(surface_queue, &surface_queue->acquirable_noti);
+
+       return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
 tbm_surface_queue_acquire(tbm_surface_queue_h
                          surface_queue, tbm_surface_h *surface)
 {
        queue_node *node;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        *surface = NULL;
 
@@ -1129,18 +1599,32 @@ tbm_surface_queue_acquire(tbm_surface_queue_h
                node = _tbm_surface_queue_acquire(surface_queue);
 
        if (node == NULL || node->surface == NULL) {
-               TBM_LOG_E("_queue_node_pop_front failed\n");
+               TBM_ERR("_queue_node_pop_front failed\n");
+               _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
                pthread_mutex_unlock(&surface_queue->lock);
 
                _tbm_surf_queue_mutex_unlock();
-               return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
+               return TBM_SURFACE_QUEUE_ERROR_EMPTY;
        }
 
        node->type = QUEUE_NODE_TYPE_ACQUIRE;
 
        *surface = node->surface;
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
+       if (surface_queue->acquire_sync_count == 1) {
+               tbm_surface_info_s info;
+               int ret;
+
+               TBM_ERR("start map surface:%p", *surface);
+               ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
+               TBM_ERR("end map surface:%p", *surface);
+               if (ret == TBM_SURFACE_ERROR_NONE)
+                       tbm_surface_unmap(*surface);
+       }
+
+       if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
+
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
 
        pthread_mutex_unlock(&surface_queue->lock);
 
@@ -1149,6 +1633,8 @@ tbm_surface_queue_acquire(tbm_surface_queue_h
        if (b_dump_queue)
                tbm_surface_internal_dump_buffer(*surface, "acquire");
 
+       _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
+
        return TBM_SURFACE_QUEUE_ERROR_NONE;
 }
 
@@ -1156,12 +1642,13 @@ int
 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
 {
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        if (!_queue_is_empty(&surface_queue->dirty_queue)) {
                pthread_mutex_unlock(&surface_queue->lock);
@@ -1173,17 +1660,7 @@ tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
                                                QUEUE_NODE_TYPE_DEQUEUE)) {
                _tbm_surf_queue_mutex_unlock();
                pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
-               _tbm_surf_queue_mutex_lock();
-
-               if (!_tbm_surface_queue_is_valid(surface_queue)) {
-                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
-                       pthread_mutex_unlock(&surface_queue->lock);
-                         _tbm_surf_queue_mutex_unlock();
-                         return 0;
-               }
-
                pthread_mutex_unlock(&surface_queue->lock);
-               _tbm_surf_queue_mutex_unlock();
                return 1;
        }
 
@@ -1198,12 +1675,13 @@ tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
        queue_node *node = NULL, *tmp;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
-       LIST_DEL(&surface_queue->item_link);
+       surface_queue->magic = 0;
 
        LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
                _queue_delete_node(surface_queue, node);
@@ -1216,16 +1694,15 @@ tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
        _notify_remove_all(&surface_queue->destory_noti);
        _notify_remove_all(&surface_queue->dequeuable_noti);
        _notify_remove_all(&surface_queue->dequeue_noti);
+       _notify_remove_all(&surface_queue->can_dequeue_noti);
        _notify_remove_all(&surface_queue->acquirable_noti);
        _notify_remove_all(&surface_queue->reset_noti);
+       _trace_remove_all(&surface_queue->trace_noti);
 
        pthread_mutex_destroy(&surface_queue->lock);
 
        free(surface_queue);
 
-       if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
-               _deinit_tbm_surf_queue_bufmgr();
-
        _tbm_surf_queue_mutex_unlock();
 }
 
@@ -1236,11 +1713,12 @@ tbm_surface_queue_reset(tbm_surface_queue_h
        queue_node *node = NULL, *tmp;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        if (width == surface_queue->width && height == surface_queue->height &&
                format == surface_queue->format) {
@@ -1254,14 +1732,23 @@ tbm_surface_queue_reset(tbm_surface_queue_h
        surface_queue->height = height;
        surface_queue->format = format;
 
-       /* Destory surface and Push to free_queue */
-       LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
-               _queue_delete_node(surface_queue, node);
+       if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
+               /* Destory surface and Push to free_queue */
+               LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
+                       _queue_delete_node(surface_queue, node);
+
+               LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+                       node->delete_pending = 1;
+       } else {
+               LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+                       _queue_delete_node(surface_queue, node);
+
+               _queue_init(&surface_queue->dirty_queue);
+               LIST_INITHEAD(&surface_queue->list);
+       }
 
        /* Reset queue */
        _queue_init(&surface_queue->free_queue);
-       _queue_init(&surface_queue->dirty_queue);
-       LIST_INITHEAD(&surface_queue->list);
 
        surface_queue->num_attached = 0;
 
@@ -1279,19 +1766,56 @@ tbm_surface_queue_reset(tbm_surface_queue_h
 }
 
/* Manually emit the reset notification to all registered reset callbacks.
 * Returns TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE for an invalid queue,
 * TBM_SURFACE_QUEUE_ERROR_NONE otherwise. */
tbm_surface_queue_error_e
tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
{
	_tbm_surf_queue_mutex_lock();
	_tbm_set_last_result(TBM_ERROR_NONE);

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	/* Drop the global API mutex before invoking user callbacks so they
	 * can safely re-enter the queue API. */
	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
+
/* Manually wake one waiter blocked on dequeue availability and emit the
 * dequeuable notification.
 * Returns TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE for an invalid queue,
 * TBM_SURFACE_QUEUE_ERROR_NONE otherwise. */
tbm_surface_queue_error_e
tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
{
	_tbm_surf_queue_mutex_lock();
	_tbm_set_last_result(TBM_ERROR_NONE);

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	/* Empty lock/unlock pair before the signal: acts as a barrier with
	 * threads currently blocked in pthread_cond_wait on free_cond (they
	 * hold surface_queue->lock), so the signal is not raised while a
	 * waiter is between its predicate check and its wait.
	 * NOTE(review): presumably intentional — do not "simplify" away;
	 * confirm against the waiters in tbm_surface_queue_can_dequeue. */
	pthread_mutex_lock(&surface_queue->lock);
	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	/* Callback runs without any lock held. */
	_notify_emit(surface_queue, &surface_queue->dequeuable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
+
+tbm_surface_queue_error_e
 tbm_surface_queue_set_size(tbm_surface_queue_h
                        surface_queue, int queue_size, int flush)
 {
        queue_node *node = NULL, *tmp;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                                        TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
-                                       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
+                                       TBM_ERROR_INVALID_PARAMETER);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        if ((surface_queue->queue_size == queue_size) && !flush) {
                _tbm_surf_queue_mutex_unlock();
@@ -1301,17 +1825,33 @@ tbm_surface_queue_set_size(tbm_surface_queue_h
        pthread_mutex_lock(&surface_queue->lock);
 
        if (flush) {
-               /* Destory surface and Push to free_queue */
-               LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
-                       _queue_delete_node(surface_queue, node);
+               surface_queue->queue_size = queue_size;
+
+               if (surface_queue->num_attached == 0) {
+                       pthread_mutex_unlock(&surface_queue->lock);
+                       _tbm_surf_queue_mutex_unlock();
+                       return TBM_SURFACE_QUEUE_ERROR_NONE;
+               }
+
+               if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
+                       /* Destory surface and Push to free_queue */
+                       LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
+                               _queue_delete_node(surface_queue, node);
+
+                       LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+                               node->delete_pending = 1;
+               } else {
+                       LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+                               _queue_delete_node(surface_queue, node);
+
+                       _queue_init(&surface_queue->dirty_queue);
+                       LIST_INITHEAD(&surface_queue->list);
+               }
 
                /* Reset queue */
                _queue_init(&surface_queue->free_queue);
-               _queue_init(&surface_queue->dirty_queue);
-               LIST_INITHEAD(&surface_queue->list);
 
                surface_queue->num_attached = 0;
-               surface_queue->queue_size = queue_size;
 
                if (surface_queue->impl && surface_queue->impl->reset)
                        surface_queue->impl->reset(surface_queue);
@@ -1329,7 +1869,7 @@ tbm_surface_queue_set_size(tbm_surface_queue_h
                        int need_del = surface_queue->queue_size - queue_size;
 
                        LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
-                               TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+                               TBM_TRACE_SURFACE_QUEUE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
 
                                if (surface_queue->impl && surface_queue->impl->need_detach)
                                        surface_queue->impl->need_detach(surface_queue, node);
@@ -1353,16 +1893,61 @@ tbm_surface_queue_set_size(tbm_surface_queue_h
 }
 
 tbm_surface_queue_error_e
+tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
+{
+       queue_node *node = NULL;
+       int is_guarantee_cycle = 0;
+
+       _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+                              TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
+       if (surface_queue->num_attached == 0) {
+               _tbm_surf_queue_mutex_unlock();
+               return TBM_SURFACE_QUEUE_ERROR_NONE;
+       }
+
+       pthread_mutex_lock(&surface_queue->lock);
+
+       /* Destory surface in free_queue */
+       while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
+               if (surface_queue->impl && surface_queue->impl->need_detach)
+                       surface_queue->impl->need_detach(surface_queue, node);
+               else
+                       _tbm_surface_queue_detach(surface_queue, node->surface);
+       }
+
+       /* Reset queue */
+       _queue_init(&surface_queue->free_queue);
+
+       if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
+               is_guarantee_cycle = 1;
+
+       pthread_mutex_unlock(&surface_queue->lock);
+       _tbm_surf_queue_mutex_unlock();
+
+       if (is_guarantee_cycle)
+               _notify_emit(surface_queue, &surface_queue->reset_noti);
+
+       return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
 {
        queue_node *node = NULL, *tmp;
 
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
 
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
        if (surface_queue->num_attached == 0) {
                _tbm_surf_queue_mutex_unlock();
@@ -1371,14 +1956,23 @@ tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
 
        pthread_mutex_lock(&surface_queue->lock);
 
-       /* Destory surface and Push to free_queue */
-       LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
-               _queue_delete_node(surface_queue, node);
+       if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
+		/* Destroy surfaces and push to free_queue */
+               LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
+                       _queue_delete_node(surface_queue, node);
+
+               LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+                       node->delete_pending = 1;
+       } else {
+               LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+                       _queue_delete_node(surface_queue, node);
+
+               _queue_init(&surface_queue->dirty_queue);
+               LIST_INITHEAD(&surface_queue->list);
+       }
 
        /* Reset queue */
        _queue_init(&surface_queue->free_queue);
-       _queue_init(&surface_queue->dirty_queue);
-       LIST_INITHEAD(&surface_queue->list);
 
        surface_queue->num_attached = 0;
 
@@ -1402,17 +1996,20 @@ tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
        queue_node *node = NULL;
 
        _tbm_surf_queue_mutex_lock();
-
-       *num = 0;
+       _tbm_set_last_result(TBM_ERROR_NONE);
 
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
                               TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
        TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
-                              TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
+                              TBM_ERROR_INVALID_PARAMETER);
+
+       *num = 0;
 
        pthread_mutex_lock(&surface_queue->lock);
 
        LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
+               if (node->delete_pending) continue;
+
                if (surfaces)
                        surfaces[*num] = node->surface;
 
@@ -1426,53 +2023,85 @@ tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
        return TBM_SURFACE_QUEUE_ERROR_NONE;
 }
 
-typedef struct {
-       int flags;
-} tbm_queue_default;
-
-static void
-__tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
+/* Count (and, when 'surfaces' is non-NULL, collect) the surfaces currently
+ * waiting in dirty_queue, i.e. enqueued but not yet acquired.
+ * Fix vs. the previous revision: '*num = 0' ran before the 'num != NULL'
+ * guard, dereferencing a possibly-NULL pointer; it now runs after the
+ * checks, matching tbm_surface_queue_get_surfaces. Also sets the last
+ * result to TBM_ERROR_NONE on entry like every sibling entry point. */
+tbm_surface_queue_error_e
+tbm_surface_queue_get_acquirable_surfaces(tbm_surface_queue_h surface_queue,
+			tbm_surface_h *surfaces, int *num)
 {
-	free(surface_queue->impl_data);
+	queue_node *node = NULL;
+
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
+			       TBM_ERROR_INVALID_PARAMETER);
+
+	/* initialize the out-parameter only after it has been validated */
+	*num = 0;
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head, item_link) {
+		if (surfaces)
+			surfaces[*num] = node->surface;
+
+		*num = *num + 1;
+	}
+
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
 
-static void
-__tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
+/* Report how many queue nodes are currently in the given trace state
+ * (dequeued / enqueued / acquired / released); TBM_SURFACE_QUEUE_TRACE_NONE
+ * and unknown values yield 0. */
+tbm_surface_queue_error_e
+tbm_surface_queue_get_trace_surface_num(
+			tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
 {
-	tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
-	tbm_surface_h surface;
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
 
-	if (surface_queue->queue_size == surface_queue->num_attached)
-		return;
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
+			       TBM_ERROR_INVALID_PARAMETER);
 
-	if (surface_queue->alloc_cb) {
-		pthread_mutex_unlock(&surface_queue->lock);
-		_tbm_surf_queue_mutex_unlock();
-		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
-		_tbm_surf_queue_mutex_lock();
-		pthread_mutex_lock(&surface_queue->lock);
+	/* 'num' validated above, so this dereference is safe */
+	*num = 0;
 
-		if (!surface)
-			return;
+	pthread_mutex_lock(&surface_queue->lock);
 
-		tbm_surface_internal_ref(surface);
-	} else {
-		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
-				surface_queue->height,
-				surface_queue->format,
-				data->flags);
-		TBM_RETURN_IF_FAIL(surface != NULL);
+	/* map the public trace enum onto the internal node-type counter */
+	switch (trace) {
+	case TBM_SURFACE_QUEUE_TRACE_NONE:
+		*num = 0;
+		break;
+	case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
+		*num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
+		break;
+	case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
+		*num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
+		break;
+	case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
+		*num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
+		break;
+	case TBM_SURFACE_QUEUE_TRACE_RELEASE:
+		*num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
+		break;
+	default:
+		break;
 	}
 
-	_tbm_surface_queue_attach(surface_queue, surface);
-	tbm_surface_internal_unref(surface);
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
 
 static const tbm_surface_queue_interface tbm_queue_default_impl = {
        NULL,                           /*__tbm_queue_default_init*/
        NULL,                           /*__tbm_queue_default_reset*/
-       __tbm_queue_default_destroy,
-       __tbm_queue_default_need_attach,
+       NULL,                           /*__tbm_queue_default_destroy*/
+       NULL,                           /*__tbm_queue_default_need_attach*/
        NULL,                           /*__tbm_queue_default_enqueue*/
        NULL,                           /*__tbm_queue_default_release*/
        NULL,                           /*__tbm_queue_default_dequeue*/
@@ -1484,32 +2113,29 @@ tbm_surface_queue_h
 tbm_surface_queue_create(int queue_size, int width,
                         int height, int format, int flags)
 {
-       TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
-       TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
-       TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
-       TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
-
        _tbm_surf_queue_mutex_lock();
+       _tbm_set_last_result(TBM_ERROR_NONE);
+
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
+       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
 
        tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
                                            sizeof(struct _tbm_surface_queue));
-       TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
-
-       TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
-
-       tbm_queue_default *data = (tbm_queue_default *) calloc(1,
-                                 sizeof(tbm_queue_default));
-       if (data == NULL) {
-               free(surface_queue);
+       if (!surface_queue) {
+               TBM_ERR("cannot allocate the surface_queue.\n");
+               _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
                _tbm_surf_queue_mutex_unlock();
                return NULL;
        }
 
-       data->flags = flags;
+       TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
+
        _tbm_surface_queue_init(surface_queue,
                                queue_size,
-                               width, height, format,
-                               &tbm_queue_default_impl, data);
+                               width, height, format, flags,
+                               &tbm_queue_default_impl, NULL);
 
        _tbm_surf_queue_mutex_unlock();
 
@@ -1517,7 +2143,6 @@ tbm_surface_queue_create(int queue_size, int width,
 }
 
+/* Private impl_data for the "sequence" queue: dequeue_list remembers the
+ * order surfaces were dequeued in (the former per-queue 'flags' member moved
+ * into struct _tbm_surface_queue itself). */
 typedef struct {
-	int flags;
 	queue dequeue_list;
 } tbm_queue_sequence;
 
@@ -1534,6 +2159,9 @@ __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
 {
        tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
 
+       if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
+               return;
+
        _queue_init(&data->dequeue_list);
 }
 
@@ -1544,52 +2172,35 @@ __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
 }
 
+/* Sequence-queue enqueue hook: only the node at the head of dequeue_list
+ * (the oldest dequeued surface) is allowed to proceed, enforcing FIFO
+ * enqueue order.
+ * NOTE(review): when 'node' is not the head this silently returns with
+ * node->priv_flags still set — presumably the node is enqueued later via
+ * __tbm_queue_sequence_release or a subsequent head enqueue; verify against
+ * callers. If dequeue_list is empty, container_of() yields a pointer derived
+ * from the list head itself, which cannot equal a real node, so the early
+ * return fails safe. */
 static void
-__tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
+__tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
+			     queue_node *node)
 {
 	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
-	tbm_surface_h surface;
+	queue_node *first = NULL;
 
-	if (surface_queue->queue_size == surface_queue->num_attached)
+	first = container_of(data->dequeue_list.head.next, first, item_link);
+	if (first != node) {
 		return;
-
-	if (surface_queue->alloc_cb) {
-		pthread_mutex_unlock(&surface_queue->lock);
-		_tbm_surf_queue_mutex_unlock();
-		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
-		_tbm_surf_queue_mutex_lock();
-		pthread_mutex_lock(&surface_queue->lock);
-
-		if (!surface)
-			return;
-
-		tbm_surface_internal_ref(surface);
-	} else {
-		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
-				surface_queue->height,
-				surface_queue->format,
-				data->flags);
-		TBM_RETURN_IF_FAIL(surface != NULL);
 	}
 
-	_tbm_surface_queue_attach(surface_queue, surface);
-	tbm_surface_internal_unref(surface);
+	/* clear the "pending in dequeue_list" mark, then hand over to the
+	 * common enqueue path */
+	node->priv_flags = 0;
+
+	_queue_node_pop(&data->dequeue_list, node);
+	_tbm_surface_queue_enqueue(surface_queue, node, 1);
 }
 
+/* Sequence-queue release hook: if the node is still tracked in
+ * dequeue_list (priv_flags set, i.e. dequeued but never enqueued), drop it
+ * from that list first, then run the common release path. */
 static void
-__tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
-			     queue_node *node)
+__tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
+				queue_node *node)
 {
 	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
-	queue_node *next = NULL, *tmp;
-
-	node->priv_flags = 0;
 
-	LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
-		if (next->priv_flags)
-			break;
-		_queue_node_pop(&data->dequeue_list, next);
-		_tbm_surface_queue_enqueue(surface_queue, next, 1);
+	if (node->priv_flags) {
+		node->priv_flags = 0;
+		_queue_node_pop(&data->dequeue_list, node);
 	}
+
+	_tbm_surface_queue_release(surface_queue, node, 1);
 }
 
 static queue_node *
@@ -1612,9 +2223,9 @@ static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
        __tbm_queue_sequence_init,
        __tbm_queue_sequence_reset,
        __tbm_queue_sequence_destroy,
-       __tbm_queue_sequence_need_attach,
+       NULL,
        __tbm_queue_sequence_enqueue,
-       NULL,                                   /*__tbm_queue_sequence_release*/
+       __tbm_queue_sequence_release,
        __tbm_queue_sequence_dequeue,
        NULL,                                   /*__tbm_queue_sequence_acquire*/
        NULL,                                   /*__tbm_queue_sequence_need_dettach*/
@@ -1624,35 +2235,97 @@ tbm_surface_queue_h
 tbm_surface_queue_sequence_create(int queue_size, int width,
 				  int height, int format, int flags)
 {
-	TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
-	TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
-	TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
-	TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
-
 	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	/* parameter checks now run under the queue mutex so they also set the
+	 * last result (the TBM_SURF_QUEUE_* macro unlocks before returning) */
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
 
 	tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
 					    sizeof(struct _tbm_surface_queue));
-	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
+	if (surface_queue == NULL) {
+		TBM_ERR("cannot allocate the surface_queue.\n");
+		_tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
+		_tbm_surf_queue_mutex_unlock();
+		return NULL;
+	}
 
-	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
 
 	tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
 				   sizeof(tbm_queue_sequence));
 	if (data == NULL) {
+		TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
+		_tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
 		free(surface_queue);
 		_tbm_surf_queue_mutex_unlock();
 		return NULL;
 	}
 
-	data->flags = flags;
+	/* 'flags' is now passed to the common init instead of being stored in
+	 * the sequence impl_data */
 	_tbm_surface_queue_init(surface_queue,
 				queue_size,
-				width, height, format,
+				width, height, format, flags,
 				&tbm_queue_sequence_impl, data);
 
 	_tbm_surf_queue_mutex_unlock();
 
 	return surface_queue;
 }
-/* LCOV_EXCL_STOP */
+
+/* Set the queue's mode flags. Passing TBM_SURFACE_QUEUE_MODE_NONE clears
+ * every mode; any other value is OR-ed into the current mode set.
+ * Fix vs. the previous revision: the NONE branch assigned the local
+ * parameter 'modes' to itself (a no-op), so the queue's modes could never
+ * be cleared; it now resets surface_queue->modes as intended. */
+tbm_surface_queue_error_e
+tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
+				  int modes)
+{
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
+		surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
+	else
+		surface_queue->modes |= modes;
+
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+/* Adjust the queue's sync counters from the current number of dequeued and
+ * enqueued nodes plus the caller-supplied sync_count. */
+tbm_surface_queue_error_e
+tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
+				  unsigned int sync_count)
+{
+	int dequeue_num, enqueue_num;
+
+	_tbm_surf_queue_mutex_lock();
+	_tbm_set_last_result(TBM_ERROR_NONE);
+
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+				   TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
+	enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
+
+	/* NOTE(review): 'dequeue_num + sync_count' mixes int with unsigned int,
+	 * so the sum is computed in unsigned arithmetic; the == 0 branch fires
+	 * only when sync_count exactly cancels dequeue_num. Presumably a zero
+	 * target means "sync on what is already enqueued" — confirm intended
+	 * semantics against callers. */
+	if (dequeue_num + sync_count == 0)
+		surface_queue->acquire_sync_count = enqueue_num;
+	else
+		surface_queue->enqueue_sync_count = dequeue_num + sync_count;
+
+	TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
+				surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
+
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}