1 /**************************************************************************
5 Copyright 2014 Samsung Electronics Co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 **************************************************************************/
34 #include "tbm_bufmgr_int.h"
41 #define TBM_QUEUE_DEBUG 0
44 #define TBM_QUEUE_TRACE(fmt, ...) { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
46 #define TBM_QUEUE_TRACE(fmt, ...)
50 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
51 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
57 static tbm_bufmgr g_surf_queue_bufmgr;
58 static pthread_mutex_t tbm_surf_queue_lock;
59 void _tbm_surf_queue_mutex_unlock(void);
62 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
64 TBM_LOG_E("'%s' failed.\n", #cond);\
65 _tbm_surf_queue_mutex_unlock();\
70 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
72 TBM_LOG_E("'%s' failed.\n", #cond);\
73 _tbm_surf_queue_mutex_unlock();\
78 typedef enum _queue_node_type {
80 QUEUE_NODE_TYPE_DEQUEUE,
81 QUEUE_NODE_TYPE_ENQUEUE,
82 QUEUE_NODE_TYPE_ACQUIRE,
83 QUEUE_NODE_TYPE_RELEASE
87 struct list_head head;
92 tbm_surface_h surface;
94 struct list_head item_link;
95 struct list_head link;
99 unsigned int priv_flags; /*for each queue*/
105 struct list_head link;
107 tbm_surface_queue_notify_cb cb;
112 struct list_head link;
114 tbm_surface_queue_trace_cb cb;
118 typedef struct _tbm_surface_queue_interface {
119 void (*init)(tbm_surface_queue_h queue);
120 void (*reset)(tbm_surface_queue_h queue);
121 void (*destroy)(tbm_surface_queue_h queue);
122 void (*need_attach)(tbm_surface_queue_h queue);
124 void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
125 void (*release)(tbm_surface_queue_h queue, queue_node *node);
126 queue_node *(*dequeue)(tbm_surface_queue_h queue);
127 queue_node *(*acquire)(tbm_surface_queue_h queue);
128 void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
129 } tbm_surface_queue_interface;
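/*
 * Illustrative sketch (not part of this file): a custom backend fills in only
 * the hooks it needs and leaves the rest NULL, as tbm_queue_default_impl does
 * below. The names here are hypothetical.
 *
 *   static void
 *   __my_queue_need_attach(tbm_surface_queue_h queue)
 *   {
 *       // called with queue->lock held when the free_queue is empty;
 *       // allocate a tbm_surface and hand it to _tbm_surface_queue_attach()
 *   }
 *
 *   static const tbm_surface_queue_interface my_queue_impl = {
 *       NULL,                    // init
 *       NULL,                    // reset
 *       NULL,                    // destroy
 *       __my_queue_need_attach,  // need_attach
 *       NULL,                    // enqueue
 *       NULL,                    // release
 *       NULL,                    // dequeue
 *       NULL,                    // acquire
 *       NULL,                    // need_detach
 *   };
 */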
131 struct _tbm_surface_queue {
140 struct list_head list;
142 struct list_head destroy_noti;
143 struct list_head dequeuable_noti;
144 struct list_head dequeue_noti;
145 struct list_head can_dequeue_noti;
146 struct list_head acquirable_noti;
147 struct list_head reset_noti;
148 struct list_head trace_noti;
150 pthread_mutex_t lock;
151 pthread_cond_t free_cond;
152 pthread_cond_t dirty_cond;
154 const tbm_surface_queue_interface *impl;
157 //For external buffer allocation
158 tbm_surface_alloc_cb alloc_cb;
159 tbm_surface_free_cb free_cb;
162 struct list_head item_link; /* link of surface queue */
165 unsigned int enqueue_sync_count;
166 unsigned int acquire_sync_count;
170 _tbm_surf_queue_mutex_init(void)
172 static bool tbm_surf_queue_mutex_init = false;
174 if (tbm_surf_queue_mutex_init)
177 if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
178 TBM_LOG_E("fail: pthread_mutex_init\n");
182 tbm_surf_queue_mutex_init = true;
188 _tbm_surf_queue_mutex_lock(void)
190 if (!_tbm_surf_queue_mutex_init()) {
191 TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
195 pthread_mutex_lock(&tbm_surf_queue_lock);
199 _tbm_surf_queue_mutex_unlock(void)
201 pthread_mutex_unlock(&tbm_surf_queue_lock);
205 _init_tbm_surf_queue_bufmgr(void)
207 g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
211 _deinit_tbm_surf_queue_bufmgr(void)
213 if (!g_surf_queue_bufmgr)
216 tbm_bufmgr_deinit(g_surf_queue_bufmgr);
217 g_surf_queue_bufmgr = NULL;
221 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
223 tbm_surface_queue_h old_data = NULL;
225 if (surface_queue == NULL) {
226 TBM_LOG_E("error: surface_queue is NULL.\n");
230 if (g_surf_queue_bufmgr == NULL) {
231 TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
235 if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
236 TBM_LOG_E("error: surf_queue_list is empty\n");
240 LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
242 if (old_data == surface_queue) {
243 TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
248 TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
254 _queue_node_create(void)
256 queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
258 TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
264 _queue_node_delete(queue_node *node)
266 LIST_DEL(&node->item_link);
267 LIST_DEL(&node->link);
272 _queue_is_empty(queue *queue)
274 if (LIST_IS_EMPTY(&queue->head))
281 _queue_node_push_back(queue *queue, queue_node *node)
283 LIST_ADDTAIL(&node->item_link, &queue->head);
288 _queue_node_push_front(queue *queue, queue_node *node)
290 LIST_ADD(&node->item_link, &queue->head);
295 _queue_node_pop_front(queue *queue)
299 if (!queue->head.next) return NULL;
300 if (!queue->count) return NULL;
302 node = LIST_ENTRY(queue_node, queue->head.next, item_link);
304 LIST_DELINIT(&node->item_link);
311 _queue_node_pop(queue *queue, queue_node *node)
313 LIST_DELINIT(&node->item_link);
320 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
321 tbm_surface_h surface, int *out_type)
323 queue_node *node = NULL;
326 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
330 if (type & FREE_QUEUE) {
331 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
333 if (node->surface == surface) {
335 *out_type = FREE_QUEUE;
342 if (type & DIRTY_QUEUE) {
343 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
345 if (node->surface == surface) {
347 *out_type = DIRTY_QUEUE;
354 if (type & NODE_LIST) {
355 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
356 if (node->surface == surface) {
358 *out_type = NODE_LIST;
365 TBM_LOG_E("fail to get the queue_node.\n");
371 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
374 if (surface_queue->free_cb) {
375 surface_queue->free_cb(surface_queue,
376 surface_queue->alloc_cb_data,
380 tbm_surface_destroy(node->surface);
383 _queue_node_delete(node);
387 _queue_init(queue *queue)
389 LIST_INITHEAD(&queue->head);
395 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
398 TBM_RETURN_IF_FAIL(cb != NULL);
400 queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
402 TBM_RETURN_IF_FAIL(item != NULL);
404 LIST_INITHEAD(&item->link);
408 LIST_ADDTAIL(&item->link, list);
412 _notify_remove(struct list_head *list,
413 tbm_surface_queue_notify_cb cb, void *data)
415 queue_notify *item = NULL, *tmp;
417 LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
418 if (item->cb == cb && item->data == data) {
419 LIST_DEL(&item->link);
425 TBM_LOG_E("Cannot find notify\n");
429 _notify_remove_all(struct list_head *list)
431 queue_notify *item = NULL, *tmp;
433 LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
434 LIST_DEL(&item->link);
440 _notify_emit(tbm_surface_queue_h surface_queue,
441 struct list_head *list)
443 queue_notify *item = NULL, *tmp;
446 The item->cb is a function outside libtbm.
447 The tbm user may remove an item from the list inside the callback,
448 so we have to use LIST_FOR_EACH_ENTRY_SAFE.
450 LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
451 item->cb(surface_queue, item->data);
455 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
458 TBM_RETURN_IF_FAIL(cb != NULL);
460 queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
462 TBM_RETURN_IF_FAIL(item != NULL);
464 LIST_INITHEAD(&item->link);
468 LIST_ADDTAIL(&item->link, list);
472 _trace_remove(struct list_head *list,
473 tbm_surface_queue_trace_cb cb, void *data)
475 queue_trace *item = NULL, *tmp;
477 LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
478 if (item->cb == cb && item->data == data) {
479 LIST_DEL(&item->link);
485 TBM_LOG_E("Cannot find trace\n");
489 _trace_remove_all(struct list_head *list)
491 queue_trace *item = NULL, *tmp;
493 LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
494 LIST_DEL(&item->link);
500 _trace_emit(tbm_surface_queue_h surface_queue,
501 struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
503 queue_trace *item = NULL, *tmp;
506 The item->cb is a function outside libtbm.
507 The tbm user may remove an item from the list inside the callback,
508 so we have to use LIST_FOR_EACH_ENTRY_SAFE.
510 LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
511 item->cb(surface_queue, surface, trace, item->data);
515 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
517 queue_node *node = NULL;
520 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
521 if (node->type == type)
529 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
530 tbm_surface_h surface)
534 node = _queue_node_create();
535 TBM_RETURN_IF_FAIL(node != NULL);
537 tbm_surface_internal_ref(surface);
538 node->surface = surface;
540 LIST_ADDTAIL(&node->link, &surface_queue->list);
541 surface_queue->num_attached++;
542 _queue_node_push_back(&surface_queue->free_queue, node);
546 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
547 tbm_surface_h surface)
552 node = _queue_get_node(surface_queue, 0, surface, &queue_type);
554 _queue_delete_node(surface_queue, node);
555 surface_queue->num_attached--;
560 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
561 queue_node *node, int push_back)
564 _queue_node_push_back(&surface_queue->dirty_queue, node);
566 _queue_node_push_front(&surface_queue->dirty_queue, node);
570 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
574 node = _queue_node_pop_front(&surface_queue->free_queue);
580 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
584 if (_queue_is_empty(&surface_queue->dirty_queue))
587 node = _queue_node_pop_front(&surface_queue->dirty_queue);
593 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
594 queue_node *node, int push_back)
597 _queue_node_push_back(&surface_queue->free_queue, node);
599 _queue_node_push_front(&surface_queue->free_queue, node);
603 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
605 int width, int height, int format,
606 const tbm_surface_queue_interface *impl, void *data)
608 TBM_RETURN_IF_FAIL(surface_queue != NULL);
609 TBM_RETURN_IF_FAIL(impl != NULL);
611 if (!g_surf_queue_bufmgr)
612 _init_tbm_surf_queue_bufmgr();
614 pthread_mutex_init(&surface_queue->lock, NULL);
615 pthread_cond_init(&surface_queue->free_cond, NULL);
616 pthread_cond_init(&surface_queue->dirty_cond, NULL);
618 surface_queue->queue_size = queue_size;
619 surface_queue->width = width;
620 surface_queue->height = height;
621 surface_queue->format = format;
622 surface_queue->impl = impl;
623 surface_queue->impl_data = data;
624 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
626 _queue_init(&surface_queue->free_queue);
627 _queue_init(&surface_queue->dirty_queue);
628 LIST_INITHEAD(&surface_queue->list);
630 LIST_INITHEAD(&surface_queue->destroy_noti);
631 LIST_INITHEAD(&surface_queue->dequeuable_noti);
632 LIST_INITHEAD(&surface_queue->dequeue_noti);
633 LIST_INITHEAD(&surface_queue->can_dequeue_noti);
634 LIST_INITHEAD(&surface_queue->acquirable_noti);
635 LIST_INITHEAD(&surface_queue->reset_noti);
636 LIST_INITHEAD(&surface_queue->trace_noti);
638 if (surface_queue->impl && surface_queue->impl->init)
639 surface_queue->impl->init(surface_queue);
641 LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
644 tbm_surface_queue_error_e
645 tbm_surface_queue_add_destroy_cb(
646 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
649 _tbm_surf_queue_mutex_lock();
651 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
652 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
654 pthread_mutex_lock(&surface_queue->lock);
656 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
658 _notify_add(&surface_queue->destroy_noti, destroy_cb, data);
660 pthread_mutex_unlock(&surface_queue->lock);
662 _tbm_surf_queue_mutex_unlock();
664 return TBM_SURFACE_QUEUE_ERROR_NONE;
667 tbm_surface_queue_error_e
668 tbm_surface_queue_remove_destroy_cb(
669 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
672 _tbm_surf_queue_mutex_lock();
674 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
675 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
677 pthread_mutex_lock(&surface_queue->lock);
679 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
681 _notify_remove(&surface_queue->destroy_noti, destroy_cb, data);
683 pthread_mutex_unlock(&surface_queue->lock);
685 _tbm_surf_queue_mutex_unlock();
687 return TBM_SURFACE_QUEUE_ERROR_NONE;
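/*
 * Illustrative usage sketch (not part of this file): registering a destroy
 * notification, given a valid queue and a caller context ctx. The callback
 * name is hypothetical; it is invoked from tbm_surface_queue_destroy() via
 * _notify_emit().
 *
 *   static void
 *   on_queue_destroy(tbm_surface_queue_h queue, void *data)
 *   {
 *       // drop any reference the caller still keeps to this queue
 *   }
 *
 *   tbm_surface_queue_add_destroy_cb(queue, on_queue_destroy, ctx);
 *   ...
 *   tbm_surface_queue_remove_destroy_cb(queue, on_queue_destroy, ctx);
 */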
690 tbm_surface_queue_error_e
691 tbm_surface_queue_add_dequeuable_cb(
692 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
695 _tbm_surf_queue_mutex_lock();
697 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
698 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
700 pthread_mutex_lock(&surface_queue->lock);
702 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
704 _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
706 pthread_mutex_unlock(&surface_queue->lock);
708 _tbm_surf_queue_mutex_unlock();
710 return TBM_SURFACE_QUEUE_ERROR_NONE;
713 tbm_surface_queue_error_e
714 tbm_surface_queue_remove_dequeuable_cb(
715 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
718 _tbm_surf_queue_mutex_lock();
720 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
721 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
723 pthread_mutex_lock(&surface_queue->lock);
725 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
727 _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
729 pthread_mutex_unlock(&surface_queue->lock);
731 _tbm_surf_queue_mutex_unlock();
733 return TBM_SURFACE_QUEUE_ERROR_NONE;
736 tbm_surface_queue_error_e
737 tbm_surface_queue_add_dequeue_cb(
738 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
741 _tbm_surf_queue_mutex_lock();
743 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
744 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
746 pthread_mutex_lock(&surface_queue->lock);
748 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
750 _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
752 pthread_mutex_unlock(&surface_queue->lock);
754 _tbm_surf_queue_mutex_unlock();
756 return TBM_SURFACE_QUEUE_ERROR_NONE;
759 tbm_surface_queue_error_e
760 tbm_surface_queue_remove_dequeue_cb(
761 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
764 _tbm_surf_queue_mutex_lock();
766 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
767 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
769 pthread_mutex_lock(&surface_queue->lock);
771 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
773 _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
775 pthread_mutex_unlock(&surface_queue->lock);
777 _tbm_surf_queue_mutex_unlock();
779 return TBM_SURFACE_QUEUE_ERROR_NONE;
782 tbm_surface_queue_error_e
783 tbm_surface_queue_add_can_dequeue_cb(
784 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
787 _tbm_surf_queue_mutex_lock();
789 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
790 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
792 pthread_mutex_lock(&surface_queue->lock);
794 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
796 _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
798 pthread_mutex_unlock(&surface_queue->lock);
800 _tbm_surf_queue_mutex_unlock();
802 return TBM_SURFACE_QUEUE_ERROR_NONE;
805 tbm_surface_queue_error_e
806 tbm_surface_queue_remove_can_dequeue_cb(
807 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
810 _tbm_surf_queue_mutex_lock();
812 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
813 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
815 pthread_mutex_lock(&surface_queue->lock);
817 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
819 _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
821 pthread_mutex_unlock(&surface_queue->lock);
823 _tbm_surf_queue_mutex_unlock();
825 return TBM_SURFACE_QUEUE_ERROR_NONE;
828 tbm_surface_queue_error_e
829 tbm_surface_queue_add_acquirable_cb(
830 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
833 _tbm_surf_queue_mutex_lock();
835 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
836 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
838 pthread_mutex_lock(&surface_queue->lock);
840 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
842 _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
844 pthread_mutex_unlock(&surface_queue->lock);
846 _tbm_surf_queue_mutex_unlock();
848 return TBM_SURFACE_QUEUE_ERROR_NONE;
851 tbm_surface_queue_error_e
852 tbm_surface_queue_remove_acquirable_cb(
853 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
856 _tbm_surf_queue_mutex_lock();
858 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
859 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
861 pthread_mutex_lock(&surface_queue->lock);
863 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
865 _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
867 pthread_mutex_unlock(&surface_queue->lock);
869 _tbm_surf_queue_mutex_unlock();
871 return TBM_SURFACE_QUEUE_ERROR_NONE;
874 tbm_surface_queue_error_e
875 tbm_surface_queue_add_trace_cb(
876 tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
879 _tbm_surf_queue_mutex_lock();
881 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
882 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
884 pthread_mutex_lock(&surface_queue->lock);
886 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
888 _trace_add(&surface_queue->trace_noti, trace_cb, data);
890 pthread_mutex_unlock(&surface_queue->lock);
892 _tbm_surf_queue_mutex_unlock();
894 return TBM_SURFACE_QUEUE_ERROR_NONE;
897 tbm_surface_queue_error_e
898 tbm_surface_queue_remove_trace_cb(
899 tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
902 _tbm_surf_queue_mutex_lock();
904 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
905 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
907 pthread_mutex_lock(&surface_queue->lock);
909 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
911 _trace_remove(&surface_queue->trace_noti, trace_cb, data);
913 pthread_mutex_unlock(&surface_queue->lock);
915 _tbm_surf_queue_mutex_unlock();
917 return TBM_SURFACE_QUEUE_ERROR_NONE;
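/*
 * Illustrative usage sketch (not part of this file): a trace callback sees
 * every dequeue/enqueue/acquire/release/cancel event emitted through
 * _trace_emit(). Names are hypothetical.
 *
 *   static void
 *   on_queue_trace(tbm_surface_queue_h queue, tbm_surface_h surface,
 *                  tbm_surface_queue_trace trace, void *data)
 *   {
 *       if (trace == TBM_SURFACE_QUEUE_TRACE_ENQUEUE)
 *           ; // e.g. timestamp the frame the producer just handed over
 *   }
 *
 *   tbm_surface_queue_add_trace_cb(queue, on_queue_trace, ctx);
 */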
920 tbm_surface_queue_error_e
921 tbm_surface_queue_set_alloc_cb(
922 tbm_surface_queue_h surface_queue,
923 tbm_surface_alloc_cb alloc_cb,
924 tbm_surface_free_cb free_cb,
927 _tbm_surf_queue_mutex_lock();
929 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
930 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
932 pthread_mutex_lock(&surface_queue->lock);
934 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
936 surface_queue->alloc_cb = alloc_cb;
937 surface_queue->free_cb = free_cb;
938 surface_queue->alloc_cb_data = data;
940 pthread_mutex_unlock(&surface_queue->lock);
942 _tbm_surf_queue_mutex_unlock();
944 return TBM_SURFACE_QUEUE_ERROR_NONE;
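/*
 * Illustrative usage sketch (not part of this file): routing allocation
 * through the caller. alloc_cb is called from the need_attach path and
 * free_cb from _queue_delete_node() before the queue drops its own
 * reference; dropping the caller's reference in free_cb is one plausible
 * ownership scheme, not the only one. Names are hypothetical.
 *
 *   static tbm_surface_h
 *   my_alloc(tbm_surface_queue_h queue, void *data)
 *   {
 *       return tbm_surface_create(tbm_surface_queue_get_width(queue),
 *                                 tbm_surface_queue_get_height(queue),
 *                                 tbm_surface_queue_get_format(queue));
 *   }
 *
 *   static void
 *   my_free(tbm_surface_queue_h queue, void *data, tbm_surface_h surface)
 *   {
 *       tbm_surface_destroy(surface); // drop the reference my_alloc created
 *   }
 *
 *   tbm_surface_queue_set_alloc_cb(queue, my_alloc, my_free, ctx);
 */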
948 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
952 _tbm_surf_queue_mutex_lock();
954 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
956 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
958 width = surface_queue->width;
960 _tbm_surf_queue_mutex_unlock();
966 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
970 _tbm_surf_queue_mutex_lock();
972 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
974 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
976 height = surface_queue->height;
978 _tbm_surf_queue_mutex_unlock();
984 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
988 _tbm_surf_queue_mutex_lock();
990 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
992 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
994 format = surface_queue->format;
996 _tbm_surf_queue_mutex_unlock();
1002 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1006 _tbm_surf_queue_mutex_lock();
1008 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1010 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1012 queue_size = surface_queue->queue_size;
1014 _tbm_surf_queue_mutex_unlock();
1019 tbm_surface_queue_error_e
1020 tbm_surface_queue_add_reset_cb(
1021 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1024 _tbm_surf_queue_mutex_lock();
1026 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1027 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1029 pthread_mutex_lock(&surface_queue->lock);
1031 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1033 _notify_add(&surface_queue->reset_noti, reset_cb, data);
1035 pthread_mutex_unlock(&surface_queue->lock);
1037 _tbm_surf_queue_mutex_unlock();
1039 return TBM_SURFACE_QUEUE_ERROR_NONE;
1042 tbm_surface_queue_error_e
1043 tbm_surface_queue_remove_reset_cb(
1044 tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1047 _tbm_surf_queue_mutex_lock();
1049 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1050 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1052 pthread_mutex_lock(&surface_queue->lock);
1054 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1056 _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1058 pthread_mutex_unlock(&surface_queue->lock);
1060 _tbm_surf_queue_mutex_unlock();
1062 return TBM_SURFACE_QUEUE_ERROR_NONE;
1065 tbm_surface_queue_error_e
1066 tbm_surface_queue_enqueue(tbm_surface_queue_h
1067 surface_queue, tbm_surface_h surface)
1072 _tbm_surf_queue_mutex_lock();
1074 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1075 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1076 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1077 TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1080 tbm_surface_internal_dump_buffer(surface, "enqueue");
1082 pthread_mutex_lock(&surface_queue->lock);
1084 TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1086 node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1087 if (node == NULL || queue_type != NODE_LIST) {
1088 TBM_LOG_E("tbm_surface_queue_enqueue::Surface already exists in free_queue or dirty_queue, node:%p, type:%d\n",
1090 pthread_mutex_unlock(&surface_queue->lock);
1092 _tbm_surf_queue_mutex_unlock();
1095 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1097 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1100 if (surface_queue->impl && surface_queue->impl->enqueue)
1101 surface_queue->impl->enqueue(surface_queue, node);
1103 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1105 if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1106 TBM_LOG_E("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1107 pthread_mutex_unlock(&surface_queue->lock);
1109 _tbm_surf_queue_mutex_unlock();
1110 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1113 node->type = QUEUE_NODE_TYPE_ENQUEUE;
1115 if (surface_queue->enqueue_sync_count == 1) {
1116 tbm_surface_info_s info;
1119 TBM_LOG_E("start map surface:%p", surface);
1120 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1121 TBM_LOG_E("end map surface:%p", surface);
1122 if (ret == TBM_SURFACE_ERROR_NONE)
1123 tbm_surface_unmap(surface);
1126 if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1128 pthread_mutex_unlock(&surface_queue->lock);
1129 pthread_cond_signal(&surface_queue->dirty_cond);
1131 _tbm_surf_queue_mutex_unlock();
1133 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1135 _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1137 return TBM_SURFACE_QUEUE_ERROR_NONE;
1140 tbm_surface_queue_error_e
1141 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1142 surface_queue, tbm_surface_h surface)
1147 _tbm_surf_queue_mutex_lock();
1149 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1150 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1151 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1152 TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1154 pthread_mutex_lock(&surface_queue->lock);
1156 TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1158 node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1159 if (node == NULL || queue_type != NODE_LIST) {
1160 TBM_LOG_E("tbm_surface_queue_cancel_dequeue::Surface already exists in free_queue or dirty_queue, node:%p, type:%d\n",
1162 pthread_mutex_unlock(&surface_queue->lock);
1164 _tbm_surf_queue_mutex_unlock();
1165 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1168 if (node->delete_pending) {
1169 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1171 _queue_delete_node(surface_queue, node);
1173 pthread_mutex_unlock(&surface_queue->lock);
1175 _tbm_surf_queue_mutex_unlock();
1177 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1179 return TBM_SURFACE_QUEUE_ERROR_NONE;
1182 if (surface_queue->queue_size < surface_queue->num_attached) {
1183 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1185 if (surface_queue->impl && surface_queue->impl->need_detach)
1186 surface_queue->impl->need_detach(surface_queue, node);
1188 _tbm_surface_queue_detach(surface_queue, surface);
1190 pthread_mutex_unlock(&surface_queue->lock);
1192 _tbm_surf_queue_mutex_unlock();
1194 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1196 return TBM_SURFACE_QUEUE_ERROR_NONE;
1199 if (surface_queue->impl && surface_queue->impl->release)
1200 surface_queue->impl->release(surface_queue, node);
1202 _tbm_surface_queue_release(surface_queue, node, 1);
1204 if (_queue_is_empty(&surface_queue->free_queue)) {
1205 pthread_mutex_unlock(&surface_queue->lock);
1207 TBM_LOG_E("surface_queue->free_queue is empty.\n");
1208 _tbm_surf_queue_mutex_unlock();
1209 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1212 node->type = QUEUE_NODE_TYPE_RELEASE;
1214 pthread_mutex_unlock(&surface_queue->lock);
1215 pthread_cond_signal(&surface_queue->free_cond);
1217 _tbm_surf_queue_mutex_unlock();
1219 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1221 return TBM_SURFACE_QUEUE_ERROR_NONE;
1224 tbm_surface_queue_error_e
1225 tbm_surface_queue_dequeue(tbm_surface_queue_h
1226 surface_queue, tbm_surface_h *surface)
1230 _tbm_surf_queue_mutex_lock();
1232 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1233 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1234 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1235 TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1239 pthread_mutex_lock(&surface_queue->lock);
1241 if (_queue_is_empty(&surface_queue->free_queue)) {
1242 if (surface_queue->impl && surface_queue->impl->need_attach)
1243 surface_queue->impl->need_attach(surface_queue);
1245 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1246 TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1247 pthread_mutex_unlock(&surface_queue->lock);
1248 _tbm_surf_queue_mutex_unlock();
1249 return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1253 if (surface_queue->impl && surface_queue->impl->dequeue)
1254 node = surface_queue->impl->dequeue(surface_queue);
1256 node = _tbm_surface_queue_dequeue(surface_queue);
1258 if (node == NULL || node->surface == NULL) {
1259 TBM_LOG_E("_queue_node_pop_front failed\n");
1260 pthread_mutex_unlock(&surface_queue->lock);
1262 _tbm_surf_queue_mutex_unlock();
1263 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1266 node->type = QUEUE_NODE_TYPE_DEQUEUE;
1267 *surface = node->surface;
1269 TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1271 pthread_mutex_unlock(&surface_queue->lock);
1273 _tbm_surf_queue_mutex_unlock();
1275 _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1277 _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1279 return TBM_SURFACE_QUEUE_ERROR_NONE;
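/*
 * Illustrative producer-side sketch (not part of this file): dequeue a free
 * surface, write into it, then hand it back with enqueue so a consumer can
 * acquire it. Error handling is abbreviated.
 *
 *   tbm_surface_h surface = NULL;
 *   tbm_surface_info_s info;
 *
 *   if (tbm_surface_queue_can_dequeue(queue, 1) &&
 *       tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       if (tbm_surface_map(surface, TBM_SURF_OPTION_WRITE, &info) == TBM_SURFACE_ERROR_NONE) {
 *           // fill info.planes[] with pixel data
 *           tbm_surface_unmap(surface);
 *       }
 *       tbm_surface_queue_enqueue(queue, surface);
 *   }
 */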
1283 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1285 _tbm_surf_queue_mutex_lock();
1287 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1289 _tbm_surf_queue_mutex_unlock();
1291 _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1293 _tbm_surf_queue_mutex_lock();
1295 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1297 pthread_mutex_lock(&surface_queue->lock);
1299 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1301 if (_queue_is_empty(&surface_queue->free_queue)) {
1302 if (surface_queue->impl && surface_queue->impl->need_attach)
1303 surface_queue->impl->need_attach(surface_queue);
1305 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1306 TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1307 pthread_mutex_unlock(&surface_queue->lock);
1308 _tbm_surf_queue_mutex_unlock();
1313 if (!_queue_is_empty(&surface_queue->free_queue)) {
1314 pthread_mutex_unlock(&surface_queue->lock);
1315 _tbm_surf_queue_mutex_unlock();
1319 if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1320 QUEUE_NODE_TYPE_ACQUIRE)) {
1321 _tbm_surf_queue_mutex_unlock();
1322 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1323 pthread_mutex_unlock(&surface_queue->lock);
1327 pthread_mutex_unlock(&surface_queue->lock);
1328 _tbm_surf_queue_mutex_unlock();
1332 tbm_surface_queue_error_e
1333 tbm_surface_queue_release(tbm_surface_queue_h
1334 surface_queue, tbm_surface_h surface)
1339 _tbm_surf_queue_mutex_lock();
1341 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1342 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1343 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1344 TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1346 pthread_mutex_lock(&surface_queue->lock);
1348 TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1350 node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1351 if (node == NULL || queue_type != NODE_LIST) {
1352 TBM_LOG_E("tbm_surface_queue_release::Surface already exists in free_queue or dirty_queue, node:%p, type:%d\n",
1354 pthread_mutex_unlock(&surface_queue->lock);
1356 _tbm_surf_queue_mutex_unlock();
1359 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1361 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1364 if (node->delete_pending) {
1365 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1367 _queue_delete_node(surface_queue, node);
1369 pthread_mutex_unlock(&surface_queue->lock);
1371 _tbm_surf_queue_mutex_unlock();
1373 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1375 return TBM_SURFACE_QUEUE_ERROR_NONE;
1378 if (surface_queue->queue_size < surface_queue->num_attached) {
1379 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1381 if (surface_queue->impl && surface_queue->impl->need_detach)
1382 surface_queue->impl->need_detach(surface_queue, node);
1384 _tbm_surface_queue_detach(surface_queue, surface);
1386 pthread_mutex_unlock(&surface_queue->lock);
1388 _tbm_surf_queue_mutex_unlock();
1390 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1392 return TBM_SURFACE_QUEUE_ERROR_NONE;
1395 if (surface_queue->impl && surface_queue->impl->release)
1396 surface_queue->impl->release(surface_queue, node);
1398 _tbm_surface_queue_release(surface_queue, node, 1);
1400 if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1401 TBM_LOG_E("release surface(%p) but surface isn't present in the free_queue\n", surface);
1402 pthread_mutex_unlock(&surface_queue->lock);
1404 _tbm_surf_queue_mutex_unlock();
1405 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1408 node->type = QUEUE_NODE_TYPE_RELEASE;
1410 pthread_mutex_unlock(&surface_queue->lock);
1411 pthread_cond_signal(&surface_queue->free_cond);
1413 _tbm_surf_queue_mutex_unlock();
1415 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1417 _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1419 return TBM_SURFACE_QUEUE_ERROR_NONE;
1422 tbm_surface_queue_error_e
1423 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1424 surface_queue, tbm_surface_h surface)
1429 _tbm_surf_queue_mutex_lock();
1431 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1432 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1433 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1434 TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1436 pthread_mutex_lock(&surface_queue->lock);
1438 TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1440 node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1441 if (node == NULL || queue_type != NODE_LIST) {
1442 TBM_LOG_E("tbm_surface_queue_cancel_acquire::Surface already exists in free_queue or dirty_queue, node:%p, type:%d\n",
1444 pthread_mutex_unlock(&surface_queue->lock);
1446 _tbm_surf_queue_mutex_unlock();
1447 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1450 if (surface_queue->impl && surface_queue->impl->enqueue)
1451 surface_queue->impl->enqueue(surface_queue, node);
1453 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1455 if (_queue_is_empty(&surface_queue->dirty_queue)) {
1456 TBM_LOG_E("enqueue surface but dirty_queue is empty, node:%p\n", node);
1457 pthread_mutex_unlock(&surface_queue->lock);
1459 _tbm_surf_queue_mutex_unlock();
1460 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1463 node->type = QUEUE_NODE_TYPE_ENQUEUE;
1465 pthread_mutex_unlock(&surface_queue->lock);
1466 pthread_cond_signal(&surface_queue->dirty_cond);
1468 _tbm_surf_queue_mutex_unlock();
1470 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1472 _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1474 return TBM_SURFACE_QUEUE_ERROR_NONE;
1477 tbm_surface_queue_error_e
1478 tbm_surface_queue_acquire(tbm_surface_queue_h
1479 surface_queue, tbm_surface_h *surface)
1483 _tbm_surf_queue_mutex_lock();
1487 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1488 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1489 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1490 TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1492 pthread_mutex_lock(&surface_queue->lock);
1494 if (surface_queue->impl && surface_queue->impl->acquire)
1495 node = surface_queue->impl->acquire(surface_queue);
1497 node = _tbm_surface_queue_acquire(surface_queue);
1499 if (node == NULL || node->surface == NULL) {
1500 TBM_LOG_E("_queue_node_pop_front failed\n");
1501 pthread_mutex_unlock(&surface_queue->lock);
1503 _tbm_surf_queue_mutex_unlock();
1504 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1507 node->type = QUEUE_NODE_TYPE_ACQUIRE;
1509 *surface = node->surface;
1511 if (surface_queue->acquire_sync_count == 1) {
1512 tbm_surface_info_s info;
1515 TBM_LOG_E("start map surface:%p", *surface);
1516 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1517 TBM_LOG_E("end map surface:%p", *surface);
1518 if (ret == TBM_SURFACE_ERROR_NONE)
1519 tbm_surface_unmap(*surface);
1522 if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1524 TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1526 pthread_mutex_unlock(&surface_queue->lock);
1528 _tbm_surf_queue_mutex_unlock();
1531 tbm_surface_internal_dump_buffer(*surface, "acquire");
1533 _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1535 return TBM_SURFACE_QUEUE_ERROR_NONE;
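/*
 * Illustrative consumer-side sketch (not part of this file): take the oldest
 * enqueued surface, read or display it, then return it to the free_queue with
 * release so the producer can dequeue it again.
 *
 *   tbm_surface_h surface = NULL;
 *
 *   if (tbm_surface_queue_can_acquire(queue, 0) &&
 *       tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       // scan out, compose, or read the surface here
 *       tbm_surface_queue_release(queue, surface);
 *   }
 */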
1539 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1541 _tbm_surf_queue_mutex_lock();
1543 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1545 pthread_mutex_lock(&surface_queue->lock);
1547 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1549 if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1550 pthread_mutex_unlock(&surface_queue->lock);
1551 _tbm_surf_queue_mutex_unlock();
1555 if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1556 QUEUE_NODE_TYPE_DEQUEUE)) {
1557 _tbm_surf_queue_mutex_unlock();
1558 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1559 pthread_mutex_unlock(&surface_queue->lock);
1563 pthread_mutex_unlock(&surface_queue->lock);
1564 _tbm_surf_queue_mutex_unlock();
1569 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1571 queue_node *node = NULL, *tmp;
1573 _tbm_surf_queue_mutex_lock();
1575 TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1577 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1579 LIST_DEL(&surface_queue->item_link);
1581 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1582 _queue_delete_node(surface_queue, node);
1584 if (surface_queue->impl && surface_queue->impl->destroy)
1585 surface_queue->impl->destroy(surface_queue);
1587 _notify_emit(surface_queue, &surface_queue->destroy_noti);
1589 _notify_remove_all(&surface_queue->destroy_noti);
1590 _notify_remove_all(&surface_queue->dequeuable_noti);
1591 _notify_remove_all(&surface_queue->dequeue_noti);
1592 _notify_remove_all(&surface_queue->can_dequeue_noti);
1593 _notify_remove_all(&surface_queue->acquirable_noti);
1594 _notify_remove_all(&surface_queue->reset_noti);
1595 _trace_remove_all(&surface_queue->trace_noti);
1597 pthread_mutex_destroy(&surface_queue->lock);
1599 free(surface_queue);
1601 if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1602 _deinit_tbm_surf_queue_bufmgr();
1604 _tbm_surf_queue_mutex_unlock();
1607 tbm_surface_queue_error_e
1608 tbm_surface_queue_reset(tbm_surface_queue_h
1609 surface_queue, int width, int height, int format)
1611 queue_node *node = NULL, *tmp;
1613 _tbm_surf_queue_mutex_lock();
1615 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1616 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1618 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1620 if (width == surface_queue->width && height == surface_queue->height &&
1621 format == surface_queue->format) {
1622 _tbm_surf_queue_mutex_unlock();
1623 return TBM_SURFACE_QUEUE_ERROR_NONE;
1626 pthread_mutex_lock(&surface_queue->lock);
1628 surface_queue->width = width;
1629 surface_queue->height = height;
1630 surface_queue->format = format;
1632 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1633 /* Destroy surfaces and push to free_queue */
1634 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1635 _queue_delete_node(surface_queue, node);
1637 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1638 node->delete_pending = 1;
1640 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1641 _queue_delete_node(surface_queue, node);
1643 _queue_init(&surface_queue->dirty_queue);
1644 LIST_INITHEAD(&surface_queue->list);
1648 _queue_init(&surface_queue->free_queue);
1650 surface_queue->num_attached = 0;
1652 if (surface_queue->impl && surface_queue->impl->reset)
1653 surface_queue->impl->reset(surface_queue);
1655 pthread_mutex_unlock(&surface_queue->lock);
1656 pthread_cond_signal(&surface_queue->free_cond);
1658 _tbm_surf_queue_mutex_unlock();
1660 _notify_emit(surface_queue, &surface_queue->reset_noti);
1662 return TBM_SURFACE_QUEUE_ERROR_NONE;
1665 tbm_surface_queue_error_e
1666 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1668 _tbm_surf_queue_mutex_lock();
1670 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1671 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1673 _tbm_surf_queue_mutex_unlock();
1675 _notify_emit(surface_queue, &surface_queue->reset_noti);
1677 return TBM_SURFACE_QUEUE_ERROR_NONE;
1680 tbm_surface_queue_error_e
1681 tbm_surface_queue_set_size(tbm_surface_queue_h
1682 surface_queue, int queue_size, int flush)
1684 queue_node *node = NULL, *tmp;
1686 _tbm_surf_queue_mutex_lock();
1688 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1689 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1690 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1691 TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1693 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1695 if ((surface_queue->queue_size == queue_size) && !flush) {
1696 _tbm_surf_queue_mutex_unlock();
1697 return TBM_SURFACE_QUEUE_ERROR_NONE;
1700 pthread_mutex_lock(&surface_queue->lock);
1703 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1704 /* Destroy surfaces and push to free_queue */
1705 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1706 _queue_delete_node(surface_queue, node);
1708 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1709 node->delete_pending = 1;
1711 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1712 _queue_delete_node(surface_queue, node);
1714 _queue_init(&surface_queue->dirty_queue);
1715 LIST_INITHEAD(&surface_queue->list);
1719 _queue_init(&surface_queue->free_queue);
1721 surface_queue->num_attached = 0;
1722 surface_queue->queue_size = queue_size;
1724 if (surface_queue->impl && surface_queue->impl->reset)
1725 surface_queue->impl->reset(surface_queue);
1727 pthread_mutex_unlock(&surface_queue->lock);
1728 pthread_cond_signal(&surface_queue->free_cond);
1730 _tbm_surf_queue_mutex_unlock();
1732 _notify_emit(surface_queue, &surface_queue->reset_noti);
1734 return TBM_SURFACE_QUEUE_ERROR_NONE;
1736 if (surface_queue->queue_size > queue_size) {
1737 int need_del = surface_queue->queue_size - queue_size;
1739 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1740 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1742 if (surface_queue->impl && surface_queue->impl->need_detach)
1743 surface_queue->impl->need_detach(surface_queue, node);
1745 _tbm_surface_queue_detach(surface_queue, node->surface);
1753 surface_queue->queue_size = queue_size;
1755 pthread_mutex_unlock(&surface_queue->lock);
1757 _tbm_surf_queue_mutex_unlock();
1759 return TBM_SURFACE_QUEUE_ERROR_NONE;
1763 tbm_surface_queue_error_e
1764 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1766 queue_node *node = NULL;
1768 _tbm_surf_queue_mutex_lock();
1770 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1771 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1773 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1775 if (surface_queue->num_attached == 0) {
1776 _tbm_surf_queue_mutex_unlock();
1777 return TBM_SURFACE_QUEUE_ERROR_NONE;
1780 pthread_mutex_lock(&surface_queue->lock);
1782 /* Destroy surfaces in free_queue */
1783 while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1784 if (surface_queue->impl && surface_queue->impl->need_detach)
1785 surface_queue->impl->need_detach(surface_queue, node);
1787 _tbm_surface_queue_detach(surface_queue, node->surface);
1791 _queue_init(&surface_queue->free_queue);
1793 pthread_mutex_unlock(&surface_queue->lock);
1794 _tbm_surf_queue_mutex_unlock();
1796 return TBM_SURFACE_QUEUE_ERROR_NONE;
1799 tbm_surface_queue_error_e
1800 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1802 queue_node *node = NULL, *tmp;
1804 _tbm_surf_queue_mutex_lock();
1806 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1807 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1809 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1811 if (surface_queue->num_attached == 0) {
1812 _tbm_surf_queue_mutex_unlock();
1813 return TBM_SURFACE_QUEUE_ERROR_NONE;
1816 pthread_mutex_lock(&surface_queue->lock);
1818 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1819 /* Destroy surfaces and push to free_queue */
1820 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1821 _queue_delete_node(surface_queue, node);
1823 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1824 node->delete_pending = 1;
1826 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1827 _queue_delete_node(surface_queue, node);
1829 _queue_init(&surface_queue->dirty_queue);
1830 LIST_INITHEAD(&surface_queue->list);
1834 _queue_init(&surface_queue->free_queue);
1836 surface_queue->num_attached = 0;
1838 if (surface_queue->impl && surface_queue->impl->reset)
1839 surface_queue->impl->reset(surface_queue);
1841 pthread_mutex_unlock(&surface_queue->lock);
1842 pthread_cond_signal(&surface_queue->free_cond);
1844 _tbm_surf_queue_mutex_unlock();
1846 _notify_emit(surface_queue, &surface_queue->reset_noti);
1848 return TBM_SURFACE_QUEUE_ERROR_NONE;
1851 tbm_surface_queue_error_e
1852 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1853 tbm_surface_h *surfaces, int *num)
1855 queue_node *node = NULL;
1857 _tbm_surf_queue_mutex_lock();
1859 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1860 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1861 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1862 TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1866 pthread_mutex_lock(&surface_queue->lock);
1868 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1870 surfaces[*num] = node->surface;
1875 pthread_mutex_unlock(&surface_queue->lock);
1877 _tbm_surf_queue_mutex_unlock();
1879 return TBM_SURFACE_QUEUE_ERROR_NONE;
1882 tbm_surface_queue_error_e
1883 tbm_surface_queue_get_trace_surface_num(
1884 tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1886 _tbm_surf_queue_mutex_lock();
1888 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1889 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1890 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1891 TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1895 pthread_mutex_lock(&surface_queue->lock);
1898 case TBM_SURFACE_QUEUE_TRACE_NONE:
1901 case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1902 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1904 case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1905 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1907 case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1908 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1910 case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1911 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1917 pthread_mutex_unlock(&surface_queue->lock);
1919 _tbm_surf_queue_mutex_unlock();
1921 return TBM_SURFACE_QUEUE_ERROR_NONE;
1926 } tbm_queue_default;
1929 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1931 free(surface_queue->impl_data);
1935 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
1937 tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
1938 tbm_surface_h surface;
1940 if (surface_queue->queue_size == surface_queue->num_attached)
1943 if (surface_queue->alloc_cb) {
1944 pthread_mutex_unlock(&surface_queue->lock);
1945 _tbm_surf_queue_mutex_unlock();
1946 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1947 _tbm_surf_queue_mutex_lock();
1948 pthread_mutex_lock(&surface_queue->lock);
1954 tbm_surface_internal_ref(surface);
1956 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1957 surface_queue->height,
1958 surface_queue->format,
1960 TBM_RETURN_IF_FAIL(surface != NULL);
1963 _tbm_surface_queue_attach(surface_queue, surface);
1964 tbm_surface_internal_unref(surface);
1967 static const tbm_surface_queue_interface tbm_queue_default_impl = {
1968 NULL, /*__tbm_queue_default_init*/
1969 NULL, /*__tbm_queue_default_reset*/
1970 __tbm_queue_default_destroy,
1971 __tbm_queue_default_need_attach,
1972 NULL, /*__tbm_queue_default_enqueue*/
1973 NULL, /*__tbm_queue_default_release*/
1974 NULL, /*__tbm_queue_default_dequeue*/
1975 NULL, /*__tbm_queue_default_acquire*/
1976 NULL, /*__tbm_queue_default_need_detach*/
1980 tbm_surface_queue_create(int queue_size, int width,
1981 int height, int format, int flags)
1983 TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1984 TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1985 TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1986 TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1988 _tbm_surf_queue_mutex_lock();
1990 tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1991 sizeof(struct _tbm_surface_queue));
1992 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1994 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1996 tbm_queue_default *data = (tbm_queue_default *) calloc(1,
1997 sizeof(tbm_queue_default));
1999 TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
2000 free(surface_queue);
2001 _tbm_surf_queue_mutex_unlock();
2005 data->flags = flags;
2006 _tbm_surface_queue_init(surface_queue,
2008 width, height, format,
2009 &tbm_queue_default_impl, data);
2011 _tbm_surf_queue_mutex_unlock();
2013 return surface_queue;
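/*
 * Illustrative setup sketch (not part of this file): a three-deep default
 * queue used with the dequeue/enqueue/acquire/release calls above. The
 * format and flags are example values.
 *
 *   tbm_surface_queue_h queue =
 *       tbm_surface_queue_create(3, 1280, 720, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   if (queue) {
 *       // producer: dequeue/enqueue; consumer: acquire/release
 *       tbm_surface_queue_destroy(queue);
 *   }
 */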
2019 } tbm_queue_sequence;
2022 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2024 tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2026 _queue_init(&data->dequeue_list);
2030 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2032 tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2034 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2037 _queue_init(&data->dequeue_list);
2041 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2043 free(surface_queue->impl_data);
2047 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
2049 tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2050 tbm_surface_h surface;
2052 if (surface_queue->queue_size == surface_queue->num_attached)
2055 if (surface_queue->alloc_cb) {
2056 pthread_mutex_unlock(&surface_queue->lock);
2057 _tbm_surf_queue_mutex_unlock();
2058 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2059 _tbm_surf_queue_mutex_lock();
2060 pthread_mutex_lock(&surface_queue->lock);
2066 tbm_surface_internal_ref(surface);
2068 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2069 surface_queue->height,
2070 surface_queue->format,
2072 TBM_RETURN_IF_FAIL(surface != NULL);
2075 _tbm_surface_queue_attach(surface_queue, surface);
2076 tbm_surface_internal_unref(surface);
2080 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2083 tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2084 queue_node *first = NULL;
2086 first = container_of(data->dequeue_list.head.next, first, item_link);
2087 if (first != node) {
2091 node->priv_flags = 0;
2093 _queue_node_pop(&data->dequeue_list, node);
2094 _tbm_surface_queue_enqueue(surface_queue, node, 1);
2098 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2101 tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2103 if (node->priv_flags) {
2104 node->priv_flags = 0;
2105 _queue_node_pop(&data->dequeue_list, node);
2108 _tbm_surface_queue_release(surface_queue, node, 1);
2112 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2115 tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2118 node = _tbm_surface_queue_dequeue(surface_queue);
2120 _queue_node_push_back(&data->dequeue_list, node);
2121 node->priv_flags = 1;
2127 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2128 __tbm_queue_sequence_init,
2129 __tbm_queue_sequence_reset,
2130 __tbm_queue_sequence_destroy,
2131 __tbm_queue_sequence_need_attach,
2132 __tbm_queue_sequence_enqueue,
2133 __tbm_queue_sequence_release,
2134 __tbm_queue_sequence_dequeue,
2135 NULL, /*__tbm_queue_sequence_acquire*/
2136 NULL, /*__tbm_queue_sequence_need_detach*/
2140 tbm_surface_queue_sequence_create(int queue_size, int width,
2141 int height, int format, int flags)
2143 TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2144 TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
2145 TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
2146 TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2148 _tbm_surf_queue_mutex_lock();
2150 tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2151 sizeof(struct _tbm_surface_queue));
2152 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2154 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
2156 tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2157 sizeof(tbm_queue_sequence));
2159 TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
2160 free(surface_queue);
2161 _tbm_surf_queue_mutex_unlock();
2165 data->flags = flags;
2166 _tbm_surface_queue_init(surface_queue,
2168 width, height, format,
2169 &tbm_queue_sequence_impl, data);
2171 _tbm_surf_queue_mutex_unlock();
2173 return surface_queue;
2176 tbm_surface_queue_error_e
2177 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2180 _tbm_surf_queue_mutex_lock();
2182 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2183 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2185 pthread_mutex_lock(&surface_queue->lock);
2187 if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2188 modes = TBM_SURFACE_QUEUE_MODE_NONE;
2190 surface_queue->modes |= modes;
2192 pthread_mutex_unlock(&surface_queue->lock);
2194 _tbm_surf_queue_mutex_unlock();
2196 return TBM_SURFACE_QUEUE_ERROR_NONE;
2199 tbm_surface_queue_error_e
2200 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2201 unsigned int sync_count)
2203 int dequeue_num, enqueue_num;
2205 _tbm_surf_queue_mutex_lock();
2207 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2208 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2210 pthread_mutex_lock(&surface_queue->lock);
2212 dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2213 enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2215 if (dequeue_num + sync_count == 0)
2216 surface_queue->acquire_sync_count = enqueue_num;
2218 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2220 TBM_QUEUE_TRACE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2221 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2223 pthread_mutex_unlock(&surface_queue->lock);
2225 _tbm_surf_queue_mutex_unlock();
2227 return TBM_SURFACE_QUEUE_ERROR_NONE;
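/*
 * Worked example (illustrative): with two surfaces currently dequeued and
 * sync_count = 1, enqueue_sync_count becomes 3; the counter is decremented on
 * each enqueue while positive, and the enqueue that sees it at 1 performs the
 * blocking tbm_surface_map()/tbm_surface_unmap() in tbm_surface_queue_enqueue()
 * above. With sync_count = 0 and nothing dequeued, acquire_sync_count is set
 * to the number of surfaces already enqueued, so the last of those acquires
 * does the blocking map instead.
 */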