/**************************************************************************

Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.

Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sub license, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/
#include "tbm_bufmgr_int.h"

#define TBM_QUEUE_DEBUG 0
#ifdef TRACE
#define TBM_QUEUE_TRACE(fmt, ...) { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
#else
#define TBM_QUEUE_TRACE(fmt, ...)
#endif
#define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
#define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)

static tbm_bufmgr g_surf_queue_bufmgr;
static pthread_mutex_t tbm_surf_queue_lock;
void _tbm_surface_queue_mutex_unlock(void);
#define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
if (!(cond)) {\
TBM_LOG_E("'%s' failed.\n", #cond);\
_tbm_surf_queue_mutex_unlock();\
return;\
} \
}

#define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
if (!(cond)) {\
TBM_LOG_E("'%s' failed.\n", #cond);\
_tbm_surf_queue_mutex_unlock();\
return val;\
} \
}
typedef enum _queue_node_type {
QUEUE_NODE_TYPE_DEQUEUE,
QUEUE_NODE_TYPE_ENQUEUE,
QUEUE_NODE_TYPE_ACQUIRE,
QUEUE_NODE_TYPE_RELEASE

struct list_head head;

tbm_surface_h surface;

struct list_head item_link;
struct list_head link;

unsigned int priv_flags; /*for each queue*/

struct list_head link;

tbm_surface_queue_notify_cb cb;
typedef struct _tbm_surface_queue_interface {
void (*init)(tbm_surface_queue_h queue);
void (*reset)(tbm_surface_queue_h queue);
void (*destroy)(tbm_surface_queue_h queue);
void (*need_attach)(tbm_surface_queue_h queue);

void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
void (*release)(tbm_surface_queue_h queue, queue_node *node);
queue_node *(*dequeue)(tbm_surface_queue_h queue);
queue_node *(*acquire)(tbm_surface_queue_h queue);
void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
} tbm_surface_queue_interface;
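
/*
 * Backend hooks for a surface queue implementation. _tbm_surface_queue_init()
 * stores one of these tables in surface_queue->impl; every hook is optional
 * (it is checked for NULL before being called), and the generic helpers
 * _tbm_surface_queue_enqueue()/_dequeue()/_acquire()/_release() provide the
 * fallback behaviour. tbm_queue_default_impl and tbm_queue_sequence_impl
 * below are the two implementations provided by this file.
 */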
struct _tbm_surface_queue {

struct list_head list;

struct list_head destory_noti;
struct list_head dequeuable_noti;
struct list_head dequeue_noti;
struct list_head acquirable_noti;
struct list_head reset_noti;

pthread_mutex_t lock;
pthread_cond_t free_cond;
pthread_cond_t dirty_cond;

const tbm_surface_queue_interface *impl;

//For external buffer allocation
tbm_surface_alloc_cb alloc_cb;
tbm_surface_free_cb free_cb;

struct list_head item_link; /* link of surface queue */

/* LCOV_EXCL_START */
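
/*
 * Two levels of locking are used below: the global tbm_surf_queue_lock
 * serializes access to g_surf_queue_bufmgr->surf_queue_list and lets
 * _tbm_surface_queue_is_valid() detect queues that have already been
 * destroyed, while each queue's own surface_queue->lock protects its
 * free/dirty queues, node list and condition variables.
 */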
_tbm_surf_queue_mutex_init(void)
static bool tbm_surf_queue_mutex_init = false;
if (tbm_surf_queue_mutex_init)
if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
TBM_LOG_E("fail: tbm_surf_queue mutex init\n");
tbm_surf_queue_mutex_init = true;

_tbm_surf_queue_mutex_lock(void)
if (!_tbm_surf_queue_mutex_init())
pthread_mutex_lock(&tbm_surf_queue_lock);

_tbm_surf_queue_mutex_unlock(void)
pthread_mutex_unlock(&tbm_surf_queue_lock);

_init_tbm_surf_queue_bufmgr(void)
g_surf_queue_bufmgr = tbm_bufmgr_init(-1);

_deinit_tbm_surf_queue_bufmgr(void)
if (!g_surf_queue_bufmgr)
tbm_bufmgr_deinit(g_surf_queue_bufmgr);
g_surf_queue_bufmgr = NULL;
_tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
tbm_surface_queue_h old_data;

if (surface_queue == NULL || g_surf_queue_bufmgr == NULL) {
TBM_TRACE("error: surface_queue is NULL or not initialized\n");

if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
TBM_TRACE("error: surf_queue_list is empty\n");

LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
if (old_data == surface_queue) {
TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);

TBM_TRACE("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
_queue_node_create(void)
queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);

_queue_node_delete(queue_node *node)
LIST_DEL(&node->item_link);
LIST_DEL(&node->link);

_queue_is_empty(queue *queue)
if (LIST_IS_EMPTY(&queue->head))

_queue_node_push_back(queue *queue, queue_node *node)
LIST_ADDTAIL(&node->item_link, &queue->head);

_queue_node_push_front(queue *queue, queue_node *node)
LIST_ADD(&node->item_link, &queue->head);

_queue_node_pop_front(queue *queue)
node = LIST_ENTRY(queue_node, queue->head.next, item_link);
LIST_DEL(&node->item_link);

_queue_node_pop(queue *queue, queue_node *node)
LIST_DEL(&node->item_link);

_queue_get_node(tbm_surface_queue_h surface_queue, int type,
tbm_surface_h surface, int *out_type)
type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;

if (type & FREE_QUEUE) {
LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
if (node->surface == surface) {
*out_type = FREE_QUEUE;

if (type & DIRTY_QUEUE) {
LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
if (node->surface == surface) {
*out_type = DIRTY_QUEUE;

if (type & NODE_LIST) {
LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
if (node->surface == surface) {
*out_type = NODE_LIST;

_queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
if (surface_queue->free_cb) {
surface_queue->free_cb(surface_queue,
surface_queue->alloc_cb_data,
tbm_surface_destroy(node->surface);
_queue_node_delete(node);

_queue_init(queue *queue)
LIST_INITHEAD(&queue->head);

_notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
TBM_RETURN_IF_FAIL(cb != NULL);
queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
TBM_RETURN_IF_FAIL(item != NULL);
LIST_INITHEAD(&item->link);
LIST_ADDTAIL(&item->link, list);

_notify_remove(struct list_head *list,
tbm_surface_queue_notify_cb cb, void *data)
queue_notify *item, *tmp;

LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
if (item->cb == cb && item->data == data) {
LIST_DEL(&item->link);
399 TBM_LOG_E("Cannot find notifiy\n");
_notify_remove_all(struct list_head *list)
queue_notify *item, *tmp;

LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
LIST_DEL(&item->link);

_notify_emit(tbm_surface_queue_h surface_queue,
struct list_head *list)
LIST_FOR_EACH_ENTRY(item, list, link)
item->cb(surface_queue, item->data);
_tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
if (node->type == type)

_tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
tbm_surface_h surface)
node = _queue_node_create();
TBM_RETURN_IF_FAIL(node != NULL);
tbm_surface_internal_ref(surface);
node->surface = surface;
LIST_ADDTAIL(&node->link, &surface_queue->list);
surface_queue->num_attached++;
_queue_node_push_back(&surface_queue->free_queue, node);

_tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
tbm_surface_h surface)
node = _queue_get_node(surface_queue, 0, surface, &queue_type);
_queue_delete_node(surface_queue, node);
surface_queue->num_attached--;

_tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
queue_node *node, int push_back)
_queue_node_push_back(&surface_queue->dirty_queue, node);
_queue_node_push_front(&surface_queue->dirty_queue, node);

_tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
if (_queue_is_empty(&surface_queue->free_queue)) {
if (surface_queue->impl && surface_queue->impl->need_attach)
surface_queue->impl->need_attach(surface_queue);
if (_queue_is_empty(&surface_queue->free_queue))
node = _queue_node_pop_front(&surface_queue->free_queue);

_tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
if (_queue_is_empty(&surface_queue->dirty_queue))
node = _queue_node_pop_front(&surface_queue->dirty_queue);

_tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
queue_node *node, int push_back)
_queue_node_push_back(&surface_queue->free_queue, node);
_queue_node_push_front(&surface_queue->free_queue, node);

_tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
int width, int height, int format,
const tbm_surface_queue_interface *impl, void *data)
TBM_RETURN_IF_FAIL(surface_queue != NULL);
TBM_RETURN_IF_FAIL(impl != NULL);

if (!g_surf_queue_bufmgr)
_init_tbm_surf_queue_bufmgr();

pthread_mutex_init(&surface_queue->lock, NULL);
pthread_cond_init(&surface_queue->free_cond, NULL);
pthread_cond_init(&surface_queue->dirty_cond, NULL);

surface_queue->queue_size = queue_size;
surface_queue->width = width;
surface_queue->height = height;
surface_queue->format = format;
surface_queue->impl = impl;
surface_queue->impl_data = data;

_queue_init(&surface_queue->free_queue);
_queue_init(&surface_queue->dirty_queue);
LIST_INITHEAD(&surface_queue->list);

LIST_INITHEAD(&surface_queue->destory_noti);
LIST_INITHEAD(&surface_queue->dequeuable_noti);
LIST_INITHEAD(&surface_queue->dequeue_noti);
LIST_INITHEAD(&surface_queue->acquirable_noti);
LIST_INITHEAD(&surface_queue->reset_noti);

if (surface_queue->impl && surface_queue->impl->init)
surface_queue->impl->init(surface_queue);

LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
tbm_surface_queue_error_e
tbm_surface_queue_add_destroy_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_add(&surface_queue->destory_noti, destroy_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;
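
/*
 * Minimal usage sketch (illustrative only, not part of the original source):
 * registering a destroy notification. The callback name and user data are
 * hypothetical.
 *
 *   static void
 *   on_queue_destroyed(tbm_surface_queue_h queue, void *data)
 *   {
 *       // invalidate any cached pointer to the queue here
 *   }
 *
 *   tbm_surface_queue_add_destroy_cb(queue, on_queue_destroyed, ctx);
 *   ...
 *   tbm_surface_queue_remove_destroy_cb(queue, on_queue_destroyed, ctx);
 */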
tbm_surface_queue_error_e
tbm_surface_queue_remove_destroy_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_remove(&surface_queue->destory_noti, destroy_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_add_dequeuable_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_remove_dequeuable_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_add_dequeue_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_remove_dequeue_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_add_acquirable_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_remove_acquirable_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_set_alloc_cb(
tbm_surface_queue_h surface_queue,
tbm_surface_alloc_cb alloc_cb,
tbm_surface_free_cb free_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

surface_queue->alloc_cb = alloc_cb;
surface_queue->free_cb = free_cb;
surface_queue->alloc_cb_data = data;

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;
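
/*
 * Minimal usage sketch (illustrative only, not part of the original source):
 * supplying external buffer allocation. The callback names are hypothetical;
 * tbm_surface_create() is the public tbm_surface allocation call, and the
 * callback signatures follow the way alloc_cb/free_cb are invoked in
 * __tbm_queue_default_need_attach() and _queue_delete_node() below.
 *
 *   static tbm_surface_h
 *   my_alloc(tbm_surface_queue_h queue, void *data)
 *   {
 *       return tbm_surface_create(tbm_surface_queue_get_width(queue),
 *                                 tbm_surface_queue_get_height(queue),
 *                                 tbm_surface_queue_get_format(queue));
 *   }
 *
 *   static void
 *   my_free(tbm_surface_queue_h queue, void *data, tbm_surface_h surface)
 *   {
 *       // release per-surface bookkeeping; _queue_delete_node() also calls
 *       // tbm_surface_destroy() on the surface afterwards
 *   }
 *
 *   tbm_surface_queue_set_alloc_cb(queue, my_alloc, my_free, NULL);
 */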
tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

width = surface_queue->width;

_tbm_surf_queue_mutex_unlock();

tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

height = surface_queue->height;

_tbm_surf_queue_mutex_unlock();

tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

format = surface_queue->format;

_tbm_surf_queue_mutex_unlock();

tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

queue_size = surface_queue->queue_size;

_tbm_surf_queue_mutex_unlock();

tbm_surface_queue_error_e
tbm_surface_queue_add_reset_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_add(&surface_queue->reset_noti, reset_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_remove_reset_cb(
tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

_notify_remove(&surface_queue->reset_noti, reset_cb, data);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_enqueue(tbm_surface_queue_h
surface_queue, tbm_surface_h surface)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

tbm_surface_internal_dump_buffer(surface, "enqueue");

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

node = _queue_get_node(surface_queue, 0, surface, &queue_type);
if (node == NULL || queue_type != NODE_LIST) {
910 TBM_LOG_E("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();
return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;

if (surface_queue->impl && surface_queue->impl->enqueue)
surface_queue->impl->enqueue(surface_queue, node);
_tbm_surface_queue_enqueue(surface_queue, node, 1);

if (_queue_is_empty(&surface_queue->dirty_queue)) {
TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();
return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;

node->type = QUEUE_NODE_TYPE_ENQUEUE;

pthread_mutex_unlock(&surface_queue->lock);
pthread_cond_signal(&surface_queue->dirty_cond);

_tbm_surf_queue_mutex_unlock();

_notify_emit(surface_queue, &surface_queue->acquirable_noti);

return TBM_SURFACE_QUEUE_ERROR_NONE;
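
/*
 * Minimal producer-side sketch (illustrative only, not part of the original
 * source): render into a dequeued surface and hand it to the consumer.
 * render_frame() is a hypothetical helper.
 *
 *   tbm_surface_h surface;
 *
 *   if (tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       render_frame(surface);
 *       tbm_surface_queue_enqueue(queue, surface);   // surface becomes acquirable
 *   }
 */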
tbm_surface_queue_error_e
tbm_surface_queue_dequeue(tbm_surface_queue_h
surface_queue, tbm_surface_h *surface)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

pthread_mutex_lock(&surface_queue->lock);

if (surface_queue->impl && surface_queue->impl->dequeue)
node = surface_queue->impl->dequeue(surface_queue);
node = _tbm_surface_queue_dequeue(surface_queue);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();
return TBM_SURFACE_QUEUE_ERROR_EMPTY;

if (node->surface == NULL) {
TBM_LOG_E("_queue_node_pop_front failed\n");
pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();
return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;

node->type = QUEUE_NODE_TYPE_DEQUEUE;
*surface = node->surface;

TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

_notify_emit(surface_queue, &surface_queue->dequeue_noti);

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

if (_queue_is_empty(&surface_queue->free_queue)) {
if (surface_queue->impl && surface_queue->impl->need_attach)
surface_queue->impl->need_attach(surface_queue);

if (!_tbm_surface_queue_is_valid(surface_queue)) {
TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
_tbm_surf_queue_mutex_unlock();

if (_queue_is_empty(&surface_queue->free_queue)) {
_tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE)) {
_tbm_surf_queue_mutex_unlock();

pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);

_tbm_surf_queue_mutex_lock();

if (!_tbm_surface_queue_is_valid(surface_queue)) {
TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
_tbm_surf_queue_mutex_unlock();

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();
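
/*
 * Minimal sketch (illustrative only, not part of the original source):
 * blocking until a buffer can be dequeued. With wait != 0,
 * tbm_surface_queue_can_dequeue() waits on free_cond while the free queue
 * is empty and buffers are still held by the consumer.
 *
 *   tbm_surface_h surface;
 *
 *   if (tbm_surface_queue_can_dequeue(queue, 1))
 *       tbm_surface_queue_dequeue(queue, &surface);
 */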
tbm_surface_queue_error_e
tbm_surface_queue_release(tbm_surface_queue_h
surface_queue, tbm_surface_h surface)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

node = _queue_get_node(surface_queue, 0, surface, &queue_type);
if (node == NULL || queue_type != NODE_LIST) {
1071 TBM_LOG_E("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
1073 pthread_mutex_unlock(&surface_queue->lock);
1075 _tbm_surf_queue_mutex_unlock();
1076 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1079 if (surface_queue->queue_size < surface_queue->num_attached) {
1081 TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1083 if (surface_queue->impl && surface_queue->impl->need_detach)
1084 surface_queue->impl->need_detach(surface_queue, node);
1086 _tbm_surface_queue_detach(surface_queue, surface);
1088 pthread_mutex_unlock(&surface_queue->lock);
1090 _tbm_surf_queue_mutex_unlock();
1091 return TBM_SURFACE_QUEUE_ERROR_NONE;
1094 if (surface_queue->impl && surface_queue->impl->release)
1095 surface_queue->impl->release(surface_queue, node);
1097 _tbm_surface_queue_release(surface_queue, node, 1);
1099 if (_queue_is_empty(&surface_queue->free_queue)) {
1100 pthread_mutex_unlock(&surface_queue->lock);
1102 _tbm_surf_queue_mutex_unlock();
1103 return TBM_SURFACE_QUEUE_ERROR_NONE;
1106 node->type = QUEUE_NODE_TYPE_RELEASE;
1108 pthread_mutex_unlock(&surface_queue->lock);
1109 pthread_cond_signal(&surface_queue->free_cond);
1111 _tbm_surf_queue_mutex_unlock();
1113 _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1115 return TBM_SURFACE_QUEUE_ERROR_NONE;
1118 tbm_surface_queue_error_e
1119 tbm_surface_queue_acquire(tbm_surface_queue_h
1120 surface_queue, tbm_surface_h *surface)
1124 _tbm_surf_queue_mutex_lock();
1126 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1127 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1128 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1129 TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1131 pthread_mutex_lock(&surface_queue->lock);
1133 if (surface_queue->impl && surface_queue->impl->acquire)
1134 node = surface_queue->impl->acquire(surface_queue);
1136 node = _tbm_surface_queue_acquire(surface_queue);
1140 pthread_mutex_unlock(&surface_queue->lock);
1142 _tbm_surf_queue_mutex_unlock();
1143 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1146 if (node->surface == NULL) {
1148 TBM_LOG_E("_queue_node_pop_front failed\n");
1149 pthread_mutex_unlock(&surface_queue->lock);
1151 _tbm_surf_queue_mutex_unlock();
1152 return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1155 node->type = QUEUE_NODE_TYPE_ACQUIRE;
1157 *surface = node->surface;
1159 TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1161 pthread_mutex_unlock(&surface_queue->lock);
1163 _tbm_surf_queue_mutex_unlock();
1166 tbm_surface_internal_dump_buffer(*surface, "acquire");
1168 return TBM_SURFACE_QUEUE_ERROR_NONE;
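
/*
 * Minimal consumer-side sketch (illustrative only, not part of the original
 * source): take the oldest enqueued buffer, present it, then return it so
 * the producer can dequeue it again. present_frame() is a hypothetical helper.
 *
 *   tbm_surface_h surface;
 *
 *   if (tbm_surface_queue_can_acquire(queue, 1) &&
 *       tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       present_frame(surface);
 *       tbm_surface_queue_release(queue, surface);   // back to the free queue
 *   }
 */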
tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

pthread_mutex_lock(&surface_queue->lock);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

if (_queue_is_empty(&surface_queue->dirty_queue)) {
_tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE)) {
_tbm_surf_queue_mutex_unlock();

pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);

_tbm_surf_queue_mutex_lock();

if (!_tbm_surface_queue_is_valid(surface_queue)) {
TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
_tbm_surf_queue_mutex_unlock();

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
queue_node *node, *tmp;

_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

LIST_DEL(&surface_queue->item_link);

LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
_queue_delete_node(surface_queue, node);

if (surface_queue->impl && surface_queue->impl->destroy)
surface_queue->impl->destroy(surface_queue);

_notify_emit(surface_queue, &surface_queue->destory_noti);

_notify_remove_all(&surface_queue->destory_noti);
_notify_remove_all(&surface_queue->dequeuable_noti);
_notify_remove_all(&surface_queue->dequeue_noti);
_notify_remove_all(&surface_queue->acquirable_noti);
_notify_remove_all(&surface_queue->reset_noti);

pthread_mutex_destroy(&surface_queue->lock);

free(surface_queue);

if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
_deinit_tbm_surf_queue_bufmgr();

_tbm_surf_queue_mutex_unlock();

tbm_surface_queue_error_e
tbm_surface_queue_reset(tbm_surface_queue_h
surface_queue, int width, int height, int format)
queue_node *node, *tmp;

_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

if (width == surface_queue->width && height == surface_queue->height &&
format == surface_queue->format) {
_tbm_surf_queue_mutex_unlock();
return TBM_SURFACE_QUEUE_ERROR_NONE;

pthread_mutex_lock(&surface_queue->lock);

surface_queue->width = width;
surface_queue->height = height;
surface_queue->format = format;
/* Destroy all attached surfaces and reinitialize the queues */
LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
_queue_delete_node(surface_queue, node);

_queue_init(&surface_queue->free_queue);
_queue_init(&surface_queue->dirty_queue);
LIST_INITHEAD(&surface_queue->list);

surface_queue->num_attached = 0;

if (surface_queue->impl && surface_queue->impl->reset)
surface_queue->impl->reset(surface_queue);

pthread_mutex_unlock(&surface_queue->lock);
pthread_cond_signal(&surface_queue->free_cond);

_tbm_surf_queue_mutex_unlock();

_notify_emit(surface_queue, &surface_queue->reset_noti);

return TBM_SURFACE_QUEUE_ERROR_NONE;
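
/*
 * Minimal sketch (illustrative only, not part of the original source):
 * resizing the queue when the output geometry changes. Existing buffers are
 * destroyed and the registered reset callbacks are notified; new_width and
 * new_height are hypothetical values.
 *
 *   tbm_surface_queue_reset(queue, new_width, new_height, TBM_FORMAT_ARGB8888);
 */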
tbm_surface_queue_error_e
tbm_surface_queue_set_size(tbm_surface_queue_h
surface_queue, int queue_size, int flush)
queue_node *node, *tmp;

_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

if ((surface_queue->queue_size == queue_size) && !flush) {
_tbm_surf_queue_mutex_unlock();
return TBM_SURFACE_QUEUE_ERROR_NONE;

pthread_mutex_lock(&surface_queue->lock);
/* Destroy all attached surfaces and reinitialize the queues */
LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
_queue_delete_node(surface_queue, node);

_queue_init(&surface_queue->free_queue);
_queue_init(&surface_queue->dirty_queue);
LIST_INITHEAD(&surface_queue->list);

surface_queue->num_attached = 0;
surface_queue->queue_size = queue_size;

if (surface_queue->impl && surface_queue->impl->reset)
surface_queue->impl->reset(surface_queue);

pthread_mutex_unlock(&surface_queue->lock);
pthread_cond_signal(&surface_queue->free_cond);

_tbm_surf_queue_mutex_unlock();

_notify_emit(surface_queue, &surface_queue->reset_noti);

return TBM_SURFACE_QUEUE_ERROR_NONE;

if (surface_queue->queue_size > queue_size) {
int need_del = surface_queue->queue_size - queue_size;

LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1354 TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1356 if (surface_queue->impl && surface_queue->impl->need_detach)
1357 surface_queue->impl->need_detach(surface_queue, node);
1359 _tbm_surface_queue_detach(surface_queue, node->surface);
1367 surface_queue->queue_size = queue_size;
1369 pthread_mutex_unlock(&surface_queue->lock);
1371 _tbm_surf_queue_mutex_unlock();
1373 return TBM_SURFACE_QUEUE_ERROR_NONE;
1377 tbm_surface_queue_error_e
1378 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1380 queue_node *node, *tmp;
1382 _tbm_surf_queue_mutex_lock();
1384 TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1385 TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1387 TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1389 if (surface_queue->num_attached == 0) {
1390 _tbm_surf_queue_mutex_unlock();
1391 return TBM_SURFACE_QUEUE_ERROR_NONE;
1394 pthread_mutex_lock(&surface_queue->lock);
/* Destroy all attached surfaces and reinitialize the queues */
LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
_queue_delete_node(surface_queue, node);

_queue_init(&surface_queue->free_queue);
_queue_init(&surface_queue->dirty_queue);
LIST_INITHEAD(&surface_queue->list);

surface_queue->num_attached = 0;

if (surface_queue->impl && surface_queue->impl->reset)
surface_queue->impl->reset(surface_queue);

pthread_mutex_unlock(&surface_queue->lock);
pthread_cond_signal(&surface_queue->free_cond);

_tbm_surf_queue_mutex_unlock();

_notify_emit(surface_queue, &surface_queue->reset_noti);

return TBM_SURFACE_QUEUE_ERROR_NONE;

tbm_surface_queue_error_e
tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
tbm_surface_h *surfaces, int *num)
_tbm_surf_queue_mutex_lock();

TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

pthread_mutex_lock(&surface_queue->lock);

LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
surfaces[*num] = node->surface;

pthread_mutex_unlock(&surface_queue->lock);

_tbm_surf_queue_mutex_unlock();

return TBM_SURFACE_QUEUE_ERROR_NONE;

} tbm_queue_default;
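
/*
 * Default queue implementation: buffers are attached lazily. need_attach()
 * runs whenever the free queue is empty and fewer than queue_size surfaces
 * are attached; the surface comes from the user's alloc_cb when one is set,
 * otherwise from tbm_surface_internal_create_with_flags() with the flags
 * passed to tbm_surface_queue_create().
 */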
__tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
free(surface_queue->impl_data);

__tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
tbm_surface_h surface;

if (surface_queue->queue_size == surface_queue->num_attached)

if (surface_queue->alloc_cb) {
_tbm_surf_queue_mutex_unlock();
surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
_tbm_surf_queue_mutex_lock();

tbm_surface_internal_ref(surface);

surface = tbm_surface_internal_create_with_flags(surface_queue->width,
surface_queue->height,
surface_queue->format,
TBM_RETURN_IF_FAIL(surface != NULL);

_tbm_surface_queue_attach(surface_queue, surface);
tbm_surface_internal_unref(surface);

static const tbm_surface_queue_interface tbm_queue_default_impl = {
NULL, /*__tbm_queue_default_init*/
NULL, /*__tbm_queue_default_reset*/
__tbm_queue_default_destroy,
__tbm_queue_default_need_attach,
NULL, /*__tbm_queue_default_enqueue*/
NULL, /*__tbm_queue_default_release*/
NULL, /*__tbm_queue_default_dequeue*/
NULL, /*__tbm_queue_default_acquire*/
NULL, /*__tbm_queue_default_need_detach*/

tbm_surface_queue_create(int queue_size, int width,
int height, int format, int flags)
TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);

_tbm_surf_queue_mutex_lock();

tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
sizeof(struct _tbm_surface_queue));
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

tbm_queue_default *data = (tbm_queue_default *) calloc(1,
sizeof(tbm_queue_default));
free(surface_queue);
_tbm_surf_queue_mutex_unlock();

data->flags = flags;
_tbm_surface_queue_init(surface_queue,
width, height, format,
&tbm_queue_default_impl, data);

_tbm_surf_queue_mutex_unlock();

return surface_queue;
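
/*
 * Minimal end-to-end sketch (illustrative only, not part of the original
 * source): a triple-buffered ARGB8888 queue. The sizes are arbitrary values.
 *
 *   tbm_surface_queue_h queue =
 *       tbm_surface_queue_create(3, 1280, 720, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *
 *   // producer: dequeue/enqueue; consumer: acquire/release (see sketches above)
 *
 *   tbm_surface_queue_destroy(queue);
 */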
} tbm_queue_sequence;
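
/*
 * Sequence queue implementation: dequeued buffers are tracked in
 * dequeue_list, and __tbm_queue_sequence_enqueue() only moves a buffer to
 * the dirty queue once every buffer dequeued before it has been enqueued,
 * so buffers reach the consumer in dequeue order.
 */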
__tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;

_queue_init(&data->dequeue_list);

__tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;

_queue_init(&data->dequeue_list);

__tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
free(surface_queue->impl_data);

__tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
tbm_surface_h surface;

if (surface_queue->queue_size == surface_queue->num_attached)

if (surface_queue->alloc_cb) {
_tbm_surf_queue_mutex_unlock();
surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
_tbm_surf_queue_mutex_lock();

tbm_surface_internal_ref(surface);

surface = tbm_surface_internal_create_with_flags(surface_queue->width,
surface_queue->height,
surface_queue->format,
TBM_RETURN_IF_FAIL(surface != NULL);

_tbm_surface_queue_attach(surface_queue, surface);
tbm_surface_internal_unref(surface);

__tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
queue_node *next, *tmp;

node->priv_flags = 0;

LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
if (next->priv_flags)
_queue_node_pop(&data->dequeue_list, next);
_tbm_surface_queue_enqueue(surface_queue, next, 1);

__tbm_queue_sequence_dequeue(tbm_surface_queue_h
tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;

node = _tbm_surface_queue_dequeue(surface_queue);
_queue_node_push_back(&data->dequeue_list, node);
node->priv_flags = 1;

static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
__tbm_queue_sequence_init,
__tbm_queue_sequence_reset,
__tbm_queue_sequence_destroy,
__tbm_queue_sequence_need_attach,
__tbm_queue_sequence_enqueue,
NULL, /*__tbm_queue_sequence_release*/
__tbm_queue_sequence_dequeue,
NULL, /*__tbm_queue_sequence_acquire*/
NULL, /*__tbm_queue_sequence_need_detach*/
tbm_surface_queue_sequence_create(int queue_size, int width,
int height, int format, int flags)
TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);

_tbm_surf_queue_mutex_lock();

tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
sizeof(struct _tbm_surface_queue));
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);

TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
sizeof(tbm_queue_sequence));
free(surface_queue);
_tbm_surf_queue_mutex_unlock();

data->flags = flags;
_tbm_surface_queue_init(surface_queue,
width, height, format,
&tbm_queue_sequence_impl, data);

_tbm_surf_queue_mutex_unlock();

return surface_queue;

/* LCOV_EXCL_STOP */