/**************************************************************************

Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.

Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sub license, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial portions
of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

#include "tbm_bufmgr_int.h"

/* Flags for _queue_get_node(): which internal list(s) to search. */
#define FREE_QUEUE	1
#define DIRTY_QUEUE	2
#define NODE_LIST	4

#define TBM_QUEUE_DEBUG 0

#if TBM_QUEUE_DEBUG
#define TBM_QUEUE_TRACE(fmt, ...)  fprintf(stderr, "[TBM(%d):%s] " fmt, getpid(), __func__, ##__VA_ARGS__)
#define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
#define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
#else
#define TBM_QUEUE_TRACE(fmt, ...)
#define TBM_LOCK()
#define TBM_UNLOCK()
#endif

/* Simple FIFO of queue_nodes, chained through queue_node.item_link. */
typedef struct {
	struct list_head head;
} queue;

/* One node per attached tbm_surface.  A node is always on the per-queue
 * node list (link) and, unless it is currently held by the client, on
 * either the free queue or the dirty queue (item_link). */
typedef struct {
	tbm_surface_h surface;

	struct list_head item_link;
	struct list_head link;

	unsigned int priv_flags;	/* private flags for each queue implementation */
} queue_node;

/* One registered notification callback. */
typedef struct {
	struct list_head link;

	tbm_surface_queue_notify_cb cb;
	void *data;
} queue_notify;

/* Backend hooks.  Any hook left NULL falls back to the default FIFO
 * behaviour implemented below. */
typedef struct _tbm_surface_queue_interface {
	void (*init)(tbm_surface_queue_h queue);
	void (*reset)(tbm_surface_queue_h queue);
	void (*destroy)(tbm_surface_queue_h queue);
	void (*need_attach)(tbm_surface_queue_h queue);

	void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
	void (*release)(tbm_surface_queue_h queue, queue_node *node);
	queue_node *(*dequeue)(tbm_surface_queue_h queue);
	queue_node *(*acquire)(tbm_surface_queue_h queue);
} tbm_surface_queue_interface;
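
/*
 * Sketch (illustration only, not part of the library): a custom backend
 * that only overrides need_attach and keeps the default FIFO behaviour for
 * everything else.  __my_need_attach is a hypothetical function.
 *
 *   static void
 *   __my_need_attach(tbm_surface_queue_h queue)
 *   {
 *       // allocate and attach surfaces on demand
 *   }
 *
 *   static const tbm_surface_queue_interface my_impl = {
 *       NULL,               // init
 *       NULL,               // reset
 *       NULL,               // destroy
 *       __my_need_attach,   // need_attach
 *       NULL, NULL,         // enqueue, release
 *       NULL, NULL,         // dequeue, acquire
 *   };
 */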

struct _tbm_surface_queue {
	int width;
	int height;
	int format;
	int queue_size;

	queue free_queue;	/* surfaces ready to be dequeued by the producer */
	queue dirty_queue;	/* enqueued surfaces waiting to be acquired */
	struct list_head list;	/* every node attached to this queue */

	struct list_head destroy_noti;
	struct list_head dequeuable_noti;
	struct list_head acquirable_noti;
	struct list_head reset_noti;

	pthread_mutex_t lock;
	pthread_cond_t free_cond;
	pthread_cond_t dirty_cond;

	const tbm_surface_queue_interface *impl;
	void *impl_data;

	//For external buffer allocation
	tbm_surface_alloc_cb alloc_cb;
	tbm_surface_free_cb free_cb;
	void *alloc_cb_data;
};

/* LCOV_EXCL_START */
static queue_node *
_queue_node_create(void)
{
	queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));

	TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);

	return node;
}

static void
_queue_node_delete(queue_node *node)
{
	LIST_DEL(&node->item_link);
	LIST_DEL(&node->link);
	free(node);
}

static int
_queue_is_empty(queue *queue)
{
	if (LIST_IS_EMPTY(&queue->head))
		return 1;

	return 0;
}

static void
_queue_node_push_back(queue *queue, queue_node *node)
{
	LIST_ADDTAIL(&node->item_link, &queue->head);
}

static void
_queue_node_push_front(queue *queue, queue_node *node)
{
	LIST_ADD(&node->item_link, &queue->head);
}

static queue_node *
_queue_node_pop_front(queue *queue)
{
	queue_node *node = NULL;

	node = LIST_ENTRY(queue_node, queue->head.next, item_link);

	LIST_DEL(&node->item_link);

	return node;
}

static queue_node *
_queue_node_pop(queue *queue, queue_node *node)
{
	LIST_DEL(&node->item_link);

	return node;
}

static queue_node *
_queue_get_node(tbm_surface_queue_h surface_queue, int type,
		tbm_surface_h surface, int *out_type)
{
	queue_node *node = NULL;
	queue_node *tmp = NULL;

	if (type == 0)
		type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;

	if (type & FREE_QUEUE) {
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head,
					 item_link) {
			if (node->surface == surface) {
				if (out_type)
					*out_type = FREE_QUEUE;

				return node;
			}
		}
	}

	if (type & DIRTY_QUEUE) {
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->dirty_queue.head,
					 item_link) {
			if (node->surface == surface) {
				if (out_type)
					*out_type = DIRTY_QUEUE;

				return node;
			}
		}
	}

	if (type & NODE_LIST) {
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
			if (node->surface == surface) {
				if (out_type)
					*out_type = NODE_LIST;

				return node;
			}
		}
	}

	return NULL;
}

static void
_queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
{
	if (node->surface) {
		if (surface_queue->free_cb) {
			surface_queue->free_cb(surface_queue,
					surface_queue->alloc_cb_data,
					node->surface);
		}

		tbm_surface_destroy(node->surface);
	}

	_queue_node_delete(node);
}

static void
_queue_init(queue *queue)
{
	LIST_INITHEAD(&queue->head);
}

static void
_notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
	    void *data)
{
	TBM_RETURN_IF_FAIL(cb != NULL);

	queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));

	TBM_RETURN_IF_FAIL(item != NULL);

	LIST_INITHEAD(&item->link);
	item->cb = cb;
	item->data = data;

	LIST_ADDTAIL(&item->link, list);
}

static void
_notify_remove(struct list_head *list,
	       tbm_surface_queue_notify_cb cb, void *data)
{
	queue_notify *item = NULL, *tmp = NULL;

	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
		if (item->cb == cb && item->data == data) {
			LIST_DEL(&item->link);
			free(item);
			return;
		}
	}

	TBM_LOG_E("Cannot find notify\n");
}

static void
_notify_remove_all(struct list_head *list)
{
	queue_notify *item = NULL, *tmp = NULL;

	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
		LIST_DEL(&item->link);
		free(item);
	}
}

static void
_notify_emit(tbm_surface_queue_h surface_queue,
	     struct list_head *list)
{
	queue_notify *item = NULL, *tmp = NULL;

	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
		item->cb(surface_queue, item->data);
	}
}

static void
_tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
			  tbm_surface_h surface)
{
	queue_node *node = NULL;

	node = _queue_node_create();
	TBM_RETURN_IF_FAIL(node != NULL);

	tbm_surface_internal_ref(surface);
	node->surface = surface;

	LIST_ADDTAIL(&node->link, &surface_queue->list);
	_queue_node_push_back(&surface_queue->free_queue, node);
}

static void
_tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
			  tbm_surface_h surface)
{
	queue_node *node = NULL;
	int queue_type;

	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node)
		_queue_delete_node(surface_queue, node);
}

static void
_tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
			   queue_node *node, int push_back)
{
	if (push_back)
		_queue_node_push_back(&surface_queue->dirty_queue, node);
	else
		_queue_node_push_front(&surface_queue->dirty_queue, node);
}

static queue_node *
_tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL;

	if (_queue_is_empty(&surface_queue->free_queue)) {
		if (surface_queue->impl && surface_queue->impl->need_attach)
			surface_queue->impl->need_attach(surface_queue);

		if (_queue_is_empty(&surface_queue->free_queue))
			return NULL;
	}

	node = _queue_node_pop_front(&surface_queue->free_queue);

	return node;
}

static queue_node *
_tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL;

	if (_queue_is_empty(&surface_queue->dirty_queue))
		return NULL;

	node = _queue_node_pop_front(&surface_queue->dirty_queue);

	return node;
}

static void
_tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
			   queue_node *node, int push_back)
{
	if (push_back)
		_queue_node_push_back(&surface_queue->free_queue, node);
	else
		_queue_node_push_front(&surface_queue->free_queue, node);
}

static void
_tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
			int queue_size,
			int width, int height, int format,
			const tbm_surface_queue_interface *impl, void *data)
{
	TBM_RETURN_IF_FAIL(surface_queue != NULL);
	TBM_RETURN_IF_FAIL(impl != NULL);

	memset(surface_queue, 0x00, sizeof(struct _tbm_surface_queue));

	pthread_mutex_init(&surface_queue->lock, NULL);
	pthread_cond_init(&surface_queue->free_cond, NULL);
	pthread_cond_init(&surface_queue->dirty_cond, NULL);

	surface_queue->queue_size = queue_size;
	surface_queue->width = width;
	surface_queue->height = height;
	surface_queue->format = format;
	surface_queue->impl = impl;
	surface_queue->impl_data = data;

	_queue_init(&surface_queue->free_queue);
	_queue_init(&surface_queue->dirty_queue);
	LIST_INITHEAD(&surface_queue->list);

	LIST_INITHEAD(&surface_queue->destroy_noti);
	LIST_INITHEAD(&surface_queue->acquirable_noti);
	LIST_INITHEAD(&surface_queue->dequeuable_noti);
	LIST_INITHEAD(&surface_queue->reset_noti);

	if (surface_queue->impl && surface_queue->impl->init)
		surface_queue->impl->init(surface_queue);
}

tbm_surface_queue_error_e
tbm_surface_queue_add_destroy_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
	void *data)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->destroy_noti, destroy_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_remove_destroy_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
	void *data)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->destroy_noti, destroy_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_add_dequeuable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
	void *data)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_remove_dequeuable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
	void *data)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_add_acquirable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
	void *data)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_remove_acquirable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
	void *data)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
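
/*
 * Usage sketch (illustration only): a consumer that wants to be told when a
 * buffer becomes acquirable.  on_acquirable() and wake_event_loop() are
 * hypothetical application functions, not part of libtbm.  The same cb/data
 * pair must be passed to the remove call.
 *
 *   static void
 *   on_acquirable(tbm_surface_queue_h queue, void *data)
 *   {
 *       wake_event_loop(data);   // defer the actual acquire to the main loop
 *   }
 *
 *   tbm_surface_queue_add_acquirable_cb(queue, on_acquirable, loop);
 *   ...
 *   tbm_surface_queue_remove_acquirable_cb(queue, on_acquirable, loop);
 */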

tbm_surface_queue_error_e
tbm_surface_queue_set_alloc_cb(
	tbm_surface_queue_h surface_queue,
	tbm_surface_alloc_cb alloc_cb,
	tbm_surface_free_cb free_cb,
	void *data)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	surface_queue->alloc_cb = alloc_cb;
	surface_queue->free_cb = free_cb;
	surface_queue->alloc_cb_data = data;

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
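
/*
 * Usage sketch (illustration only): letting the application allocate the
 * buffers itself, e.g. from its own pool.  my_pool_alloc()/my_pool_release()
 * are hypothetical; the callback signatures follow the way alloc_cb/free_cb
 * are invoked in this file.
 *
 *   static tbm_surface_h
 *   alloc_from_pool(tbm_surface_queue_h queue, void *data)
 *   {
 *       return my_pool_alloc(data,
 *                            tbm_surface_queue_get_width(queue),
 *                            tbm_surface_queue_get_height(queue),
 *                            tbm_surface_queue_get_format(queue));
 *   }
 *
 *   static void
 *   free_to_pool(tbm_surface_queue_h queue, void *data, tbm_surface_h surface)
 *   {
 *       my_pool_release(data, surface);
 *   }
 *
 *   tbm_surface_queue_set_alloc_cb(queue, alloc_from_pool, free_to_pool, pool);
 */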

int
tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL, 0);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	return surface_queue->width;
}

int
tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL, 0);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	return surface_queue->height;
}

int
tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL, 0);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	return surface_queue->format;
}

int
tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL, 0);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	return surface_queue->queue_size;
}

tbm_surface_queue_error_e
tbm_surface_queue_add_reset_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
	void *data)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->reset_noti, reset_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_remove_reset_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
	void *data)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->reset_noti, reset_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_enqueue(tbm_surface_queue_h
			  surface_queue, tbm_surface_h surface)
{
	queue_node *node = NULL;
	int queue_type = 0;

	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_enqueue::Surface is not owned by the client (still in free_queue/dirty_queue or not attached) node:%p, queue:%d\n",
			  node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);
		return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
	}

	if (surface_queue->impl && surface_queue->impl->enqueue)
		surface_queue->impl->enqueue(surface_queue, node);
	else
		_tbm_surface_queue_enqueue(surface_queue, node, 1);

	if (_queue_is_empty(&surface_queue->dirty_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->dirty_cond);

	_notify_emit(surface_queue, &surface_queue->acquirable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_dequeue(tbm_surface_queue_h
			  surface_queue, tbm_surface_h *surface)
{
	queue_node *node = NULL;

	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	if (surface_queue->impl && surface_queue->impl->dequeue)
		node = surface_queue->impl->dequeue(surface_queue);
	else
		node = _tbm_surface_queue_dequeue(surface_queue);

	if (node == NULL) {
		pthread_mutex_unlock(&surface_queue->lock);
		return TBM_SURFACE_QUEUE_ERROR_EMPTY;
	}

	if (node->surface == NULL) {
		TBM_LOG_E("tbm_surface_queue_dequeue::dequeued node has no surface\n");
		pthread_mutex_unlock(&surface_queue->lock);
		return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
	}

	*surface = node->surface;

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
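
/*
 * Usage sketch (illustration only): a producer loop that obtains a free
 * buffer, renders into it, and queues it for the consumer.  render_frame()
 * is a hypothetical application function.
 *
 *   tbm_surface_h surf;
 *
 *   while (tbm_surface_queue_can_dequeue(queue, 1)) {
 *       if (tbm_surface_queue_dequeue(queue, &surf) != TBM_SURFACE_QUEUE_ERROR_NONE)
 *           break;
 *       render_frame(surf);                     // draw into the buffer
 *       tbm_surface_queue_enqueue(queue, surf); // hand it to the consumer
 *   }
 */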

int
tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL, 0);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (_queue_is_empty(&surface_queue->free_queue)) {
		if (surface_queue->impl && surface_queue->impl->need_attach)
			surface_queue->impl->need_attach(surface_queue);

		if (_queue_is_empty(&surface_queue->free_queue)) {
			if (wait) {
				pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
				pthread_mutex_unlock(&surface_queue->lock);
				return 1;
			}

			pthread_mutex_unlock(&surface_queue->lock);
			return 0;
		}
	}

	pthread_mutex_unlock(&surface_queue->lock);

	return 1;
}

tbm_surface_queue_error_e
tbm_surface_queue_release(tbm_surface_queue_h
			  surface_queue, tbm_surface_h surface)
{
	queue_node *node = NULL;
	int queue_type = 0;

	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_release::Surface is not owned by the client (still in free_queue/dirty_queue or not attached) node:%p, queue:%d\n",
			  node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);
		return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
	}

	if (surface_queue->impl && surface_queue->impl->release)
		surface_queue->impl->release(surface_queue, node);
	else
		_tbm_surface_queue_release(surface_queue, node, 1);

	if (_queue_is_empty(&surface_queue->free_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_notify_emit(surface_queue, &surface_queue->dequeuable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_acquire(tbm_surface_queue_h
			  surface_queue, tbm_surface_h *surface)
{
	queue_node *node = NULL;

	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	if (surface_queue->impl && surface_queue->impl->acquire)
		node = surface_queue->impl->acquire(surface_queue);
	else
		node = _tbm_surface_queue_acquire(surface_queue);

	if (node == NULL) {
		pthread_mutex_unlock(&surface_queue->lock);
		return TBM_SURFACE_QUEUE_ERROR_EMPTY;
	}

	if (node->surface == NULL) {
		TBM_LOG_E("tbm_surface_queue_acquire::acquired node has no surface\n");
		pthread_mutex_unlock(&surface_queue->lock);
		return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
	}

	*surface = node->surface;

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);

	pthread_mutex_unlock(&surface_queue->lock);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
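
/*
 * Usage sketch (illustration only): a consumer loop that waits for a
 * finished buffer, shows it, and hands it back.  present_on_screen() is a
 * hypothetical application function.
 *
 *   tbm_surface_h surf;
 *
 *   while (tbm_surface_queue_can_acquire(queue, 1)) {
 *       if (tbm_surface_queue_acquire(queue, &surf) != TBM_SURFACE_QUEUE_ERROR_NONE)
 *           break;
 *       present_on_screen(surf);                // e.g. scan-out or composition
 *       tbm_surface_queue_release(queue, surf); // make it dequeuable again
 *   }
 */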

int
tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL, 0);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (_queue_is_empty(&surface_queue->dirty_queue)) {
		if (wait) {
			pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
			pthread_mutex_unlock(&surface_queue->lock);
			return 1;
		}

		pthread_mutex_unlock(&surface_queue->lock);
		return 0;
	}

	pthread_mutex_unlock(&surface_queue->lock);

	return 1;
}

void
tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL, *tmp = NULL;

	TBM_RETURN_IF_FAIL(surface_queue != NULL);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_emit(surface_queue, &surface_queue->destroy_noti);

	if (surface_queue->impl && surface_queue->impl->destroy)
		surface_queue->impl->destroy(surface_queue);

	LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
		_queue_delete_node(surface_queue, node);
	}

	_notify_remove_all(&surface_queue->destroy_noti);
	_notify_remove_all(&surface_queue->acquirable_noti);
	_notify_remove_all(&surface_queue->dequeuable_noti);
	_notify_remove_all(&surface_queue->reset_noti);

	pthread_mutex_destroy(&surface_queue->lock);

	free(surface_queue);
}

tbm_surface_queue_error_e
tbm_surface_queue_reset(tbm_surface_queue_h
			surface_queue, int width, int height, int format)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	queue_node *node = NULL, *tmp = NULL;

	if (width == surface_queue->width && height == surface_queue->height &&
	    format == surface_queue->format)
		return TBM_SURFACE_QUEUE_ERROR_NONE;

	pthread_mutex_lock(&surface_queue->lock);

	surface_queue->width = width;
	surface_queue->height = height;
	surface_queue->format = format;

	/* Destroy all attached surfaces; they will be re-allocated on demand
	 * with the new size and format. */
	LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
		_queue_delete_node(surface_queue, node);
	}

	/* Re-initialize both internal queues and the node list. */
	_queue_init(&surface_queue->free_queue);
	_queue_init(&surface_queue->dirty_queue);
	LIST_INITHEAD(&surface_queue->list);

	if (surface_queue->impl && surface_queue->impl->reset)
		surface_queue->impl->reset(surface_queue);

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

tbm_surface_queue_error_e
tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
{
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	queue_node *node = NULL, *tmp = NULL;

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* Destroy all attached surfaces; they will be re-allocated on demand. */
	LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
		_queue_delete_node(surface_queue, node);
	}

	/* Re-initialize both internal queues and the node list. */
	_queue_init(&surface_queue->free_queue);
	_queue_init(&surface_queue->dirty_queue);
	LIST_INITHEAD(&surface_queue->list);

	if (surface_queue->impl && surface_queue->impl->reset)
		surface_queue->impl->reset(surface_queue);

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

/*
 * Default queue implementation: surfaces are allocated lazily up to
 * queue_size and recycled in plain FIFO order.
 */
typedef struct {
	int queue_size;
	int num_attached;
	int flags;
} tbm_queue_default;

static void
__tbm_queue_default_init(tbm_surface_queue_h surface_queue)
{
	tbm_queue_default *data = surface_queue->impl_data;

	data->num_attached = 0;
}

static void
__tbm_queue_default_reset(tbm_surface_queue_h surface_queue)
{
	tbm_queue_default *data = surface_queue->impl_data;

	data->num_attached = 0;
}

static void
__tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
{
	free(surface_queue->impl_data);
}

static void
__tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_default *data = surface_queue->impl_data;
	tbm_surface_h surface;

	if (data->queue_size == data->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		TBM_RETURN_IF_FAIL(surface != NULL);
		tbm_surface_internal_ref(surface);
	} else {
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
	data->num_attached++;
}

static const tbm_surface_queue_interface tbm_queue_default_impl = {
	__tbm_queue_default_init,
	__tbm_queue_default_reset,
	__tbm_queue_default_destroy,
	__tbm_queue_default_need_attach,
	NULL,				/*__tbm_queue_default_enqueue*/
	NULL,				/*__tbm_queue_default_release*/
	NULL,				/*__tbm_queue_default_dequeue*/
	NULL,				/*__tbm_queue_default_acquire*/
};

tbm_surface_queue_h
tbm_surface_queue_create(int queue_size, int width,
			 int height, int format, int flags)
{
	TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);

	tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
					     sizeof(struct _tbm_surface_queue));
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	tbm_queue_default *data = (tbm_queue_default *) calloc(1,
				   sizeof(tbm_queue_default));
	if (data == NULL) {
		free(surface_queue);
		return NULL;
	}

	data->queue_size = queue_size;
	data->flags = flags;
	_tbm_surface_queue_init(surface_queue,
				queue_size,
				width, height, format,
				&tbm_queue_default_impl, data);

	return surface_queue;
}
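
/*
 * Usage sketch (illustration only): a triple-buffered ARGB queue.
 * TBM_FORMAT_ARGB8888 and TBM_BO_DEFAULT come from the public tbm headers;
 * error handling is up to the application.
 *
 *   tbm_surface_queue_h queue =
 *       tbm_surface_queue_create(3, 720, 1280, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   if (!queue)
 *       return -1;
 *   ...
 *   tbm_surface_queue_destroy(queue);
 */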

/*
 * Sequence queue implementation: like the default queue, but enqueued
 * surfaces are published to the dirty queue in the order in which they were
 * dequeued, even if the client enqueues them out of order.
 */
typedef struct {
	int queue_size;
	int num_attached;
	int flags;

	queue dequeue_list;	/* nodes currently dequeued, in dequeue order */
} tbm_queue_sequence;

static void
__tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = surface_queue->impl_data;

	data->num_attached = 0;
	_queue_init(&data->dequeue_list);
}

static void
__tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = surface_queue->impl_data;

	data->num_attached = 0;
	_queue_init(&data->dequeue_list);
}

static void
__tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
{
	free(surface_queue->impl_data);
}

static void
__tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = surface_queue->impl_data;
	tbm_surface_h surface;

	if (data->queue_size == data->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		TBM_RETURN_IF_FAIL(surface != NULL);
		tbm_surface_internal_ref(surface);
	} else {
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
	data->num_attached++;
}

static void
__tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
			     queue_node *node)
{
	tbm_queue_sequence *data = surface_queue->impl_data;
	queue_node *next = NULL;
	queue_node *tmp = NULL;

	node->priv_flags = 0;	/* no longer held by the client */

	/* Publish nodes to the dirty queue in dequeue order; stop at the
	 * first node that is still held by the client. */
	LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
		if (next->priv_flags)
			break;

		_queue_node_pop(&data->dequeue_list, next);
		_tbm_surface_queue_enqueue(surface_queue, next, 1);
	}
}

static queue_node *
__tbm_queue_sequence_dequeue(tbm_surface_queue_h
			     surface_queue)
{
	tbm_queue_sequence *data = surface_queue->impl_data;
	queue_node *node = NULL;

	node = _tbm_surface_queue_dequeue(surface_queue);
	if (node) {
		_queue_node_push_back(&data->dequeue_list, node);
		node->priv_flags = 1;	/* held by the client until it is enqueued */
	}

	return node;
}

static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
	__tbm_queue_sequence_init,
	__tbm_queue_sequence_reset,
	__tbm_queue_sequence_destroy,
	__tbm_queue_sequence_need_attach,
	__tbm_queue_sequence_enqueue,
	NULL,					/*__tbm_queue_sequence_release*/
	__tbm_queue_sequence_dequeue,
	NULL,					/*__tbm_queue_sequence_acquire*/
};

tbm_surface_queue_h
tbm_surface_queue_sequence_create(int queue_size, int width,
				  int height, int format, int flags)
{
	TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);

	tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
					     sizeof(struct _tbm_surface_queue));
	TBM_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
				    sizeof(tbm_queue_sequence));
	if (data == NULL) {
		free(surface_queue);
		return NULL;
	}

	data->queue_size = queue_size;
	data->flags = flags;
	_tbm_surface_queue_init(surface_queue,
				queue_size,
				width, height, format,
				&tbm_queue_sequence_impl, data);

	return surface_queue;
}
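
/*
 * Usage sketch (illustration only): with a sequence queue, buffers become
 * acquirable in the order they were dequeued even if the producer enqueues
 * them out of order, which helps when frames are rendered asynchronously.
 *
 *   tbm_surface_queue_h queue =
 *       tbm_surface_queue_sequence_create(3, 720, 1280, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   // dequeue A, then B; enqueue B first, then A:
 *   // the consumer still acquires A before B.
 */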

/* LCOV_EXCL_STOP */