tbm_surface_queue_enqueue/release: changed the check for success
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
#define FREE_QUEUE      1	/* select/search the free (dequeueable) queue */
#define DIRTY_QUEUE     2	/* select/search the dirty (enqueued) queue */
#define NODE_LIST       4	/* select/search the list of all attached nodes */

#define TBM_QUEUE_DEBUG 0

/* Per-call trace logging; compiled in only when the library is built with TRACE. */
#ifdef TRACE
#define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
#else
#define TBM_QUEUE_TRACE(fmt, ...)
#endif /* TRACE */

/* Lock/unlock debug logging, active only when TBM_QUEUE_DEBUG is non-zero. */
#if TBM_QUEUE_DEBUG
#define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
#define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
#else
#define TBM_LOCK()
#define TBM_UNLOCK()
#endif
56
/* Process-wide bufmgr shared by all surface queues created in this file. */
static tbm_bufmgr g_surf_queue_bufmgr;
/* Global mutex serializing the surface-queue list and the public API entry points. */
static pthread_mutex_t tbm_surf_queue_lock;
/* NOTE(review): forward declaration only; definition is not in this chunk.
 * Name differs from the static _tbm_surf_queue_mutex_unlock() below
 * ("surface" vs "surf") — presumably an exported variant defined later
 * in the file; confirm against the full source. */
void _tbm_surface_queue_mutex_unlock(void);

/* check condition: on failure log the condition, drop the global mutex,
 * and return from the *calling* function. */
#define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
	if (!(cond)) {\
		TBM_LOG_E("'%s' failed.\n", #cond);\
		_tbm_surf_queue_mutex_unlock();\
		return;\
	} \
}

/* Same as above, but returns 'val' from the calling function. */
#define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
	if (!(cond)) {\
		TBM_LOG_E("'%s' failed.\n", #cond);\
		_tbm_surf_queue_mutex_unlock();\
		return val;\
	} \
}
77
/* The queue operation a node last went through (its current life-cycle state). */
typedef enum _queue_node_type {
	QUEUE_NODE_TYPE_NONE,
	QUEUE_NODE_TYPE_DEQUEUE,
	QUEUE_NODE_TYPE_ENQUEUE,
	QUEUE_NODE_TYPE_ACQUIRE,
	QUEUE_NODE_TYPE_RELEASE
} Queue_Node_Type;
85
/* A counted intrusive list of queue_nodes (linked via queue_node.item_link). */
typedef struct {
	struct list_head head;	/* list head of queue_node.item_link entries */
	int count;		/* number of nodes currently linked */
} queue;
90
/* Book-keeping record for one surface attached to a surface queue. */
typedef struct {
	tbm_surface_h surface;		/* the attached surface (internally ref'd) */

	struct list_head item_link;	/* membership in free_queue or dirty_queue */
	struct list_head link;		/* membership in surface_queue->list (all nodes) */

	Queue_Node_Type type;		/* last queue operation applied to this node */

	unsigned int priv_flags;	/*for each queue*/

	int delete_pending;		/* NOTE(review): deferred-deletion flag; its
					 * use is outside this chunk — confirm */
} queue_node;
103
/* One registered notify callback + its user data, kept in a per-event list. */
typedef struct {
	struct list_head link;

	tbm_surface_queue_notify_cb cb;
	void *data;
} queue_notify;
110
/* One registered trace callback + its user data, kept in trace_noti. */
typedef struct {
	struct list_head link;

	tbm_surface_queue_trace_cb cb;
	void *data;
} queue_trace;
117
/* Backend vtable: a concrete queue implementation supplies these hooks. */
typedef struct _tbm_surface_queue_interface {
	void (*init)(tbm_surface_queue_h queue);	/* called once at queue init */
	void (*reset)(tbm_surface_queue_h queue);
	void (*destroy)(tbm_surface_queue_h queue);
	void (*need_attach)(tbm_surface_queue_h queue);	/* may attach new surfaces */

	void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
	void (*release)(tbm_surface_queue_h queue, queue_node *node);
	queue_node *(*dequeue)(tbm_surface_queue_h queue);
	queue_node *(*acquire)(tbm_surface_queue_h queue);
	void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
} tbm_surface_queue_interface;
130
/* A surface queue: a pool of surfaces cycling between a free queue
 * (dequeueable by the producer) and a dirty queue (acquirable by the consumer). */
struct _tbm_surface_queue {
	int width;		/* surface geometry/format for newly attached surfaces */
	int height;
	int format;
	int queue_size;		/* target number of surfaces in the queue */
	int num_attached;	/* surfaces currently attached */

	queue free_queue;	/* nodes ready for dequeue */
	queue dirty_queue;	/* nodes enqueued, awaiting acquire */
	struct list_head list;	/* every attached node, via queue_node.link */

	/* callback lists, one per event ("destory" spelling kept — field name
	 * is used as-is throughout the file) */
	struct list_head destory_noti;
	struct list_head dequeuable_noti;
	struct list_head dequeue_noti;
	struct list_head can_dequeue_noti;
	struct list_head acquirable_noti;
	struct list_head reset_noti;
	struct list_head trace_noti;

	pthread_mutex_t lock;		/* per-queue state lock */
	pthread_cond_t free_cond;	/* signaled when free_queue gains a node */
	pthread_cond_t dirty_cond;	/* signaled when dirty_queue gains a node */

	const tbm_surface_queue_interface *impl;	/* backend vtable */
	void *impl_data;				/* backend private data */

	//For external buffer allocation
	tbm_surface_alloc_cb alloc_cb;
	tbm_surface_free_cb free_cb;
	void *alloc_cb_data;

	struct list_head item_link; /* link of surface queue */

	int modes;
};
166
167 /* LCOV_EXCL_START */
168
169 static bool
170 _tbm_surf_queue_mutex_init(void)
171 {
172         static bool tbm_surf_queue_mutex_init = false;
173
174         if (tbm_surf_queue_mutex_init)
175                 return true;
176
177         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
178                 TBM_LOG_E("fail: pthread_mutex_init\n");
179                 return false;
180         }
181
182         tbm_surf_queue_mutex_init = true;
183
184         return true;
185 }
186
187 static void
188 _tbm_surf_queue_mutex_lock(void)
189 {
190         if (!_tbm_surf_queue_mutex_init()) {
191                 TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
192                 return;
193         }
194
195         pthread_mutex_lock(&tbm_surf_queue_lock);
196 }
197
/* Release the global surface-queue mutex taken by _tbm_surf_queue_mutex_lock(). */
static void
_tbm_surf_queue_mutex_unlock(void)
{
	pthread_mutex_unlock(&tbm_surf_queue_lock);
}
203
/* Create the process-wide bufmgr (fd -1 lets tbm pick the default device). */
static void
_init_tbm_surf_queue_bufmgr(void)
{
	g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
}
209
210 static void
211 _deinit_tbm_surf_queue_bufmgr(void)
212 {
213         if (!g_surf_queue_bufmgr)
214                 return;
215
216         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
217         g_surf_queue_bufmgr = NULL;
218 }
219
/*
 * Check that surface_queue is a live queue registered in the global
 * bufmgr's surf_queue_list. Returns 1 if found, 0 otherwise (with a log).
 * Must be called with the global surface-queue mutex held.
 */
static int
_tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
{
	tbm_surface_queue_h old_data = NULL;

	if (surface_queue == NULL) {
		TBM_LOG_E("error: surface_queue is NULL.\n");
		return 0;
	}

	if (g_surf_queue_bufmgr == NULL) {
		TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
		return 0;
	}

	if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
		TBM_LOG_E("error: surf_queue_list is empty\n");
		return 0;
	}

	/* linear scan: the handle is valid only if it is still linked in */
	LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
				item_link) {
		if (old_data == surface_queue) {
			TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
			return 1;
		}
	}

	TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);

	return 0;
}
252
253 static queue_node *
254 _queue_node_create(void)
255 {
256         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
257
258         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
259
260         return node;
261 }
262
/* Unlink a node from both lists it may sit on and free it.
 * Does not touch node->surface; see _queue_delete_node() for that. */
static void
_queue_node_delete(queue_node *node)
{
	LIST_DEL(&node->item_link);
	LIST_DEL(&node->link);
	free(node);
}
270
271 static int
272 _queue_is_empty(queue *queue)
273 {
274         if (LIST_IS_EMPTY(&queue->head))
275                 return 1;
276
277         return 0;
278 }
279
/* Append a node to the tail of the queue and bump its count. */
static void
_queue_node_push_back(queue *queue, queue_node *node)
{
	LIST_ADDTAIL(&node->item_link, &queue->head);
	queue->count++;
}
286
/* Prepend a node to the head of the queue and bump its count. */
static void
_queue_node_push_front(queue *queue, queue_node *node)
{
	LIST_ADD(&node->item_link, &queue->head);
	queue->count++;
}
293
/*
 * Remove and return the node at the head of the queue.
 * Returns NULL when the queue is empty or its head was never initialized.
 */
static queue_node *
_queue_node_pop_front(queue *queue)
{
	queue_node *node;

	/* guard against an uninitialized list head */
	if (!queue->head.next) return NULL;
	if (!queue->count) return NULL;

	node = LIST_ENTRY(queue_node, queue->head.next, item_link);

	/* DELINIT so the node can be safely re-linked or deleted later */
	LIST_DELINIT(&node->item_link);
	queue->count--;

	return node;
}
309
/* Remove a specific node from the queue and return it.
 * Caller must guarantee the node is actually linked in this queue. */
static queue_node *
_queue_node_pop(queue *queue, queue_node *node)
{
	LIST_DELINIT(&node->item_link);
	queue->count--;

	return node;
}
318
/*
 * Find the node owning 'surface', searching the lists selected by the
 * FREE_QUEUE/DIRTY_QUEUE/NODE_LIST bits in 'type' (0 means search all).
 * On success returns the node and stores the list it was found in into
 * *out_type (if non-NULL); on failure logs and returns NULL with
 * *out_type set to 0.
 */
static queue_node *
_queue_get_node(tbm_surface_queue_h surface_queue, int type,
		tbm_surface_h surface, int *out_type)
{
	queue_node *node = NULL;

	if (type == 0)
		type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
	if (out_type)
		*out_type = 0;

	if (type & FREE_QUEUE) {
		LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
					 item_link) {
			if (node->surface == surface) {
				if (out_type)
					*out_type = FREE_QUEUE;

				return node;
			}
		}
	}

	if (type & DIRTY_QUEUE) {
		LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
					 item_link) {
			if (node->surface == surface) {
				if (out_type)
					*out_type = DIRTY_QUEUE;

				return node;
			}
		}
	}

	/* NODE_LIST covers every attached node, including ones currently
	 * held by the producer/consumer (not in either queue) */
	if (type & NODE_LIST) {
		LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
			if (node->surface == surface) {
				if (out_type)
					*out_type = NODE_LIST;

				return node;
			}
		}
	}

	TBM_LOG_E("fail to get the queue_node.\n");

	return NULL;
}
369
/*
 * Destroy a node together with its surface: run the user's free_cb
 * (if registered), drop the surface reference, then unlink and free
 * the node itself.
 */
static void
_queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
{
	if (node->surface) {
		if (surface_queue->free_cb) {
			surface_queue->free_cb(surface_queue,
					surface_queue->alloc_cb_data,
					node->surface);
		}

		tbm_surface_destroy(node->surface);
	}

	_queue_node_delete(node);
}
385
386 static void
387 _queue_init(queue *queue)
388 {
389         LIST_INITHEAD(&queue->head);
390
391         queue->count = 0;
392 }
393
394 static void
395 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
396             void *data)
397 {
398         TBM_RETURN_IF_FAIL(cb != NULL);
399
400         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
401
402         TBM_RETURN_IF_FAIL(item != NULL);
403
404         LIST_INITHEAD(&item->link);
405         item->cb = cb;
406         item->data = data;
407
408         LIST_ADDTAIL(&item->link, list);
409 }
410
411 static void
412 _notify_remove(struct list_head *list,
413                tbm_surface_queue_notify_cb cb, void *data)
414 {
415         queue_notify *item = NULL, *tmp;
416
417         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
418                 if (item->cb == cb && item->data == data) {
419                         LIST_DEL(&item->link);
420                         free(item);
421                         return;
422                 }
423         }
424
425         TBM_LOG_E("Cannot find notifiy\n");
426 }
427
/* Free every entry of a noti list; used during queue teardown. */
static void
_notify_remove_all(struct list_head *list)
{
	queue_notify *item = NULL, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
		LIST_DEL(&item->link);
		free(item);
	}
}
438
439 static void
440 _notify_emit(tbm_surface_queue_h surface_queue,
441              struct list_head *list)
442 {
443         queue_notify *item = NULL, *tmp;;
444
445         /*
446                 The item->cb is the outside function of the libtbm.
447                 The tbm user may/can remove the item of the list,
448                 so we have to use the LIST_FOR_EACH_ENTRY_SAFE.
449         */
450         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
451                 item->cb(surface_queue, item->data);
452 }
453
454 static void
455 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
456             void *data)
457 {
458         TBM_RETURN_IF_FAIL(cb != NULL);
459
460         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
461
462         TBM_RETURN_IF_FAIL(item != NULL);
463
464         LIST_INITHEAD(&item->link);
465         item->cb = cb;
466         item->data = data;
467
468         LIST_ADDTAIL(&item->link, list);
469 }
470
471 static void
472 _trace_remove(struct list_head *list,
473                tbm_surface_queue_trace_cb cb, void *data)
474 {
475         queue_trace *item = NULL, *tmp;
476
477         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
478                 if (item->cb == cb && item->data == data) {
479                         LIST_DEL(&item->link);
480                         free(item);
481                         return;
482                 }
483         }
484
485         TBM_LOG_E("Cannot find notifiy\n");
486 }
487
/* Free every entry of a trace list; used during queue teardown. */
static void
_trace_remove_all(struct list_head *list)
{
	queue_trace *item = NULL, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
		LIST_DEL(&item->link);
		free(item);
	}
}
498
499 static void
500 _trace_emit(tbm_surface_queue_h surface_queue,
501              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
502 {
503         queue_trace *item = NULL, *tmp;;
504
505         /*
506                 The item->cb is the outside function of the libtbm.
507                 The tbm user may/can remove the item of the list,
508                 so we have to use the LIST_FOR_EACH_ENTRY_SAFE.
509         */
510         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
511                 item->cb(surface_queue, surface, trace, item->data);
512 }
513
/* Count attached nodes whose current state equals 'type'. */
static int
_tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
{
	queue_node *node = NULL;
	int count = 0;

	LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
		if (node->type == type)
			count++;
	}

	return count;
}
527
/*
 * Attach a surface to the queue: take an internal reference, create a
 * node for it, link it into the all-nodes list and the free queue.
 */
static void
_tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
			  tbm_surface_h surface)
{
	queue_node *node;

	node = _queue_node_create();
	TBM_RETURN_IF_FAIL(node != NULL);

	/* hold the surface for as long as it is attached */
	tbm_surface_internal_ref(surface);
	node->surface = surface;

	LIST_ADDTAIL(&node->link, &surface_queue->list);
	surface_queue->num_attached++;
	_queue_node_push_back(&surface_queue->free_queue, node);
}
544
/*
 * Detach a surface from the queue: find its node anywhere (free queue,
 * dirty queue or node list), destroy node + surface reference, and
 * decrement the attached count. No-op if the surface is not found.
 */
static void
_tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
			  tbm_surface_h surface)
{
	queue_node *node;
	int queue_type;

	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node) {
		_queue_delete_node(surface_queue, node);
		surface_queue->num_attached--;
	}
}
558
559 static void
560 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
561                            queue_node *node, int push_back)
562 {
563         if (push_back)
564                 _queue_node_push_back(&surface_queue->dirty_queue, node);
565         else
566                 _queue_node_push_front(&surface_queue->dirty_queue, node);
567 }
568
569 static queue_node *
570 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
571 {
572         queue_node *node;
573
574         node = _queue_node_pop_front(&surface_queue->free_queue);
575
576         return node;
577 }
578
579 static queue_node *
580 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
581 {
582         queue_node *node;
583
584         if (_queue_is_empty(&surface_queue->dirty_queue))
585                 return NULL;
586
587         node = _queue_node_pop_front(&surface_queue->dirty_queue);
588
589         return node;
590 }
591
592 static void
593 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
594                            queue_node *node, int push_back)
595 {
596         if (push_back)
597                 _queue_node_push_back(&surface_queue->free_queue, node);
598         else
599                 _queue_node_push_front(&surface_queue->free_queue, node);
600 }
601
/*
 * Initialize a freshly allocated surface queue: set geometry/impl,
 * create its lock and condition variables, init all node lists and
 * callback lists, run the backend's init hook, and register the queue
 * in the global bufmgr's surf_queue_list.
 */
static void
_tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
			int queue_size,
			int width, int height, int format,
			const tbm_surface_queue_interface *impl, void *data)
{
	TBM_RETURN_IF_FAIL(surface_queue != NULL);
	TBM_RETURN_IF_FAIL(impl != NULL);

	/* first queue in the process creates the shared bufmgr */
	if (!g_surf_queue_bufmgr)
		_init_tbm_surf_queue_bufmgr();

	pthread_mutex_init(&surface_queue->lock, NULL);
	pthread_cond_init(&surface_queue->free_cond, NULL);
	pthread_cond_init(&surface_queue->dirty_cond, NULL);

	surface_queue->queue_size = queue_size;
	surface_queue->width = width;
	surface_queue->height = height;
	surface_queue->format = format;
	surface_queue->impl = impl;
	surface_queue->impl_data = data;
	surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;

	_queue_init(&surface_queue->free_queue);
	_queue_init(&surface_queue->dirty_queue);
	LIST_INITHEAD(&surface_queue->list);

	LIST_INITHEAD(&surface_queue->destory_noti);
	LIST_INITHEAD(&surface_queue->dequeuable_noti);
	LIST_INITHEAD(&surface_queue->dequeue_noti);
	LIST_INITHEAD(&surface_queue->can_dequeue_noti);
	LIST_INITHEAD(&surface_queue->acquirable_noti);
	LIST_INITHEAD(&surface_queue->reset_noti);
	LIST_INITHEAD(&surface_queue->trace_noti);

	if (surface_queue->impl && surface_queue->impl->init)
		surface_queue->impl->init(surface_queue);

	/* make the handle discoverable by _tbm_surface_queue_is_valid() */
	LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
}
643
/* Register destroy_cb(+data) to be called when the queue is destroyed. */
tbm_surface_queue_error_e
tbm_surface_queue_add_destroy_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->destory_noti, destroy_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
666
/* Unregister a previously added destroy callback matching (destroy_cb, data). */
tbm_surface_queue_error_e
tbm_surface_queue_remove_destroy_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->destory_noti, destroy_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
689
/* Register dequeuable_cb(+data), fired when a surface becomes dequeueable. */
tbm_surface_queue_error_e
tbm_surface_queue_add_dequeuable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
712
/* Unregister a previously added dequeuable callback matching (cb, data). */
tbm_surface_queue_error_e
tbm_surface_queue_remove_dequeuable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
735
/* Register dequeue_cb(+data), fired when a surface is dequeued. */
tbm_surface_queue_error_e
tbm_surface_queue_add_dequeue_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
758
/* Unregister a previously added dequeue callback matching (cb, data). */
tbm_surface_queue_error_e
tbm_surface_queue_remove_dequeue_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
781
/* Register can_dequeue_cb(+data), consulted when dequeueability is queried. */
tbm_surface_queue_error_e
tbm_surface_queue_add_can_dequeue_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
804
/* Unregister a previously added can_dequeue callback matching (cb, data). */
tbm_surface_queue_error_e
tbm_surface_queue_remove_can_dequeue_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
827
/* Register acquirable_cb(+data), fired when a surface becomes acquirable. */
tbm_surface_queue_error_e
tbm_surface_queue_add_acquirable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
850
/* Unregister a previously added acquirable callback matching (cb, data). */
tbm_surface_queue_error_e
tbm_surface_queue_remove_acquirable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
873
/* Register trace_cb(+data), invoked on queue operations for tracing. */
tbm_surface_queue_error_e
tbm_surface_queue_add_trace_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_trace_add(&surface_queue->trace_noti, trace_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
896
/* Unregister a previously added trace callback matching (trace_cb, data). */
tbm_surface_queue_error_e
tbm_surface_queue_remove_trace_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_trace_remove(&surface_queue->trace_noti, trace_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
919
/*
 * Install external allocation hooks: alloc_cb/free_cb are used instead of
 * the internal surface allocation, with 'data' passed through to both.
 * Passing NULLs restores internal allocation.
 */
tbm_surface_queue_error_e
tbm_surface_queue_set_alloc_cb(
	tbm_surface_queue_h surface_queue,
	tbm_surface_alloc_cb alloc_cb,
	tbm_surface_free_cb free_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	surface_queue->alloc_cb = alloc_cb;
	surface_queue->free_cb = free_cb;
	surface_queue->alloc_cb_data = data;

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
946
/* Return the queue's surface width, or 0 for an invalid queue.
 * NOTE(review): reads the field under the global mutex only, not
 * surface_queue->lock — confirm that is the intended locking model. */
int
tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
{
	int width;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	width = surface_queue->width;

	_tbm_surf_queue_mutex_unlock();

	return width;
}
964
/* Return the queue's surface height, or 0 for an invalid queue. */
int
tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
{
	int height;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	height = surface_queue->height;

	_tbm_surf_queue_mutex_unlock();

	return height;
}
982
/* Return the queue's surface format, or 0 for an invalid queue. */
int
tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
{
	int format;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	format = surface_queue->format;

	_tbm_surf_queue_mutex_unlock();

	return format;
}
1000
/* Return the queue's configured size (surface count), or 0 if invalid. */
int
tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
{
	int queue_size;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	queue_size = surface_queue->queue_size;

	_tbm_surf_queue_mutex_unlock();

	return queue_size;
}
1018
/**
 * Register a callback that is invoked when this queue is reset
 * (tbm_surface_queue_reset / set_size with flush emit reset_noti).
 *
 * @param surface_queue  queue handle; validated under the global mutex
 * @param reset_cb       callback appended to the queue's reset notification list
 * @param data           opaque user data passed back to @reset_cb
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE when the handle is invalid
 */
tbm_surface_queue_error_e
tbm_surface_queue_add_reset_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
	void *data)
{
	/* Lock order: global surface-queue mutex first, then the per-queue lock. */
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* Append (reset_cb, data) to the reset notification list. */
	_notify_add(&surface_queue->reset_noti, reset_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1041
/**
 * Unregister a reset callback previously added with
 * tbm_surface_queue_add_reset_cb().  The (reset_cb, data) pair is passed
 * to _notify_remove, so both values identify the entry to delete.
 *
 * @param surface_queue  queue handle; validated under the global mutex
 * @param reset_cb       callback to remove from the reset notification list
 * @param data           user data that was registered with @reset_cb
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE when the handle is invalid
 */
tbm_surface_queue_error_e
tbm_surface_queue_remove_reset_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
	void *data)
{
	/* Lock order: global surface-queue mutex first, then the per-queue lock. */
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->reset_noti, reset_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1064
1065 tbm_surface_queue_error_e
1066 tbm_surface_queue_enqueue(tbm_surface_queue_h
1067                           surface_queue, tbm_surface_h surface)
1068 {
1069         queue_node *node;
1070         int queue_type;
1071
1072         _tbm_surf_queue_mutex_lock();
1073
1074         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1075                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1076         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1077                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1078
1079         if (b_dump_queue)
1080                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1081
1082         pthread_mutex_lock(&surface_queue->lock);
1083
1084         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1085
1086         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1087         if (node == NULL || queue_type != NODE_LIST) {
1088                 TBM_LOG_E("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
1089                         node, queue_type);
1090                 pthread_mutex_unlock(&surface_queue->lock);
1091
1092                 _tbm_surf_queue_mutex_unlock();
1093
1094                 if (!node)
1095                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1096                 else
1097                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1098         }
1099
1100         if (surface_queue->impl && surface_queue->impl->enqueue)
1101                 surface_queue->impl->enqueue(surface_queue, node);
1102         else
1103                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1104
1105         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1106                 TBM_LOG_E("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1107                 pthread_mutex_unlock(&surface_queue->lock);
1108
1109                 _tbm_surf_queue_mutex_unlock();
1110                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1111         }
1112
1113         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1114
1115         pthread_mutex_unlock(&surface_queue->lock);
1116         pthread_cond_signal(&surface_queue->dirty_cond);
1117
1118         _tbm_surf_queue_mutex_unlock();
1119
1120         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1121
1122         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1123
1124         return TBM_SURFACE_QUEUE_ERROR_NONE;
1125 }
1126
1127 tbm_surface_queue_error_e
1128 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1129                           surface_queue, tbm_surface_h surface)
1130 {
1131         queue_node *node;
1132         int queue_type;
1133
1134         _tbm_surf_queue_mutex_lock();
1135
1136         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1137                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1138         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1139                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1140
1141         pthread_mutex_lock(&surface_queue->lock);
1142
1143         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1144
1145         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1146         if (node == NULL || queue_type != NODE_LIST) {
1147                 TBM_LOG_E("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
1148                         node, queue_type);
1149                 pthread_mutex_unlock(&surface_queue->lock);
1150
1151                 _tbm_surf_queue_mutex_unlock();
1152                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1153         }
1154
1155         if (node->delete_pending) {
1156                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1157
1158                 _queue_delete_node(surface_queue, node);
1159
1160                 pthread_mutex_unlock(&surface_queue->lock);
1161
1162                 _tbm_surf_queue_mutex_unlock();
1163
1164                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1165
1166                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1167         }
1168
1169         if (surface_queue->queue_size < surface_queue->num_attached) {
1170                 TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1171
1172                 if (surface_queue->impl && surface_queue->impl->need_detach)
1173                         surface_queue->impl->need_detach(surface_queue, node);
1174                 else
1175                         _tbm_surface_queue_detach(surface_queue, surface);
1176
1177                 pthread_mutex_unlock(&surface_queue->lock);
1178
1179                 _tbm_surf_queue_mutex_unlock();
1180
1181                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1182
1183                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1184         }
1185
1186         if (surface_queue->impl && surface_queue->impl->release)
1187                 surface_queue->impl->release(surface_queue, node);
1188         else
1189                 _tbm_surface_queue_release(surface_queue, node, 1);
1190
1191         if (_queue_is_empty(&surface_queue->free_queue)) {
1192                 pthread_mutex_unlock(&surface_queue->lock);
1193
1194                 TBM_LOG_E("surface_queue->free_queue is empty.\n");
1195                 _tbm_surf_queue_mutex_unlock();
1196                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1197         }
1198
1199         node->type = QUEUE_NODE_TYPE_RELEASE;
1200
1201         pthread_mutex_unlock(&surface_queue->lock);
1202         pthread_cond_signal(&surface_queue->free_cond);
1203
1204         _tbm_surf_queue_mutex_unlock();
1205
1206         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1207
1208         return TBM_SURFACE_QUEUE_ERROR_NONE;
1209 }
1210
1211 tbm_surface_queue_error_e
1212 tbm_surface_queue_dequeue(tbm_surface_queue_h
1213                           surface_queue, tbm_surface_h *surface)
1214 {
1215         queue_node *node;
1216
1217         _tbm_surf_queue_mutex_lock();
1218
1219         *surface = NULL;
1220
1221         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1222                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1223         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1224                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1225
1226         pthread_mutex_lock(&surface_queue->lock);
1227
1228         if (_queue_is_empty(&surface_queue->free_queue)) {
1229                 if (surface_queue->impl && surface_queue->impl->need_attach)
1230                         surface_queue->impl->need_attach(surface_queue);
1231
1232                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1233                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1234                         pthread_mutex_unlock(&surface_queue->lock);
1235                         _tbm_surf_queue_mutex_unlock();
1236                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1237                 }
1238         }
1239
1240         if (surface_queue->impl && surface_queue->impl->dequeue)
1241                 node = surface_queue->impl->dequeue(surface_queue);
1242         else
1243                 node = _tbm_surface_queue_dequeue(surface_queue);
1244
1245         if (node == NULL || node->surface == NULL) {
1246                 TBM_LOG_E("_queue_node_pop_front failed\n");
1247                 pthread_mutex_unlock(&surface_queue->lock);
1248
1249                 _tbm_surf_queue_mutex_unlock();
1250                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1251         }
1252
1253         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1254         *surface = node->surface;
1255
1256         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1257
1258         pthread_mutex_unlock(&surface_queue->lock);
1259
1260         _tbm_surf_queue_mutex_unlock();
1261
1262         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1263
1264         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1265
1266         return TBM_SURFACE_QUEUE_ERROR_NONE;
1267 }
1268
/**
 * Report whether a surface can currently be dequeued.
 *
 * Emits can_dequeue_noti first (outside the lock, so listeners may e.g.
 * release buffers), then checks the free_queue.  With @wait non-zero and
 * buffers outstanding in ACQUIRE state, blocks on free_cond until one is
 * released.
 *
 * @param surface_queue  queue handle; validated under the global mutex
 * @param wait           non-zero to block until a buffer becomes free
 * @return 1 when dequeue should succeed, 0 otherwise (including when the
 *         queue became invalid while waiting)
 */
int
tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	/* Drop the global mutex while emitting: the callback may call back
	 * into queue API. */
	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->can_dequeue_noti);

	_tbm_surf_queue_mutex_lock();

	/* The callback may have destroyed the queue; re-validate. */
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (_queue_is_empty(&surface_queue->free_queue)) {
		/* Give the backend a chance to attach a buffer on demand. */
		if (surface_queue->impl && surface_queue->impl->need_attach)
			surface_queue->impl->need_attach(surface_queue);

		if (!_tbm_surface_queue_is_valid(surface_queue)) {
			TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
			pthread_mutex_unlock(&surface_queue->lock);
			_tbm_surf_queue_mutex_unlock();
			return 0;
		}
	}

	if (!_queue_is_empty(&surface_queue->free_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		return 1;
	}

	/* Only worth waiting if some buffer is out in ACQUIRE state and can
	 * therefore eventually be released back to the free_queue. */
	if (wait && _tbm_surface_queue_get_node_count(surface_queue,
						QUEUE_NODE_TYPE_ACQUIRE)) {
		/* Release the global mutex (but keep surface_queue->lock, as
		 * pthread_cond_wait requires) while blocking on free_cond. */
		_tbm_surf_queue_mutex_unlock();
		pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
		_tbm_surf_queue_mutex_lock();

		if (!_tbm_surface_queue_is_valid(surface_queue)) {
			TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
			pthread_mutex_unlock(&surface_queue->lock);
			_tbm_surf_queue_mutex_unlock();
			return 0;
		}

		/* NOTE(review): returns 1 without re-checking the free_queue;
		 * a spurious wakeup could report dequeueable prematurely —
		 * confirm whether callers tolerate this. */
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		return 1;
	}

	pthread_mutex_unlock(&surface_queue->lock);
	_tbm_surf_queue_mutex_unlock();
	return 0;
}
1328
1329 tbm_surface_queue_error_e
1330 tbm_surface_queue_release(tbm_surface_queue_h
1331                           surface_queue, tbm_surface_h surface)
1332 {
1333         queue_node *node;
1334         int queue_type;
1335
1336         _tbm_surf_queue_mutex_lock();
1337
1338         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1339                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1340         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1341                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1342
1343         pthread_mutex_lock(&surface_queue->lock);
1344
1345         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1346
1347         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1348         if (node == NULL || queue_type != NODE_LIST) {
1349                 TBM_LOG_E("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
1350                         node, queue_type);
1351                 pthread_mutex_unlock(&surface_queue->lock);
1352
1353                 _tbm_surf_queue_mutex_unlock();
1354
1355                 if (!node)
1356                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1357                 else
1358                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1359         }
1360
1361         if (node->delete_pending) {
1362                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1363
1364                 _queue_delete_node(surface_queue, node);
1365
1366                 pthread_mutex_unlock(&surface_queue->lock);
1367
1368                 _tbm_surf_queue_mutex_unlock();
1369
1370                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1371
1372                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1373         }
1374
1375         if (surface_queue->queue_size < surface_queue->num_attached) {
1376                 TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1377
1378                 if (surface_queue->impl && surface_queue->impl->need_detach)
1379                         surface_queue->impl->need_detach(surface_queue, node);
1380                 else
1381                         _tbm_surface_queue_detach(surface_queue, surface);
1382
1383                 pthread_mutex_unlock(&surface_queue->lock);
1384
1385                 _tbm_surf_queue_mutex_unlock();
1386
1387                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1388
1389                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1390         }
1391
1392         if (surface_queue->impl && surface_queue->impl->release)
1393                 surface_queue->impl->release(surface_queue, node);
1394         else
1395                 _tbm_surface_queue_release(surface_queue, node, 1);
1396
1397         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1398                 TBM_LOG_E("release surface(%p) but surface isn't present in the free_queue\n", surface);
1399                 pthread_mutex_unlock(&surface_queue->lock);
1400
1401                 _tbm_surf_queue_mutex_unlock();
1402                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1403         }
1404
1405         node->type = QUEUE_NODE_TYPE_RELEASE;
1406
1407         pthread_mutex_unlock(&surface_queue->lock);
1408         pthread_cond_signal(&surface_queue->free_cond);
1409
1410         _tbm_surf_queue_mutex_unlock();
1411
1412         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1413
1414         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1415
1416         return TBM_SURFACE_QUEUE_ERROR_NONE;
1417 }
1418
1419 tbm_surface_queue_error_e
1420 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1421                         surface_queue, tbm_surface_h surface)
1422 {
1423         queue_node *node;
1424         int queue_type;
1425
1426         _tbm_surf_queue_mutex_lock();
1427
1428         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1429                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1430         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1431                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1432
1433         pthread_mutex_lock(&surface_queue->lock);
1434
1435         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1436
1437         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1438         if (node == NULL || queue_type != NODE_LIST) {
1439                 TBM_LOG_E("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
1440                         node, queue_type);
1441                 pthread_mutex_unlock(&surface_queue->lock);
1442
1443                 _tbm_surf_queue_mutex_unlock();
1444                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1445         }
1446
1447         if (surface_queue->impl && surface_queue->impl->enqueue)
1448                 surface_queue->impl->enqueue(surface_queue, node);
1449         else
1450                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1451
1452         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1453                 TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
1454                 pthread_mutex_unlock(&surface_queue->lock);
1455
1456                 _tbm_surf_queue_mutex_unlock();
1457                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1458         }
1459
1460         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1461
1462         pthread_mutex_unlock(&surface_queue->lock);
1463         pthread_cond_signal(&surface_queue->dirty_cond);
1464
1465         _tbm_surf_queue_mutex_unlock();
1466
1467         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1468
1469         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1470
1471         return TBM_SURFACE_QUEUE_ERROR_NONE;
1472 }
1473
1474 tbm_surface_queue_error_e
1475 tbm_surface_queue_acquire(tbm_surface_queue_h
1476                           surface_queue, tbm_surface_h *surface)
1477 {
1478         queue_node *node;
1479
1480         _tbm_surf_queue_mutex_lock();
1481
1482         *surface = NULL;
1483
1484         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1485                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1486         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1487                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1488
1489         pthread_mutex_lock(&surface_queue->lock);
1490
1491         if (surface_queue->impl && surface_queue->impl->acquire)
1492                 node = surface_queue->impl->acquire(surface_queue);
1493         else
1494                 node = _tbm_surface_queue_acquire(surface_queue);
1495
1496         if (node == NULL || node->surface == NULL) {
1497                 TBM_LOG_E("_queue_node_pop_front failed\n");
1498                 pthread_mutex_unlock(&surface_queue->lock);
1499
1500                 _tbm_surf_queue_mutex_unlock();
1501                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1502         }
1503
1504         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1505
1506         *surface = node->surface;
1507
1508         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1509
1510         pthread_mutex_unlock(&surface_queue->lock);
1511
1512         _tbm_surf_queue_mutex_unlock();
1513
1514         if (b_dump_queue)
1515                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1516
1517         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1518
1519         return TBM_SURFACE_QUEUE_ERROR_NONE;
1520 }
1521
/**
 * Report whether a surface can currently be acquired.
 *
 * With @wait non-zero and buffers outstanding in DEQUEUE state, blocks on
 * dirty_cond until one is enqueued.
 *
 * @param surface_queue  queue handle; validated under the global mutex
 * @param wait           non-zero to block until a buffer is enqueued
 * @return 1 when acquire should succeed, 0 otherwise (including when the
 *         queue became invalid while waiting)
 */
int
tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (!_queue_is_empty(&surface_queue->dirty_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		return 1;
	}

	/* Only worth waiting if some buffer is out in DEQUEUE state and can
	 * therefore eventually be enqueued onto the dirty_queue. */
	if (wait && _tbm_surface_queue_get_node_count(surface_queue,
						QUEUE_NODE_TYPE_DEQUEUE)) {
		/* Release the global mutex (but keep surface_queue->lock, as
		 * pthread_cond_wait requires) while blocking on dirty_cond. */
		_tbm_surf_queue_mutex_unlock();
		pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
		_tbm_surf_queue_mutex_lock();

		/* The queue may have been destroyed while we slept. */
		if (!_tbm_surface_queue_is_valid(surface_queue)) {
			TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
			pthread_mutex_unlock(&surface_queue->lock);
			_tbm_surf_queue_mutex_unlock();
			return 0;
		}

		/* NOTE(review): returns 1 without re-checking the dirty_queue;
		 * a spurious wakeup could report acquirable prematurely —
		 * confirm whether callers tolerate this. */
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		return 1;
	}

	pthread_mutex_unlock(&surface_queue->lock);
	_tbm_surf_queue_mutex_unlock();
	return 0;
}
1561
/**
 * Destroy a surface queue: delete all nodes, run the implementation's
 * destroy hook, emit and then clear every notification list, and free the
 * queue object.  If this was the last live queue, the global surf-queue
 * bufmgr is deinitialized as well.
 *
 * @param surface_queue  queue handle; validated under the global mutex
 */
void
tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* Unlink from the global queue list first so the handle is no longer
	 * considered valid by _tbm_surface_queue_is_valid(). */
	LIST_DEL(&surface_queue->item_link);

	LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
		_queue_delete_node(surface_queue, node);

	if (surface_queue->impl && surface_queue->impl->destroy)
		surface_queue->impl->destroy(surface_queue);

	/* Destroy listeners are notified before the lists are torn down. */
	_notify_emit(surface_queue, &surface_queue->destory_noti);

	_notify_remove_all(&surface_queue->destory_noti);
	_notify_remove_all(&surface_queue->dequeuable_noti);
	_notify_remove_all(&surface_queue->dequeue_noti);
	_notify_remove_all(&surface_queue->can_dequeue_noti);
	_notify_remove_all(&surface_queue->acquirable_noti);
	_notify_remove_all(&surface_queue->reset_noti);
	_trace_remove_all(&surface_queue->trace_noti);

	pthread_mutex_destroy(&surface_queue->lock);

	free(surface_queue);

	if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
		_deinit_tbm_surf_queue_bufmgr();

	_tbm_surf_queue_mutex_unlock();
}
1600
/**
 * Change the queue's width/height/format and flush its buffers.
 *
 * A no-op when all three parameters already match.  Otherwise, in
 * GUARANTEE_CYCLE mode buffers currently held by clients are only marked
 * delete_pending (deleted later when released); in the default mode every
 * node is deleted immediately.  Finally reset_noti is emitted.
 *
 * @param surface_queue  queue handle; validated under the global mutex
 * @param width          new surface width
 * @param height         new surface height
 * @param format         new TBM surface format
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE when the handle is invalid
 */
tbm_surface_queue_error_e
tbm_surface_queue_reset(tbm_surface_queue_h
			surface_queue, int width, int height, int format)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* Nothing to do when the geometry/format is unchanged.
	 * NOTE(review): this compare happens before taking surface_queue->lock;
	 * presumably guarded well enough by the global mutex — confirm. */
	if (width == surface_queue->width && height == surface_queue->height &&
		format == surface_queue->format) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	surface_queue->width = width;
	surface_queue->height = height;
	surface_queue->format = format;

	if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
		/* Destory surface and Push to free_queue */
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
			_queue_delete_node(surface_queue, node);

		/* Buffers held by clients are deleted lazily on release/
		 * cancel_dequeue via the delete_pending flag. */
		LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
			node->delete_pending = 1;
	} else {
		/* Default mode: drop every node immediately. */
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
			_queue_delete_node(surface_queue, node);

		_queue_init(&surface_queue->dirty_queue);
		LIST_INITHEAD(&surface_queue->list);
	}

	/* Reset queue */
	_queue_init(&surface_queue->free_queue);

	surface_queue->num_attached = 0;

	if (surface_queue->impl && surface_queue->impl->reset)
		surface_queue->impl->reset(surface_queue);

	/* Wake dequeue waiters after dropping the per-queue lock. */
	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1658
/**
 * Emit the reset notification to all registered reset callbacks without
 * actually resetting the queue.
 *
 * @param surface_queue  queue handle; validated under the global mutex
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE when the handle is invalid
 */
tbm_surface_queue_error_e
tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	/* Emit after releasing the global mutex so callbacks may re-enter
	 * queue API. */
	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1673
/**
 * Change the queue capacity.
 *
 * With @flush non-zero the queue is emptied like tbm_surface_queue_reset
 * (GUARANTEE_CYCLE mode defers deletion of client-held buffers via
 * delete_pending) and reset_noti is emitted.  Without @flush, shrinking
 * detaches up to (old_size - new_size) buffers from the free_queue; growing
 * just records the new size.
 *
 * @param surface_queue  queue handle; validated under the global mutex
 * @param queue_size     new capacity, must be > 0
 * @param flush          non-zero to drop all current buffers
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE / _INVALID_PARAMETER on
 *         bad arguments
 */
tbm_surface_queue_error_e
tbm_surface_queue_set_size(tbm_surface_queue_h
			surface_queue, int queue_size, int flush)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
					TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
					TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* Same size and no flush requested: nothing to do. */
	if ((surface_queue->queue_size == queue_size) && !flush) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	if (flush) {
		if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
			/* Destory surface and Push to free_queue */
			LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
				_queue_delete_node(surface_queue, node);

			/* Client-held buffers are deleted lazily on release via
			 * the delete_pending flag. */
			LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
				node->delete_pending = 1;
		} else {
			/* Default mode: drop every node immediately. */
			LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
				_queue_delete_node(surface_queue, node);

			_queue_init(&surface_queue->dirty_queue);
			LIST_INITHEAD(&surface_queue->list);
		}

		/* Reset queue */
		_queue_init(&surface_queue->free_queue);

		surface_queue->num_attached = 0;
		surface_queue->queue_size = queue_size;

		if (surface_queue->impl && surface_queue->impl->reset)
			surface_queue->impl->reset(surface_queue);

		/* Wake dequeue waiters after dropping the per-queue lock. */
		pthread_mutex_unlock(&surface_queue->lock);
		pthread_cond_signal(&surface_queue->free_cond);

		_tbm_surf_queue_mutex_unlock();

		_notify_emit(surface_queue, &surface_queue->reset_noti);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	} else {
		if (surface_queue->queue_size > queue_size) {
			/* Shrinking: detach surplus buffers from the free_queue
			 * only; buffers held by clients are detached later when
			 * released (see tbm_surface_queue_release). */
			int need_del = surface_queue->queue_size - queue_size;

			LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
				TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

				if (surface_queue->impl && surface_queue->impl->need_detach)
					surface_queue->impl->need_detach(surface_queue, node);
				else
					_tbm_surface_queue_detach(surface_queue, node->surface);

				need_del--;
				if (need_del == 0)
					break;
			}
		}

		surface_queue->queue_size = queue_size;

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}
}
1756
/**
 * Detach and discard every surface currently sitting in the free_queue,
 * leaving client-held and dirty buffers untouched.  A no-op when nothing
 * has been attached yet.
 *
 * @param surface_queue  queue handle; validated under the global mutex
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE when the handle is invalid
 */
tbm_surface_queue_error_e
tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* Nothing attached means nothing to flush. */
	if (surface_queue->num_attached == 0) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	/* Destory surface in free_queue */
	while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
		if (surface_queue->impl && surface_queue->impl->need_detach)
			surface_queue->impl->need_detach(surface_queue, node);
		else
			_tbm_surface_queue_detach(surface_queue, node->surface);
	}

	/* Reset queue */
	_queue_init(&surface_queue->free_queue);

	pthread_mutex_unlock(&surface_queue->lock);
	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1792
/**
 * Detach every attached surface and reset the queue to its initial state.
 *
 * In TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE mode, surfaces that are out
 * with the consumer/producer cannot be destroyed here: only the free_queue
 * entries are deleted and the remaining nodes are marked delete_pending so
 * they are reclaimed when they return.  In the default mode every node is
 * deleted immediately and all lists are reinitialized.
 *
 * After the reset, one waiter on free_cond is woken and the reset
 * notification is emitted with all locks dropped (callbacks may re-enter
 * the queue API).
 *
 * @param surface_queue  queue to flush
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE for an invalid handle.
 */
tbm_surface_queue_error_e
tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* Nothing attached: nothing to flush. */
	if (surface_queue->num_attached == 0) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
		/* Destroy only the idle surfaces held in free_queue. */
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
			_queue_delete_node(surface_queue, node);

		/* In-flight nodes are reclaimed later, when they cycle back. */
		LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
			node->delete_pending = 1;
	} else {
		/* Default mode: every node can be destroyed right now. */
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
			_queue_delete_node(surface_queue, node);

		_queue_init(&surface_queue->dirty_queue);
		LIST_INITHEAD(&surface_queue->list);
	}

	/* Reset queue */
	_queue_init(&surface_queue->free_queue);

	surface_queue->num_attached = 0;

	/* Let the backend reset its private bookkeeping as well. */
	if (surface_queue->impl && surface_queue->impl->reset)
		surface_queue->impl->reset(surface_queue);

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	/* Emit with no locks held, so listeners can call back into the queue. */
	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1844
1845 tbm_surface_queue_error_e
1846 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1847                         tbm_surface_h *surfaces, int *num)
1848 {
1849         queue_node *node = NULL;
1850
1851         _tbm_surf_queue_mutex_lock();
1852
1853         *num = 0;
1854
1855         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1856                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1857         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1858                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1859
1860         pthread_mutex_lock(&surface_queue->lock);
1861
1862         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1863                 if (surfaces)
1864                         surfaces[*num] = node->surface;
1865
1866                 *num = *num + 1;
1867         }
1868
1869         pthread_mutex_unlock(&surface_queue->lock);
1870
1871         _tbm_surf_queue_mutex_unlock();
1872
1873         return TBM_SURFACE_QUEUE_ERROR_NONE;
1874 }
1875
1876 tbm_surface_queue_error_e
1877 tbm_surface_queue_get_trace_surface_num(
1878                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1879 {
1880         _tbm_surf_queue_mutex_lock();
1881
1882         *num = 0;
1883
1884         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1885                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1886         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1887                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1888
1889         pthread_mutex_lock(&surface_queue->lock);
1890
1891         switch (trace) {
1892         case TBM_SURFACE_QUEUE_TRACE_NONE:
1893                 *num = 0;
1894                 break;
1895         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1896                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1897                 break;
1898         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1899                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1900                 break;
1901         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1902                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1903                 break;
1904         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1905                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1906                 break;
1907         default:
1908                 break;
1909         }
1910
1911         pthread_mutex_unlock(&surface_queue->lock);
1912
1913         _tbm_surf_queue_mutex_unlock();
1914
1915         return TBM_SURFACE_QUEUE_ERROR_NONE;
1916 }
1917
/* Private data of the default queue backend: only the buffer-allocation
 * flags passed to tbm_surface_internal_create_with_flags(). */
typedef struct {
	int flags;
} tbm_queue_default;
1921
1922 static void
1923 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1924 {
1925         free(surface_queue->impl_data);
1926 }
1927
/* need_attach hook of the default backend: lazily allocate and attach one
 * surface until queue_size surfaces are attached.  When the application
 * installed an alloc_cb, both locks are dropped around the callback —
 * presumably so the callback may re-enter the queue API without
 * deadlocking (TODO confirm) — and reacquired afterwards. */
static void
__tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
	tbm_surface_h surface;

	/* Queue already fully populated: nothing to do. */
	if (surface_queue->queue_size == surface_queue->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		_tbm_surf_queue_mutex_lock();
		pthread_mutex_lock(&surface_queue->lock);

		/* silent return */
		if (!surface)
			return;

		/* Take a reference to balance the unref below. */
		tbm_surface_internal_ref(surface);
	} else {
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	/* Attach takes its own reference; drop ours. */
	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
}
1960
/* Default backend vtable: surfaces are allocated lazily via need_attach;
 * all other hooks fall back to the generic queue behavior (NULL). */
static const tbm_surface_queue_interface tbm_queue_default_impl = {
	NULL,				/*__tbm_queue_default_init*/
	NULL,				/*__tbm_queue_default_reset*/
	__tbm_queue_default_destroy,
	__tbm_queue_default_need_attach,
	NULL,				/*__tbm_queue_default_enqueue*/
	NULL,				/*__tbm_queue_default_release*/
	NULL,				/*__tbm_queue_default_dequeue*/
	NULL,				/*__tbm_queue_default_acquire*/
	NULL,				/*__tbm_queue_default_need_detach*/
};
1972
1973 tbm_surface_queue_h
1974 tbm_surface_queue_create(int queue_size, int width,
1975                          int height, int format, int flags)
1976 {
1977         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1978         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1979         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1980         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1981
1982         _tbm_surf_queue_mutex_lock();
1983
1984         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1985                                             sizeof(struct _tbm_surface_queue));
1986         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1987
1988         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1989
1990         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
1991                                   sizeof(tbm_queue_default));
1992         if (data == NULL) {
1993                 TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
1994                 free(surface_queue);
1995                 _tbm_surf_queue_mutex_unlock();
1996                 return NULL;
1997         }
1998
1999         data->flags = flags;
2000         _tbm_surface_queue_init(surface_queue,
2001                                 queue_size,
2002                                 width, height, format,
2003                                 &tbm_queue_default_impl, data);
2004
2005         _tbm_surf_queue_mutex_unlock();
2006
2007         return surface_queue;
2008 }
2009
/* Private data of the sequence backend: allocation flags plus a list that
 * records the order in which surfaces were dequeued, so enqueues can be
 * forced back into that same order. */
typedef struct {
	int flags;
	queue dequeue_list;
} tbm_queue_sequence;
2014
2015 static void
2016 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2017 {
2018         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2019
2020         _queue_init(&data->dequeue_list);
2021 }
2022
2023 static void
2024 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2025 {
2026         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2027
2028         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2029                 return;
2030
2031         _queue_init(&data->dequeue_list);
2032 }
2033
2034 static void
2035 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2036 {
2037         free(surface_queue->impl_data);
2038 }
2039
/* need_attach hook of the sequence backend: identical allocation logic to
 * __tbm_queue_default_need_attach (NOTE(review): candidate for a shared
 * helper).  Lazily attaches one surface until queue_size is reached;
 * both locks are dropped around the user alloc_cb — presumably so the
 * callback may re-enter the queue API without deadlocking (TODO confirm). */
static void
__tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
	tbm_surface_h surface;

	/* Queue already fully populated: nothing to do. */
	if (surface_queue->queue_size == surface_queue->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		_tbm_surf_queue_mutex_lock();
		pthread_mutex_lock(&surface_queue->lock);

		/* silent return */
		if (!surface)
			return;

		/* Take a reference to balance the unref below. */
		tbm_surface_internal_ref(surface);
	} else {
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	/* Attach takes its own reference; drop ours. */
	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
}
2072
/* enqueue hook of the sequence backend: force enqueues to happen in the
 * original dequeue order.  The node is marked as "ready" (priv_flags = 0)
 * and then the dequeue_list is drained from the front: every leading node
 * already marked ready is really enqueued; the scan stops at the first
 * node still out with the application (priv_flags set), leaving later
 * ready nodes queued until their predecessors come back. */
static void
__tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
			     queue_node *node)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
	queue_node *next = NULL, *tmp;

	node->priv_flags = 0;

	LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
		if (next->priv_flags)
			break;
		_queue_node_pop(&data->dequeue_list, next);
		_tbm_surface_queue_enqueue(surface_queue, next, 1);
	}
}
2089
2090 static void
2091 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2092                                 queue_node *node)
2093 {
2094         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2095
2096         if (node->priv_flags) {
2097                 node->priv_flags = 0;
2098                 _queue_node_pop(&data->dequeue_list, node);
2099         }
2100
2101         _tbm_surface_queue_release(surface_queue, node, 1);
2102 }
2103
2104 static queue_node *
2105 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2106                              surface_queue)
2107 {
2108         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2109         queue_node *node;
2110
2111         node = _tbm_surface_queue_dequeue(surface_queue);
2112         if (node) {
2113                 _queue_node_push_back(&data->dequeue_list, node);
2114                 node->priv_flags = 1;
2115         }
2116
2117         return node;
2118 }
2119
/* Sequence backend vtable: tracks dequeue order and enforces it on
 * enqueue; acquire and need_detach use the generic behavior (NULL). */
static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
	__tbm_queue_sequence_init,
	__tbm_queue_sequence_reset,
	__tbm_queue_sequence_destroy,
	__tbm_queue_sequence_need_attach,
	__tbm_queue_sequence_enqueue,
	__tbm_queue_sequence_release,
	__tbm_queue_sequence_dequeue,
	NULL,					/*__tbm_queue_sequence_acquire*/
	NULL,					/*__tbm_queue_sequence_need_detach*/
};
2131
2132 tbm_surface_queue_h
2133 tbm_surface_queue_sequence_create(int queue_size, int width,
2134                                   int height, int format, int flags)
2135 {
2136         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2137         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
2138         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
2139         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2140
2141         _tbm_surf_queue_mutex_lock();
2142
2143         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2144                                             sizeof(struct _tbm_surface_queue));
2145         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2146
2147         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
2148
2149         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2150                                    sizeof(tbm_queue_sequence));
2151         if (data == NULL) {
2152                 TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
2153                 free(surface_queue);
2154                 _tbm_surf_queue_mutex_unlock();
2155                 return NULL;
2156         }
2157
2158         data->flags = flags;
2159         _tbm_surface_queue_init(surface_queue,
2160                                 queue_size,
2161                                 width, height, format,
2162                                 &tbm_queue_sequence_impl, data);
2163
2164         _tbm_surf_queue_mutex_unlock();
2165
2166         return surface_queue;
2167 }
2168
2169 tbm_surface_queue_error_e
2170 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2171                                   int modes)
2172 {
2173         _tbm_surf_queue_mutex_lock();
2174
2175         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2176                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2177
2178         pthread_mutex_lock(&surface_queue->lock);
2179
2180         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2181                 modes = TBM_SURFACE_QUEUE_MODE_NONE;
2182         else
2183                 surface_queue->modes |= modes;
2184
2185         pthread_mutex_unlock(&surface_queue->lock);
2186
2187         _tbm_surf_queue_mutex_unlock();
2188
2189         return TBM_SURFACE_QUEUE_ERROR_NONE;
2190 }
2191 /* LCOV_EXCL_STOP */