turn on the utests building and fix svace issues
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 #define TBM_QUEUE_DEBUG 0
42
43 #ifdef TRACE
44 #define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
45 #else
46 #define TBM_QUEUE_TRACE(fmt, ...)
47 #endif /* TRACE */
48
49 #if TBM_QUEUE_DEBUG
50 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
51 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
52 #else
53 #define TBM_LOCK()
54 #define TBM_UNLOCK()
55 #endif
56
57 static tbm_bufmgr g_surf_queue_bufmgr;
58 static pthread_mutex_t tbm_surf_queue_lock;
59 void _tbm_surface_queue_mutex_unlock(void);
60
61 /* check condition */
62 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
63         if (!(cond)) {\
64                 TBM_LOG_E("'%s' failed.\n", #cond);\
65                 _tbm_surf_queue_mutex_unlock();\
66                 return;\
67         } \
68 }
69
70 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
71         if (!(cond)) {\
72                 TBM_LOG_E("'%s' failed.\n", #cond);\
73                 _tbm_surf_queue_mutex_unlock();\
74                 return val;\
75         } \
76 }
77
78 typedef enum _queue_node_type {
79         QUEUE_NODE_TYPE_NONE,
80         QUEUE_NODE_TYPE_DEQUEUE,
81         QUEUE_NODE_TYPE_ENQUEUE,
82         QUEUE_NODE_TYPE_ACQUIRE,
83         QUEUE_NODE_TYPE_RELEASE
84 } Queue_Node_Type;
85
86 typedef struct {
87         struct list_head head;
88         int count;
89 } queue;
90
91 typedef struct {
92         tbm_surface_h surface;
93
94         struct list_head item_link;
95         struct list_head link;
96
97         Queue_Node_Type type;
98
99         unsigned int priv_flags;        /*for each queue*/
100
101         int delete_pending;
102 } queue_node;
103
104 typedef struct {
105         struct list_head link;
106
107         tbm_surface_queue_notify_cb cb;
108         void *data;
109 } queue_notify;
110
111 typedef struct {
112         struct list_head link;
113
114         tbm_surface_queue_trace_cb cb;
115         void *data;
116 } queue_trace;
117
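/* Hooks a queue implementation can provide; when a hook is NULL the
   default _tbm_surface_queue_* behaviour defined below is used instead. */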
118 typedef struct _tbm_surface_queue_interface {
119         void (*init)(tbm_surface_queue_h queue);
120         void (*reset)(tbm_surface_queue_h queue);
121         void (*destroy)(tbm_surface_queue_h queue);
122         void (*need_attach)(tbm_surface_queue_h queue);
123
124         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
125         void (*release)(tbm_surface_queue_h queue, queue_node *node);
126         queue_node *(*dequeue)(tbm_surface_queue_h queue);
127         queue_node *(*acquire)(tbm_surface_queue_h queue);
128         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
129 } tbm_surface_queue_interface;
130
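/* A surface queue: 'list' tracks every attached buffer, 'free_queue'
   holds buffers that can be dequeued by the producer and 'dirty_queue'
   holds enqueued buffers waiting to be acquired by the consumer. */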
131 struct _tbm_surface_queue {
132         int width;
133         int height;
134         int format;
135         int queue_size;
136         int num_attached;
137
138         queue free_queue;
139         queue dirty_queue;
140         struct list_head list;
141
142         struct list_head destory_noti;
143         struct list_head dequeuable_noti;
144         struct list_head dequeue_noti;
145         struct list_head can_dequeue_noti;
146         struct list_head acquirable_noti;
147         struct list_head reset_noti;
148         struct list_head trace_noti;
149
150         pthread_mutex_t lock;
151         pthread_cond_t free_cond;
152         pthread_cond_t dirty_cond;
153
154         const tbm_surface_queue_interface *impl;
155         void *impl_data;
156
157         //For external buffer allocation
158         tbm_surface_alloc_cb alloc_cb;
159         tbm_surface_free_cb free_cb;
160         void *alloc_cb_data;
161
162         struct list_head item_link; /* link of surface queue */
163
164         int modes;
165 };
166
167 static bool
168 _tbm_surf_queue_mutex_init(void)
169 {
170         static bool tbm_surf_queue_mutex_init = false;
171
172         if (tbm_surf_queue_mutex_init)
173                 return true;
174
175         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
176                 TBM_LOG_E("fail: pthread_mutex_init\n");
177                 return false;
178         }
179
180         tbm_surf_queue_mutex_init = true;
181
182         return true;
183 }
184
185 static void
186 _tbm_surf_queue_mutex_lock(void)
187 {
188         if (!_tbm_surf_queue_mutex_init()) {
189                 TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
190                 return;
191         }
192
193         pthread_mutex_lock(&tbm_surf_queue_lock);
194 }
195
196 static void
197 _tbm_surf_queue_mutex_unlock(void)
198 {
199         pthread_mutex_unlock(&tbm_surf_queue_lock);
200 }
201
202 static void
203 _init_tbm_surf_queue_bufmgr(void)
204 {
205         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
206 }
207
208 static void
209 _deinit_tbm_surf_queue_bufmgr(void)
210 {
211         if (!g_surf_queue_bufmgr)
212                 return;
213
214         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
215         g_surf_queue_bufmgr = NULL;
216 }
217
218 static int
219 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
220 {
221         tbm_surface_queue_h old_data = NULL;
222
223         if (surface_queue == NULL) {
224                 TBM_LOG_E("error: surface_queue is NULL.\n");
225                 return 0;
226         }
227
228         if (g_surf_queue_bufmgr == NULL) {
229                 TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
230                 return 0;
231         }
232
233         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
234                 TBM_LOG_E("error: surf_queue_list is empty\n");
235                 return 0;
236         }
237
238         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
239                                 item_link) {
240                 if (old_data == surface_queue) {
241                         TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
242                         return 1;
243                 }
244         }
245
246         TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
247
248         return 0;
249 }
250
251 static queue_node *
252 _queue_node_create(void)
253 {
254         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
255
256         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
257
258         return node;
259 }
260
261 static void
262 _queue_node_delete(queue_node *node)
263 {
264         LIST_DEL(&node->item_link);
265         LIST_DEL(&node->link);
266         free(node);
267 }
268
269 static int
270 _queue_is_empty(queue *queue)
271 {
272         if (LIST_IS_EMPTY(&queue->head))
273                 return 1;
274
275         return 0;
276 }
277
278 static void
279 _queue_node_push_back(queue *queue, queue_node *node)
280 {
281         LIST_ADDTAIL(&node->item_link, &queue->head);
282         queue->count++;
283 }
284
285 static void
286 _queue_node_push_front(queue *queue, queue_node *node)
287 {
288         LIST_ADD(&node->item_link, &queue->head);
289         queue->count++;
290 }
291
292 static queue_node *
293 _queue_node_pop_front(queue *queue)
294 {
295         queue_node *node;
296
297         if (!queue->head.next) return NULL;
298         if (!queue->count) return NULL;
299
300         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
301
302         LIST_DELINIT(&node->item_link);
303         queue->count--;
304
305         return node;
306 }
307
308 static queue_node *
309 _queue_node_pop(queue *queue, queue_node *node)
310 {
311         LIST_DELINIT(&node->item_link);
312         queue->count--;
313
314         return node;
315 }
316
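/* Find the queue_node wrapping 'surface'. 'type' is a bitmask of
   FREE_QUEUE, DIRTY_QUEUE and NODE_LIST (0 searches all three); the list
   the node was found in is reported through 'out_type'. */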
317 static queue_node *
318 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
319                 tbm_surface_h surface, int *out_type)
320 {
321         queue_node *node = NULL;
322
323         if (type == 0)
324                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
325         if (out_type)
326                 *out_type = 0;
327
328         if (type & FREE_QUEUE) {
329                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
330                                          item_link) {
331                         if (node->surface == surface) {
332                                 if (out_type)
333                                         *out_type = FREE_QUEUE;
334
335                                 return node;
336                         }
337                 }
338         }
339
340         if (type & DIRTY_QUEUE) {
341                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
342                                          item_link) {
343                         if (node->surface == surface) {
344                                 if (out_type)
345                                         *out_type = DIRTY_QUEUE;
346
347                                 return node;
348                         }
349                 }
350         }
351
352         if (type & NODE_LIST) {
353                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
354                         if (node->surface == surface) {
355                                 if (out_type)
356                                         *out_type = NODE_LIST;
357
358                                 return node;
359                         }
360                 }
361         }
362
363         TBM_LOG_E("fail to get the queue_node.\n");
364
365         return NULL;
366 }
367
368 static void
369 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
370 {
371         if (node->surface) {
372                 if (surface_queue->free_cb) {
373                         surface_queue->free_cb(surface_queue,
374                                         surface_queue->alloc_cb_data,
375                                         node->surface);
376                 }
377
378                 tbm_surface_destroy(node->surface);
379         }
380
381         _queue_node_delete(node);
382 }
383
384 static void
385 _queue_init(queue *queue)
386 {
387         LIST_INITHEAD(&queue->head);
388
389         queue->count = 0;
390 }
391
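/* Helpers managing the per-event callback lists (queue_notify items). */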
392 static void
393 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
394             void *data)
395 {
396         TBM_RETURN_IF_FAIL(cb != NULL);
397
398         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
399
400         TBM_RETURN_IF_FAIL(item != NULL);
401
402         LIST_INITHEAD(&item->link);
403         item->cb = cb;
404         item->data = data;
405
406         LIST_ADDTAIL(&item->link, list);
407 }
408
409 static void
410 _notify_remove(struct list_head *list,
411                tbm_surface_queue_notify_cb cb, void *data)
412 {
413         queue_notify *item = NULL, *tmp;
414
415         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
416                 if (item->cb == cb && item->data == data) {
417                         LIST_DEL(&item->link);
418                         free(item);
419                         return;
420                 }
421         }
422
423         TBM_LOG_E("Cannot find notify\n");
424 }
425
426 static void
427 _notify_remove_all(struct list_head *list)
428 {
429         queue_notify *item = NULL, *tmp;
430
431         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
432                 LIST_DEL(&item->link);
433                 free(item);
434         }
435 }
436
437 static void
438 _notify_emit(tbm_surface_queue_h surface_queue,
439              struct list_head *list)
440 {
441         queue_notify *item = NULL, *tmp;
442
443         /*
444                 item->cb is a callback provided from outside libtbm.
445                 The tbm user may remove items from this list inside the callback,
446                 so we have to iterate with LIST_FOR_EACH_ENTRY_SAFE.
447         */
448         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
449                 item->cb(surface_queue, item->data);
450 }
451
452 static void
453 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
454             void *data)
455 {
456         TBM_RETURN_IF_FAIL(cb != NULL);
457
458         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
459
460         TBM_RETURN_IF_FAIL(item != NULL);
461
462         LIST_INITHEAD(&item->link);
463         item->cb = cb;
464         item->data = data;
465
466         LIST_ADDTAIL(&item->link, list);
467 }
468
469 static void
470 _trace_remove(struct list_head *list,
471                tbm_surface_queue_trace_cb cb, void *data)
472 {
473         queue_trace *item = NULL, *tmp;
474
475         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
476                 if (item->cb == cb && item->data == data) {
477                         LIST_DEL(&item->link);
478                         free(item);
479                         return;
480                 }
481         }
482
483         TBM_LOG_E("Cannot find notify\n");
484 }
485
486 static void
487 _trace_remove_all(struct list_head *list)
488 {
489         queue_trace *item = NULL, *tmp;
490
491         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
492                 LIST_DEL(&item->link);
493                 free(item);
494         }
495 }
496
497 static void
498 _trace_emit(tbm_surface_queue_h surface_queue,
499              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
500 {
501         queue_trace *item = NULL, *tmp;
502
503         /*
504                 item->cb is a callback provided from outside libtbm.
505                 The tbm user may remove items from this list inside the callback,
506                 so we have to iterate with LIST_FOR_EACH_ENTRY_SAFE.
507         */
508         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
509                 item->cb(surface_queue, surface, trace, item->data);
510 }
511
512 static int
513 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
514 {
515         queue_node *node = NULL;
516         int count = 0;
517
518         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
519                 if (node->type == type)
520                         count++;
521         }
522
523         return count;
524 }
525
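/* Attach 'surface' to the queue: take a reference on it, link a new node
   into the node list and make it available for dequeue via the free_queue. */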
526 static void
527 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
528                           tbm_surface_h surface)
529 {
530         queue_node *node;
531
532         node = _queue_node_create();
533         TBM_RETURN_IF_FAIL(node != NULL);
534
535         tbm_surface_internal_ref(surface);
536         node->surface = surface;
537
538         LIST_ADDTAIL(&node->link, &surface_queue->list);
539         surface_queue->num_attached++;
540         _queue_node_push_back(&surface_queue->free_queue, node);
541 }
542
543 static void
544 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
545                           tbm_surface_h surface)
546 {
547         queue_node *node;
548         int queue_type;
549
550         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
551         if (node) {
552                 _queue_delete_node(surface_queue, node);
553                 surface_queue->num_attached--;
554         }
555 }
556
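/* Default enqueue/dequeue/acquire/release implementations, used when the
   queue implementation does not override the corresponding hook. */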
557 static void
558 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
559                            queue_node *node, int push_back)
560 {
561         if (push_back)
562                 _queue_node_push_back(&surface_queue->dirty_queue, node);
563         else
564                 _queue_node_push_front(&surface_queue->dirty_queue, node);
565 }
566
567 static queue_node *
568 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
569 {
570         queue_node *node;
571
572         node = _queue_node_pop_front(&surface_queue->free_queue);
573
574         return node;
575 }
576
577 static queue_node *
578 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
579 {
580         queue_node *node;
581
582         if (_queue_is_empty(&surface_queue->dirty_queue))
583                 return NULL;
584
585         node = _queue_node_pop_front(&surface_queue->dirty_queue);
586
587         return node;
588 }
589
590 static void
591 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
592                            queue_node *node, int push_back)
593 {
594         if (push_back)
595                 _queue_node_push_back(&surface_queue->free_queue, node);
596         else
597                 _queue_node_push_front(&surface_queue->free_queue, node);
598 }
599
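/* Common constructor used by the queue implementations: set up the locks,
   the free/dirty queues and the notification lists, call impl->init() and
   register the queue on the global surf_queue_list used for validation. */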
600 static void
601 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
602                         int queue_size,
603                         int width, int height, int format,
604                         const tbm_surface_queue_interface *impl, void *data)
605 {
606         TBM_RETURN_IF_FAIL(surface_queue != NULL);
607         TBM_RETURN_IF_FAIL(impl != NULL);
608
609         if (!g_surf_queue_bufmgr)
610                 _init_tbm_surf_queue_bufmgr();
611
612         pthread_mutex_init(&surface_queue->lock, NULL);
613         pthread_cond_init(&surface_queue->free_cond, NULL);
614         pthread_cond_init(&surface_queue->dirty_cond, NULL);
615
616         surface_queue->queue_size = queue_size;
617         surface_queue->width = width;
618         surface_queue->height = height;
619         surface_queue->format = format;
620         surface_queue->impl = impl;
621         surface_queue->impl_data = data;
622         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
623
624         _queue_init(&surface_queue->free_queue);
625         _queue_init(&surface_queue->dirty_queue);
626         LIST_INITHEAD(&surface_queue->list);
627
628         LIST_INITHEAD(&surface_queue->destory_noti);
629         LIST_INITHEAD(&surface_queue->dequeuable_noti);
630         LIST_INITHEAD(&surface_queue->dequeue_noti);
631         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
632         LIST_INITHEAD(&surface_queue->acquirable_noti);
633         LIST_INITHEAD(&surface_queue->reset_noti);
634         LIST_INITHEAD(&surface_queue->trace_noti);
635
636         if (surface_queue->impl && surface_queue->impl->init)
637                 surface_queue->impl->init(surface_queue);
638
639         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
640 }
641
642 tbm_surface_queue_error_e
643 tbm_surface_queue_add_destroy_cb(
644         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
645         void *data)
646 {
647         _tbm_surf_queue_mutex_lock();
648
649         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
650                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
651
652         pthread_mutex_lock(&surface_queue->lock);
653
654         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
655
656         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
657
658         pthread_mutex_unlock(&surface_queue->lock);
659
660         _tbm_surf_queue_mutex_unlock();
661
662         return TBM_SURFACE_QUEUE_ERROR_NONE;
663 }
664
665 tbm_surface_queue_error_e
666 tbm_surface_queue_remove_destroy_cb(
667         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
668         void *data)
669 {
670         _tbm_surf_queue_mutex_lock();
671
672         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
673                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
674
675         pthread_mutex_lock(&surface_queue->lock);
676
677         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
678
679         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
680
681         pthread_mutex_unlock(&surface_queue->lock);
682
683         _tbm_surf_queue_mutex_unlock();
684
685         return TBM_SURFACE_QUEUE_ERROR_NONE;
686 }
687
688 tbm_surface_queue_error_e
689 tbm_surface_queue_add_dequeuable_cb(
690         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
691         void *data)
692 {
693         _tbm_surf_queue_mutex_lock();
694
695         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
696                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
697
698         pthread_mutex_lock(&surface_queue->lock);
699
700         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
701
702         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
703
704         pthread_mutex_unlock(&surface_queue->lock);
705
706         _tbm_surf_queue_mutex_unlock();
707
708         return TBM_SURFACE_QUEUE_ERROR_NONE;
709 }
710
711 tbm_surface_queue_error_e
712 tbm_surface_queue_remove_dequeuable_cb(
713         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
714         void *data)
715 {
716         _tbm_surf_queue_mutex_lock();
717
718         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
719                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
720
721         pthread_mutex_lock(&surface_queue->lock);
722
723         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
724
725         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
726
727         pthread_mutex_unlock(&surface_queue->lock);
728
729         _tbm_surf_queue_mutex_unlock();
730
731         return TBM_SURFACE_QUEUE_ERROR_NONE;
732 }
733
734 tbm_surface_queue_error_e
735 tbm_surface_queue_add_dequeue_cb(
736         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
737         void *data)
738 {
739         _tbm_surf_queue_mutex_lock();
740
741         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
742                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
743
744         pthread_mutex_lock(&surface_queue->lock);
745
746         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
747
748         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
749
750         pthread_mutex_unlock(&surface_queue->lock);
751
752         _tbm_surf_queue_mutex_unlock();
753
754         return TBM_SURFACE_QUEUE_ERROR_NONE;
755 }
756
757 tbm_surface_queue_error_e
758 tbm_surface_queue_remove_dequeue_cb(
759         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
760         void *data)
761 {
762         _tbm_surf_queue_mutex_lock();
763
764         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
765                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
766
767         pthread_mutex_lock(&surface_queue->lock);
768
769         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
770
771         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
772
773         pthread_mutex_unlock(&surface_queue->lock);
774
775         _tbm_surf_queue_mutex_unlock();
776
777         return TBM_SURFACE_QUEUE_ERROR_NONE;
778 }
779
780 tbm_surface_queue_error_e
781 tbm_surface_queue_add_can_dequeue_cb(
782         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
783         void *data)
784 {
785         _tbm_surf_queue_mutex_lock();
786
787         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
788                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
789
790         pthread_mutex_lock(&surface_queue->lock);
791
792         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
793
794         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
795
796         pthread_mutex_unlock(&surface_queue->lock);
797
798         _tbm_surf_queue_mutex_unlock();
799
800         return TBM_SURFACE_QUEUE_ERROR_NONE;
801 }
802
803 tbm_surface_queue_error_e
804 tbm_surface_queue_remove_can_dequeue_cb(
805         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
806         void *data)
807 {
808         _tbm_surf_queue_mutex_lock();
809
810         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
811                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
812
813         pthread_mutex_lock(&surface_queue->lock);
814
815         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
816
817         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
818
819         pthread_mutex_unlock(&surface_queue->lock);
820
821         _tbm_surf_queue_mutex_unlock();
822
823         return TBM_SURFACE_QUEUE_ERROR_NONE;
824 }
825
826 tbm_surface_queue_error_e
827 tbm_surface_queue_add_acquirable_cb(
828         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
829         void *data)
830 {
831         _tbm_surf_queue_mutex_lock();
832
833         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
834                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
835
836         pthread_mutex_lock(&surface_queue->lock);
837
838         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
839
840         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
841
842         pthread_mutex_unlock(&surface_queue->lock);
843
844         _tbm_surf_queue_mutex_unlock();
845
846         return TBM_SURFACE_QUEUE_ERROR_NONE;
847 }
848
849 tbm_surface_queue_error_e
850 tbm_surface_queue_remove_acquirable_cb(
851         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
852         void *data)
853 {
854         _tbm_surf_queue_mutex_lock();
855
856         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
857                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
858
859         pthread_mutex_lock(&surface_queue->lock);
860
861         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
862
863         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
864
865         pthread_mutex_unlock(&surface_queue->lock);
866
867         _tbm_surf_queue_mutex_unlock();
868
869         return TBM_SURFACE_QUEUE_ERROR_NONE;
870 }
871
872 tbm_surface_queue_error_e
873 tbm_surface_queue_add_trace_cb(
874         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
875         void *data)
876 {
877         _tbm_surf_queue_mutex_lock();
878
879         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
880                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
881
882         pthread_mutex_lock(&surface_queue->lock);
883
884         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
885
886         _trace_add(&surface_queue->trace_noti, trace_cb, data);
887
888         pthread_mutex_unlock(&surface_queue->lock);
889
890         _tbm_surf_queue_mutex_unlock();
891
892         return TBM_SURFACE_QUEUE_ERROR_NONE;
893 }
894
895 tbm_surface_queue_error_e
896 tbm_surface_queue_remove_trace_cb(
897         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
898         void *data)
899 {
900         _tbm_surf_queue_mutex_lock();
901
902         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
903                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
904
905         pthread_mutex_lock(&surface_queue->lock);
906
907         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
908
909         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
910
911         pthread_mutex_unlock(&surface_queue->lock);
912
913         _tbm_surf_queue_mutex_unlock();
914
915         return TBM_SURFACE_QUEUE_ERROR_NONE;
916 }
917
918 tbm_surface_queue_error_e
919 tbm_surface_queue_set_alloc_cb(
920         tbm_surface_queue_h surface_queue,
921         tbm_surface_alloc_cb alloc_cb,
922         tbm_surface_free_cb free_cb,
923         void *data)
924 {
925         _tbm_surf_queue_mutex_lock();
926
927         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
928                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
929
930         pthread_mutex_lock(&surface_queue->lock);
931
932         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
933
934         surface_queue->alloc_cb = alloc_cb;
935         surface_queue->free_cb = free_cb;
936         surface_queue->alloc_cb_data = data;
937
938         pthread_mutex_unlock(&surface_queue->lock);
939
940         _tbm_surf_queue_mutex_unlock();
941
942         return TBM_SURFACE_QUEUE_ERROR_NONE;
943 }
944
945 int
946 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
947 {
948         int width;
949
950         _tbm_surf_queue_mutex_lock();
951
952         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
953
954         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
955
956         width = surface_queue->width;
957
958         _tbm_surf_queue_mutex_unlock();
959
960         return width;
961 }
962
963 int
964 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
965 {
966         int height;
967
968         _tbm_surf_queue_mutex_lock();
969
970         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
971
972         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
973
974         height = surface_queue->height;
975
976         _tbm_surf_queue_mutex_unlock();
977
978         return height;
979 }
980
981 int
982 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
983 {
984         int format;
985
986         _tbm_surf_queue_mutex_lock();
987
988         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
989
990         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
991
992         format = surface_queue->format;
993
994         _tbm_surf_queue_mutex_unlock();
995
996         return format;
997 }
998
999 int
1000 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1001 {
1002         int queue_size;
1003
1004         _tbm_surf_queue_mutex_lock();
1005
1006         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1007
1008         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1009
1010         queue_size = surface_queue->queue_size;
1011
1012         _tbm_surf_queue_mutex_unlock();
1013
1014         return queue_size;
1015 }
1016
1017 tbm_surface_queue_error_e
1018 tbm_surface_queue_add_reset_cb(
1019         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1020         void *data)
1021 {
1022         _tbm_surf_queue_mutex_lock();
1023
1024         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1025                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1026
1027         pthread_mutex_lock(&surface_queue->lock);
1028
1029         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1030
1031         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1032
1033         pthread_mutex_unlock(&surface_queue->lock);
1034
1035         _tbm_surf_queue_mutex_unlock();
1036
1037         return TBM_SURFACE_QUEUE_ERROR_NONE;
1038 }
1039
1040 tbm_surface_queue_error_e
1041 tbm_surface_queue_remove_reset_cb(
1042         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1043         void *data)
1044 {
1045         _tbm_surf_queue_mutex_lock();
1046
1047         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1048                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1049
1050         pthread_mutex_lock(&surface_queue->lock);
1051
1052         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1053
1054         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1055
1056         pthread_mutex_unlock(&surface_queue->lock);
1057
1058         _tbm_surf_queue_mutex_unlock();
1059
1060         return TBM_SURFACE_QUEUE_ERROR_NONE;
1061 }
1062
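/* Hand a dequeued surface back to the queue as new content: the node is
   moved to the dirty_queue, waiters on dirty_cond are woken and the
   ENQUEUE trace and acquirable notifications are emitted. */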
1063 tbm_surface_queue_error_e
1064 tbm_surface_queue_enqueue(tbm_surface_queue_h
1065                           surface_queue, tbm_surface_h surface)
1066 {
1067         queue_node *node;
1068         int queue_type;
1069
1070         _tbm_surf_queue_mutex_lock();
1071
1072         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1073                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1074         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1075                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1076
1077         if (b_dump_queue)
1078                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1079
1080         pthread_mutex_lock(&surface_queue->lock);
1081
1082         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1083
1084         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1085         if (node == NULL || queue_type != NODE_LIST) {
1086                 TBM_LOG_E("tbm_surface_queue_enqueue::Surface already exists in free_queue or dirty_queue node:%p, type:%d\n",
1087                         node, queue_type);
1088                 pthread_mutex_unlock(&surface_queue->lock);
1089
1090                 _tbm_surf_queue_mutex_unlock();
1091
1092                 if (!node)
1093                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1094                 else
1095                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1096         }
1097
1098         if (surface_queue->impl && surface_queue->impl->enqueue)
1099                 surface_queue->impl->enqueue(surface_queue, node);
1100         else
1101                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1102
1103         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1104                 TBM_LOG_E("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1105                 pthread_mutex_unlock(&surface_queue->lock);
1106
1107                 _tbm_surf_queue_mutex_unlock();
1108                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1109         }
1110
1111         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1112
1113         pthread_mutex_unlock(&surface_queue->lock);
1114         pthread_cond_signal(&surface_queue->dirty_cond);
1115
1116         _tbm_surf_queue_mutex_unlock();
1117
1118         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1119
1120         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1121
1122         return TBM_SURFACE_QUEUE_ERROR_NONE;
1123 }
1124
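/* Return a dequeued surface without publishing it: the node goes back to
   the free_queue, unless it is delete-pending or the queue has shrunk
   below the number of attached buffers, in which case it is destroyed. */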
1125 tbm_surface_queue_error_e
1126 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1127                           surface_queue, tbm_surface_h surface)
1128 {
1129         queue_node *node;
1130         int queue_type;
1131
1132         _tbm_surf_queue_mutex_lock();
1133
1134         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1135                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1136         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1137                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1138
1139         pthread_mutex_lock(&surface_queue->lock);
1140
1141         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1142
1143         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1144         if (node == NULL || queue_type != NODE_LIST) {
1145                 TBM_LOG_E("tbm_surface_queue_cancel_dequeue::Surface already exists in free_queue or dirty_queue node:%p, type:%d\n",
1146                         node, queue_type);
1147                 pthread_mutex_unlock(&surface_queue->lock);
1148
1149                 _tbm_surf_queue_mutex_unlock();
1150                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1151         }
1152
1153         if (node->delete_pending) {
1154                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1155
1156                 _queue_delete_node(surface_queue, node);
1157
1158                 pthread_mutex_unlock(&surface_queue->lock);
1159
1160                 _tbm_surf_queue_mutex_unlock();
1161
1162                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1163
1164                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1165         }
1166
1167         if (surface_queue->queue_size < surface_queue->num_attached) {
1168                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1169
1170                 if (surface_queue->impl && surface_queue->impl->need_detach)
1171                         surface_queue->impl->need_detach(surface_queue, node);
1172                 else
1173                         _tbm_surface_queue_detach(surface_queue, surface);
1174
1175                 pthread_mutex_unlock(&surface_queue->lock);
1176
1177                 _tbm_surf_queue_mutex_unlock();
1178
1179                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1180
1181                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1182         }
1183
1184         if (surface_queue->impl && surface_queue->impl->release)
1185                 surface_queue->impl->release(surface_queue, node);
1186         else
1187                 _tbm_surface_queue_release(surface_queue, node, 1);
1188
1189         if (_queue_is_empty(&surface_queue->free_queue)) {
1190                 pthread_mutex_unlock(&surface_queue->lock);
1191
1192                 TBM_LOG_E("surface_queue->free_queue is empty.\n");
1193                 _tbm_surf_queue_mutex_unlock();
1194                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1195         }
1196
1197         node->type = QUEUE_NODE_TYPE_RELEASE;
1198
1199         pthread_mutex_unlock(&surface_queue->lock);
1200         pthread_cond_signal(&surface_queue->free_cond);
1201
1202         _tbm_surf_queue_mutex_unlock();
1203
1204         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1205
1206         return TBM_SURFACE_QUEUE_ERROR_NONE;
1207 }
1208
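/* Pop a free buffer for the producer. When the free_queue is empty the
   implementation may attach more buffers via need_attach(); on success the
   node becomes DEQUEUE and the dequeue trace/notify callbacks are emitted. */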
1209 tbm_surface_queue_error_e
1210 tbm_surface_queue_dequeue(tbm_surface_queue_h
1211                           surface_queue, tbm_surface_h *surface)
1212 {
1213         queue_node *node;
1214
1215         _tbm_surf_queue_mutex_lock();
1216
1217         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1218                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1219         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1220                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1221
1222         *surface = NULL;
1223
1224         pthread_mutex_lock(&surface_queue->lock);
1225
1226         if (_queue_is_empty(&surface_queue->free_queue)) {
1227                 if (surface_queue->impl && surface_queue->impl->need_attach)
1228                         surface_queue->impl->need_attach(surface_queue);
1229
1230                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1231                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1232                         pthread_mutex_unlock(&surface_queue->lock);
1233                         _tbm_surf_queue_mutex_unlock();
1234                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1235                 }
1236         }
1237
1238         if (surface_queue->impl && surface_queue->impl->dequeue)
1239                 node = surface_queue->impl->dequeue(surface_queue);
1240         else
1241                 node = _tbm_surface_queue_dequeue(surface_queue);
1242
1243         if (node == NULL || node->surface == NULL) {
1244                 TBM_LOG_E("_queue_node_pop_front failed\n");
1245                 pthread_mutex_unlock(&surface_queue->lock);
1246
1247                 _tbm_surf_queue_mutex_unlock();
1248                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1249         }
1250
1251         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1252         *surface = node->surface;
1253
1254         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1255
1256         pthread_mutex_unlock(&surface_queue->lock);
1257
1258         _tbm_surf_queue_mutex_unlock();
1259
1260         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1261
1262         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1263
1264         return TBM_SURFACE_QUEUE_ERROR_NONE;
1265 }
1266
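/* Return 1 when a buffer can be dequeued. With 'wait' set and at least one
   buffer held by the consumer (an ACQUIRE node), block on free_cond until
   a buffer is released. */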
1267 int
1268 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1269 {
1270         _tbm_surf_queue_mutex_lock();
1271
1272         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1273
1274         _tbm_surf_queue_mutex_unlock();
1275
1276         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1277
1278         _tbm_surf_queue_mutex_lock();
1279
1280         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1281
1282         pthread_mutex_lock(&surface_queue->lock);
1283
1284         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1285
1286         if (_queue_is_empty(&surface_queue->free_queue)) {
1287                 if (surface_queue->impl && surface_queue->impl->need_attach)
1288                         surface_queue->impl->need_attach(surface_queue);
1289
1290                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1291                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1292                         pthread_mutex_unlock(&surface_queue->lock);
1293                         _tbm_surf_queue_mutex_unlock();
1294                         return 0;
1295                 }
1296         }
1297
1298         if (!_queue_is_empty(&surface_queue->free_queue)) {
1299                 pthread_mutex_unlock(&surface_queue->lock);
1300                 _tbm_surf_queue_mutex_unlock();
1301                 return 1;
1302         }
1303
1304         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1305                                                 QUEUE_NODE_TYPE_ACQUIRE)) {
1306                 _tbm_surf_queue_mutex_unlock();
1307                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1308                 _tbm_surf_queue_mutex_lock();
1309
1310                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1311                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1312                         pthread_mutex_unlock(&surface_queue->lock);
1313                         _tbm_surf_queue_mutex_unlock();
1314                         return 0;
1315                 }
1316
1317                 pthread_mutex_unlock(&surface_queue->lock);
1318                 _tbm_surf_queue_mutex_unlock();
1319                 return 1;
1320         }
1321
1322         pthread_mutex_unlock(&surface_queue->lock);
1323         _tbm_surf_queue_mutex_unlock();
1324         return 0;
1325 }
1326
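/* Give an acquired surface back for reuse: the node returns to the
   free_queue (or is destroyed when delete-pending or the queue shrank),
   free_cond is signalled and the RELEASE trace and dequeuable
   notifications are emitted. */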
1327 tbm_surface_queue_error_e
1328 tbm_surface_queue_release(tbm_surface_queue_h
1329                           surface_queue, tbm_surface_h surface)
1330 {
1331         queue_node *node;
1332         int queue_type;
1333
1334         _tbm_surf_queue_mutex_lock();
1335
1336         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1337                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1338         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1339                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1340
1341         pthread_mutex_lock(&surface_queue->lock);
1342
1343         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1344
1345         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1346         if (node == NULL || queue_type != NODE_LIST) {
1347                 TBM_LOG_E("tbm_surface_queue_release::Surface already exists in free_queue or dirty_queue node:%p, type:%d\n",
1348                         node, queue_type);
1349                 pthread_mutex_unlock(&surface_queue->lock);
1350
1351                 _tbm_surf_queue_mutex_unlock();
1352
1353                 if (!node)
1354                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1355                 else
1356                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1357         }
1358
1359         if (node->delete_pending) {
1360                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1361
1362                 _queue_delete_node(surface_queue, node);
1363
1364                 pthread_mutex_unlock(&surface_queue->lock);
1365
1366                 _tbm_surf_queue_mutex_unlock();
1367
1368                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1369
1370                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1371         }
1372
1373         if (surface_queue->queue_size < surface_queue->num_attached) {
1374                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1375
1376                 if (surface_queue->impl && surface_queue->impl->need_detach)
1377                         surface_queue->impl->need_detach(surface_queue, node);
1378                 else
1379                         _tbm_surface_queue_detach(surface_queue, surface);
1380
1381                 pthread_mutex_unlock(&surface_queue->lock);
1382
1383                 _tbm_surf_queue_mutex_unlock();
1384
1385                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1386
1387                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1388         }
1389
1390         if (surface_queue->impl && surface_queue->impl->release)
1391                 surface_queue->impl->release(surface_queue, node);
1392         else
1393                 _tbm_surface_queue_release(surface_queue, node, 1);
1394
1395         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1396                 TBM_LOG_E("release surface(%p) but surface isn't present in the free_queue\n", surface);
1397                 pthread_mutex_unlock(&surface_queue->lock);
1398
1399                 _tbm_surf_queue_mutex_unlock();
1400                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1401         }
1402
1403         node->type = QUEUE_NODE_TYPE_RELEASE;
1404
1405         pthread_mutex_unlock(&surface_queue->lock);
1406         pthread_cond_signal(&surface_queue->free_cond);
1407
1408         _tbm_surf_queue_mutex_unlock();
1409
1410         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1411
1412         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1413
1414         return TBM_SURFACE_QUEUE_ERROR_NONE;
1415 }
1416
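/* Put an acquired surface back onto the dirty_queue so it can be acquired
   again; dirty_cond is signalled and the CANCEL_ACQUIRE trace and
   acquirable notifications are emitted. */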
1417 tbm_surface_queue_error_e
1418 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1419                         surface_queue, tbm_surface_h surface)
1420 {
1421         queue_node *node;
1422         int queue_type;
1423
1424         _tbm_surf_queue_mutex_lock();
1425
1426         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1427                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1428         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1429                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1430
1431         pthread_mutex_lock(&surface_queue->lock);
1432
1433         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1434
1435         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1436         if (node == NULL || queue_type != NODE_LIST) {
1437                 TBM_LOG_E("tbm_surface_queue_cancel_acquire::Surface already exists in free_queue or dirty_queue node:%p, type:%d\n",
1438                         node, queue_type);
1439                 pthread_mutex_unlock(&surface_queue->lock);
1440
1441                 _tbm_surf_queue_mutex_unlock();
1442                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1443         }
1444
1445         if (surface_queue->impl && surface_queue->impl->enqueue)
1446                 surface_queue->impl->enqueue(surface_queue, node);
1447         else
1448                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1449
1450         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1451                 TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
1452                 pthread_mutex_unlock(&surface_queue->lock);
1453
1454                 _tbm_surf_queue_mutex_unlock();
1455                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1456         }
1457
1458         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1459
1460         pthread_mutex_unlock(&surface_queue->lock);
1461         pthread_cond_signal(&surface_queue->dirty_cond);
1462
1463         _tbm_surf_queue_mutex_unlock();
1464
1465         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1466
1467         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1468
1469         return TBM_SURFACE_QUEUE_ERROR_NONE;
1470 }
1471
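/* Pop the oldest enqueued surface from the dirty_queue for the consumer;
   returns TBM_SURFACE_QUEUE_ERROR_EMPTY when nothing has been enqueued. */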
1472 tbm_surface_queue_error_e
1473 tbm_surface_queue_acquire(tbm_surface_queue_h
1474                           surface_queue, tbm_surface_h *surface)
1475 {
1476         queue_node *node;
1477
1478         _tbm_surf_queue_mutex_lock();
1479
1480         *surface = NULL;
1481
1482         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1483                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1484         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1485                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1486
1487         pthread_mutex_lock(&surface_queue->lock);
1488
1489         if (surface_queue->impl && surface_queue->impl->acquire)
1490                 node = surface_queue->impl->acquire(surface_queue);
1491         else
1492                 node = _tbm_surface_queue_acquire(surface_queue);
1493
1494         if (node == NULL || node->surface == NULL) {
1495                 TBM_LOG_E("_queue_node_pop_front failed\n");
1496                 pthread_mutex_unlock(&surface_queue->lock);
1497
1498                 _tbm_surf_queue_mutex_unlock();
1499                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1500         }
1501
1502         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1503
1504         *surface = node->surface;
1505
1506         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1507
1508         pthread_mutex_unlock(&surface_queue->lock);
1509
1510         _tbm_surf_queue_mutex_unlock();
1511
1512         if (b_dump_queue)
1513                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1514
1515         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1516
1517         return TBM_SURFACE_QUEUE_ERROR_NONE;
1518 }
1519
1520 int
1521 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1522 {
1523         _tbm_surf_queue_mutex_lock();
1524
1525         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1526
1527         pthread_mutex_lock(&surface_queue->lock);
1528
1529         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1530
1531         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1532                 pthread_mutex_unlock(&surface_queue->lock);
1533                 _tbm_surf_queue_mutex_unlock();
1534                 return 1;
1535         }
1536
1537         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1538                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1539                 _tbm_surf_queue_mutex_unlock();
1540                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1541                 _tbm_surf_queue_mutex_lock();
1542
1543                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1544                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1545                         pthread_mutex_unlock(&surface_queue->lock);
1546                         _tbm_surf_queue_mutex_unlock();
1547                         return 0;
1548                 }
1549
1550                 pthread_mutex_unlock(&surface_queue->lock);
1551                 _tbm_surf_queue_mutex_unlock();
1552                 return 1;
1553         }
1554
1555         pthread_mutex_unlock(&surface_queue->lock);
1556         _tbm_surf_queue_mutex_unlock();
1557         return 0;
1558 }
1559
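/* Destroy the queue: unregister it from the global list, delete every
   attached node and surface, emit the destroy notification, drop all
   callback lists and free the queue itself. The shared bufmgr is
   de-initialized once the last queue is gone. */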
1560 void
1561 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1562 {
1563         queue_node *node = NULL, *tmp;
1564
1565         _tbm_surf_queue_mutex_lock();
1566
1567         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1568
1569         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1570
1571         LIST_DEL(&surface_queue->item_link);
1572
1573         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1574                 _queue_delete_node(surface_queue, node);
1575
1576         if (surface_queue->impl && surface_queue->impl->destroy)
1577                 surface_queue->impl->destroy(surface_queue);
1578
1579         _notify_emit(surface_queue, &surface_queue->destory_noti);
1580
1581         _notify_remove_all(&surface_queue->destory_noti);
1582         _notify_remove_all(&surface_queue->dequeuable_noti);
1583         _notify_remove_all(&surface_queue->dequeue_noti);
1584         _notify_remove_all(&surface_queue->can_dequeue_noti);
1585         _notify_remove_all(&surface_queue->acquirable_noti);
1586         _notify_remove_all(&surface_queue->reset_noti);
1587         _trace_remove_all(&surface_queue->trace_noti);
1588
1589         pthread_mutex_destroy(&surface_queue->lock);
1590
1591         free(surface_queue);
1592
1593         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1594                 _deinit_tbm_surf_queue_bufmgr();
1595
1596         _tbm_surf_queue_mutex_unlock();
1597 }
1598
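/* Change width/height/format and flush the queue. In GUARANTEE_CYCLE mode
   only the free buffers are destroyed and in-flight nodes are marked
   delete-pending; otherwise every node is destroyed immediately. The reset
   notification is emitted afterwards. */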
1599 tbm_surface_queue_error_e
1600 tbm_surface_queue_reset(tbm_surface_queue_h
1601                         surface_queue, int width, int height, int format)
1602 {
1603         queue_node *node = NULL, *tmp;
1604
1605         _tbm_surf_queue_mutex_lock();
1606
1607         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1608                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1609
1610         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1611
1612         if (width == surface_queue->width && height == surface_queue->height &&
1613                 format == surface_queue->format) {
1614                 _tbm_surf_queue_mutex_unlock();
1615                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1616         }
1617
1618         pthread_mutex_lock(&surface_queue->lock);
1619
1620         surface_queue->width = width;
1621         surface_queue->height = height;
1622         surface_queue->format = format;
1623
1624         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1625                 /* Destroy surfaces in free_queue; mark in-flight nodes for deferred deletion */
1626                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1627                         _queue_delete_node(surface_queue, node);
1628
1629                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1630                         node->delete_pending = 1;
1631         } else {
1632                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1633                         _queue_delete_node(surface_queue, node);
1634
1635                 _queue_init(&surface_queue->dirty_queue);
1636                 LIST_INITHEAD(&surface_queue->list);
1637         }
1638
1639         /* Reset queue */
1640         _queue_init(&surface_queue->free_queue);
1641
1642         surface_queue->num_attached = 0;
1643
1644         if (surface_queue->impl && surface_queue->impl->reset)
1645                 surface_queue->impl->reset(surface_queue);
1646
1647         pthread_mutex_unlock(&surface_queue->lock);
1648         pthread_cond_signal(&surface_queue->free_cond);
1649
1650         _tbm_surf_queue_mutex_unlock();
1651
1652         _notify_emit(surface_queue, &surface_queue->reset_noti);
1653
1654         return TBM_SURFACE_QUEUE_ERROR_NONE;
1655 }
1656
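/* Emits the reset notification without modifying any queue state. */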
1657 tbm_surface_queue_error_e
1658 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1659 {
1660         _tbm_surf_queue_mutex_lock();
1661
1662         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1663                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1664
1665         _tbm_surf_queue_mutex_unlock();
1666
1667         _notify_emit(surface_queue, &surface_queue->reset_noti);
1668
1669         return TBM_SURFACE_QUEUE_ERROR_NONE;
1670 }
1671
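/*
 * Changes the maximum number of buffers managed by the queue.  With flush
 * set the queue is rebuilt from scratch (same paths as
 * tbm_surface_queue_flush); without it, surplus free buffers are detached
 * when the size shrinks and the new size simply takes effect afterwards.
 */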
1672 tbm_surface_queue_error_e
1673 tbm_surface_queue_set_size(tbm_surface_queue_h
1674                         surface_queue, int queue_size, int flush)
1675 {
1676         queue_node *node = NULL, *tmp;
1677
1678         _tbm_surf_queue_mutex_lock();
1679
1680         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1681                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1682         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1683                                         TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1684
1685         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1686
1687         if ((surface_queue->queue_size == queue_size) && !flush) {
1688                 _tbm_surf_queue_mutex_unlock();
1689                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1690         }
1691
1692         pthread_mutex_lock(&surface_queue->lock);
1693
1694         if (flush) {
1695                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1696                         /* Destroy surfaces in free_queue; mark in-flight nodes for deferred deletion */
1697                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1698                                 _queue_delete_node(surface_queue, node);
1699
1700                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1701                                 node->delete_pending = 1;
1702                 } else {
1703                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1704                                 _queue_delete_node(surface_queue, node);
1705
1706                         _queue_init(&surface_queue->dirty_queue);
1707                         LIST_INITHEAD(&surface_queue->list);
1708                 }
1709
1710                 /* Reset queue */
1711                 _queue_init(&surface_queue->free_queue);
1712
1713                 surface_queue->num_attached = 0;
1714                 surface_queue->queue_size = queue_size;
1715
1716                 if (surface_queue->impl && surface_queue->impl->reset)
1717                         surface_queue->impl->reset(surface_queue);
1718
1719                 pthread_mutex_unlock(&surface_queue->lock);
1720                 pthread_cond_signal(&surface_queue->free_cond);
1721
1722                 _tbm_surf_queue_mutex_unlock();
1723
1724                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1725
1726                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1727         } else {
1728                 if (surface_queue->queue_size > queue_size) {
1729                         int need_del = surface_queue->queue_size - queue_size;
1730
1731                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1732                                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1733
1734                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1735                                         surface_queue->impl->need_detach(surface_queue, node);
1736                                 else
1737                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1738
1739                                 need_del--;
1740                                 if (need_del == 0)
1741                                         break;
1742                         }
1743                 }
1744
1745                 surface_queue->queue_size = queue_size;
1746
1747                 pthread_mutex_unlock(&surface_queue->lock);
1748
1749                 _tbm_surf_queue_mutex_unlock();
1750
1751                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1752         }
1753 }
1754
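/*
 * Detaches only the buffers sitting idle in free_queue; buffers that are
 * currently dequeued, enqueued or acquired are left untouched.
 */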
1755 tbm_surface_queue_error_e
1756 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1757 {
1758         queue_node *node = NULL;
1759
1760         _tbm_surf_queue_mutex_lock();
1761
1762         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1763                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1764
1765         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1766
1767         if (surface_queue->num_attached == 0) {
1768                 _tbm_surf_queue_mutex_unlock();
1769                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1770         }
1771
1772         pthread_mutex_lock(&surface_queue->lock);
1773
1774         /* Destroy surfaces in free_queue */
1775         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1776                 if (surface_queue->impl && surface_queue->impl->need_detach)
1777                         surface_queue->impl->need_detach(surface_queue, node);
1778                 else
1779                         _tbm_surface_queue_detach(surface_queue, node->surface);
1780         }
1781
1782         /* Reset queue */
1783         _queue_init(&surface_queue->free_queue);
1784
1785         pthread_mutex_unlock(&surface_queue->lock);
1786         _tbm_surf_queue_mutex_unlock();
1787
1788         return TBM_SURFACE_QUEUE_ERROR_NONE;
1789 }
1790
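/*
 * Flushes every buffer from the queue while keeping the current
 * width/height/format, then emits reset_noti.  Under
 * TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE in-flight nodes are marked
 * delete_pending instead of being freed immediately.
 */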
1791 tbm_surface_queue_error_e
1792 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1793 {
1794         queue_node *node = NULL, *tmp;
1795
1796         _tbm_surf_queue_mutex_lock();
1797
1798         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1799                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1800
1801         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1802
1803         if (surface_queue->num_attached == 0) {
1804                 _tbm_surf_queue_mutex_unlock();
1805                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1806         }
1807
1808         pthread_mutex_lock(&surface_queue->lock);
1809
1810         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1811                 /* Destroy surfaces in free_queue; mark in-flight nodes for deferred deletion */
1812                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1813                         _queue_delete_node(surface_queue, node);
1814
1815                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1816                         node->delete_pending = 1;
1817         } else {
1818                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1819                         _queue_delete_node(surface_queue, node);
1820
1821                 _queue_init(&surface_queue->dirty_queue);
1822                 LIST_INITHEAD(&surface_queue->list);
1823         }
1824
1825         /* Reset queue */
1826         _queue_init(&surface_queue->free_queue);
1827
1828         surface_queue->num_attached = 0;
1829
1830         if (surface_queue->impl && surface_queue->impl->reset)
1831                 surface_queue->impl->reset(surface_queue);
1832
1833         pthread_mutex_unlock(&surface_queue->lock);
1834         pthread_cond_signal(&surface_queue->free_cond);
1835
1836         _tbm_surf_queue_mutex_unlock();
1837
1838         _notify_emit(surface_queue, &surface_queue->reset_noti);
1839
1840         return TBM_SURFACE_QUEUE_ERROR_NONE;
1841 }
1842
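/*
 * Copies every attached surface into 'surfaces' and reports the count in
 * 'num'.  Passing NULL for 'surfaces' only counts, so a caller can size the
 * array first.  Caller-side sketch (hypothetical, error handling omitted):
 *
 *     int num = 0;
 *     tbm_surface_queue_get_surfaces(queue, NULL, &num);
 *     tbm_surface_h *surfs = calloc(num, sizeof(tbm_surface_h));
 *     tbm_surface_queue_get_surfaces(queue, surfs, &num);
 */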
1843 tbm_surface_queue_error_e
1844 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1845                         tbm_surface_h *surfaces, int *num)
1846 {
1847         queue_node *node = NULL;
1848
1849         _tbm_surf_queue_mutex_lock();
1850
1851         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1852                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1853         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1854                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1855
1856         *num = 0;
1857
1858         pthread_mutex_lock(&surface_queue->lock);
1859
1860         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1861                 if (surfaces)
1862                         surfaces[*num] = node->surface;
1863
1864                 *num = *num + 1;
1865         }
1866
1867         pthread_mutex_unlock(&surface_queue->lock);
1868
1869         _tbm_surf_queue_mutex_unlock();
1870
1871         return TBM_SURFACE_QUEUE_ERROR_NONE;
1872 }
1873
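/*
 * Counts the nodes whose last transition matches 'trace'
 * (dequeue/enqueue/acquire/release) and returns the count through 'num'.
 */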
1874 tbm_surface_queue_error_e
1875 tbm_surface_queue_get_trace_surface_num(
1876                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1877 {
1878         _tbm_surf_queue_mutex_lock();
1879
1880         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1881                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1882         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1883                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1884
1885         *num = 0;
1886
1887         pthread_mutex_lock(&surface_queue->lock);
1888
1889         switch (trace) {
1890         case TBM_SURFACE_QUEUE_TRACE_NONE:
1891                 *num = 0;
1892                 break;
1893         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1894                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1895                 break;
1896         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1897                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1898                 break;
1899         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1900                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1901                 break;
1902         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1903                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1904                 break;
1905         default:
1906                 break;
1907         }
1908
1909         pthread_mutex_unlock(&surface_queue->lock);
1910
1911         _tbm_surf_queue_mutex_unlock();
1912
1913         return TBM_SURFACE_QUEUE_ERROR_NONE;
1914 }
1915
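/*
 * Default queue backend: buffers are allocated lazily with the flags given
 * at creation time (or through the user alloc_cb), and no ordering is
 * imposed between dequeue and enqueue.
 */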
1916 typedef struct {
1917         int flags;
1918 } tbm_queue_default;
1919
1920 static void
1921 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1922 {
1923         free(surface_queue->impl_data);
1924 }
1925
1926 static void
1927 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
1928 {
1929         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
1930         tbm_surface_h surface;
1931
1932         if (surface_queue->queue_size == surface_queue->num_attached)
1933                 return;
1934
1935         if (surface_queue->alloc_cb) {
1936                 pthread_mutex_unlock(&surface_queue->lock);
1937                 _tbm_surf_queue_mutex_unlock();
1938                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1939                 _tbm_surf_queue_mutex_lock();
1940                 pthread_mutex_lock(&surface_queue->lock);
1941
1942                 /* silent return */
1943                 if (!surface)
1944                         return;
1945
1946                 tbm_surface_internal_ref(surface);
1947         } else {
1948                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1949                                 surface_queue->height,
1950                                 surface_queue->format,
1951                                 data->flags);
1952                 TBM_RETURN_IF_FAIL(surface != NULL);
1953         }
1954
1955         _tbm_surface_queue_attach(surface_queue, surface);
1956         tbm_surface_internal_unref(surface);
1957 }
1958
1959 static const tbm_surface_queue_interface tbm_queue_default_impl = {
1960         NULL,                           /*__tbm_queue_default_init*/
1961         NULL,                           /*__tbm_queue_default_reset*/
1962         __tbm_queue_default_destroy,
1963         __tbm_queue_default_need_attach,
1964         NULL,                           /*__tbm_queue_default_enqueue*/
1965         NULL,                           /*__tbm_queue_default_release*/
1966         NULL,                           /*__tbm_queue_default_dequeue*/
1967         NULL,                           /*__tbm_queue_default_acquire*/
1968         NULL,                           /*__tbm_queue_default_need_detach*/
1969 };
1970
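/*
 * Creates a queue backed by the default implementation above.  A minimal
 * producer/consumer sketch from the caller's side (illustrative only; the
 * size, format and TBM_BO_DEFAULT flag are assumptions, error handling is
 * omitted):
 *
 *     tbm_surface_queue_h q = tbm_surface_queue_create(3, 720, 1280,
 *                                 TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *     tbm_surface_h surf;
 *
 *     if (tbm_surface_queue_can_dequeue(q, 1) &&
 *         tbm_surface_queue_dequeue(q, &surf) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *         // ... render into surf ...
 *         tbm_surface_queue_enqueue(q, surf);          // producer side
 *     }
 *
 *     if (tbm_surface_queue_acquire(q, &surf) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *         // ... display surf ...
 *         tbm_surface_queue_release(q, surf);          // consumer side
 *     }
 *
 *     tbm_surface_queue_destroy(q);
 */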
1971 tbm_surface_queue_h
1972 tbm_surface_queue_create(int queue_size, int width,
1973                          int height, int format, int flags)
1974 {
1975         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1976         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1977         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1978         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1979
1980         _tbm_surf_queue_mutex_lock();
1981
1982         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1983                                             sizeof(struct _tbm_surface_queue));
1984         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1985
1986         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1987
1988         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
1989                                   sizeof(tbm_queue_default));
1990         if (data == NULL) {
1991                 TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
1992                 free(surface_queue);
1993                 _tbm_surf_queue_mutex_unlock();
1994                 return NULL;
1995         }
1996
1997         data->flags = flags;
1998         _tbm_surface_queue_init(surface_queue,
1999                                 queue_size,
2000                                 width, height, format,
2001                                 &tbm_queue_default_impl, data);
2002
2003         _tbm_surf_queue_mutex_unlock();
2004
2005         return surface_queue;
2006 }
2007
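/*
 * Sequence queue backend: buffers handed out by dequeue are remembered in
 * dequeue_list, and only the oldest dequeued buffer is moved to the dirty
 * queue on enqueue, so presentation follows dequeue order.
 */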
2008 typedef struct {
2009         int flags;
2010         queue dequeue_list;
2011 } tbm_queue_sequence;
2012
2013 static void
2014 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2015 {
2016         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2017
2018         _queue_init(&data->dequeue_list);
2019 }
2020
2021 static void
2022 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2023 {
2024         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2025
2026         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2027                 return;
2028
2029         _queue_init(&data->dequeue_list);
2030 }
2031
2032 static void
2033 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2034 {
2035         free(surface_queue->impl_data);
2036 }
2037
2038 static void
2039 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
2040 {
2041         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2042         tbm_surface_h surface;
2043
2044         if (surface_queue->queue_size == surface_queue->num_attached)
2045                 return;
2046
2047         if (surface_queue->alloc_cb) {
2048                 pthread_mutex_unlock(&surface_queue->lock);
2049                 _tbm_surf_queue_mutex_unlock();
2050                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2051                 _tbm_surf_queue_mutex_lock();
2052                 pthread_mutex_lock(&surface_queue->lock);
2053
2054                 /* silent return */
2055                 if (!surface)
2056                         return;
2057
2058                 tbm_surface_internal_ref(surface);
2059         } else {
2060                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2061                                 surface_queue->height,
2062                                 surface_queue->format,
2063                                 data->flags);
2064                 TBM_RETURN_IF_FAIL(surface != NULL);
2065         }
2066
2067         _tbm_surface_queue_attach(surface_queue, surface);
2068         tbm_surface_internal_unref(surface);
2069 }
2070
2071 static void
2072 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2073                              queue_node *node)
2074 {
2075         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2076         queue_node *first = NULL;
2077
        /* only the oldest dequeued buffer may be enqueued; bail out if nothing was dequeued */
        if (_queue_is_empty(&data->dequeue_list))
                return;

2078         first = container_of(data->dequeue_list.head.next, first, item_link);
2079         if (first != node) {
2080                 return;
2081         }
2082
2083         node->priv_flags = 0;
2084
2085         _queue_node_pop(&data->dequeue_list, node);
2086         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2087 }
2088
2089 static void
2090 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2091                                 queue_node *node)
2092 {
2093         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2094
2095         if (node->priv_flags) {
2096                 node->priv_flags = 0;
2097                 _queue_node_pop(&data->dequeue_list, node);
2098         }
2099
2100         _tbm_surface_queue_release(surface_queue, node, 1);
2101 }
2102
2103 static queue_node *
2104 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2105                              surface_queue)
2106 {
2107         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2108         queue_node *node;
2109
2110         node = _tbm_surface_queue_dequeue(surface_queue);
2111         if (node) {
2112                 _queue_node_push_back(&data->dequeue_list, node);
2113                 node->priv_flags = 1;
2114         }
2115
2116         return node;
2117 }
2118
2119 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2120         __tbm_queue_sequence_init,
2121         __tbm_queue_sequence_reset,
2122         __tbm_queue_sequence_destroy,
2123         __tbm_queue_sequence_need_attach,
2124         __tbm_queue_sequence_enqueue,
2125         __tbm_queue_sequence_release,
2126         __tbm_queue_sequence_dequeue,
2127         NULL,                                   /*__tbm_queue_sequence_acquire*/
2128         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2129 };
2130
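/*
 * Same as tbm_surface_queue_create(), but backed by the sequence
 * implementation above.
 */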
2131 tbm_surface_queue_h
2132 tbm_surface_queue_sequence_create(int queue_size, int width,
2133                                   int height, int format, int flags)
2134 {
2135         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2136         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
2137         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
2138         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2139
2140         _tbm_surf_queue_mutex_lock();
2141
2142         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2143                                             sizeof(struct _tbm_surface_queue));
2144         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2145
2146         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
2147
2148         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2149                                    sizeof(tbm_queue_sequence));
2150         if (data == NULL) {
2151                 TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
2152                 free(surface_queue);
2153                 _tbm_surf_queue_mutex_unlock();
2154                 return NULL;
2155         }
2156
2157         data->flags = flags;
2158         _tbm_surface_queue_init(surface_queue,
2159                                 queue_size,
2160                                 width, height, format,
2161                                 &tbm_queue_sequence_impl, data);
2162
2163         _tbm_surf_queue_mutex_unlock();
2164
2165         return surface_queue;
2166 }
2167
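/*
 * Adds the given mode bits to the queue.  Passing
 * TBM_SURFACE_QUEUE_MODE_NONE clears every previously set mode.
 */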
2168 tbm_surface_queue_error_e
2169 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2170                                   int modes)
2171 {
2172         _tbm_surf_queue_mutex_lock();
2173
2174         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2175                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2176
2177         pthread_mutex_lock(&surface_queue->lock);
2178
2179         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2180                 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE; /* clear all accumulated modes */
2181         else
2182                 surface_queue->modes |= modes;
2183
2184         pthread_mutex_unlock(&surface_queue->lock);
2185
2186         _tbm_surf_queue_mutex_unlock();
2187
2188         return TBM_SURFACE_QUEUE_ERROR_NONE;
2189 }