Fix atomicity svace problem
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 #define TBM_QUEUE_DEBUG 0
42
43 #ifdef TRACE
44 #define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
45 #else
46 #define TBM_QUEUE_TRACE(fmt, ...)
47 #endif /* TRACE */
48
49 #if TBM_QUEUE_DEBUG
50 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
51 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
52 #else
53 #define TBM_LOCK()
54 #define TBM_UNLOCK()
55 #endif
56
57 static tbm_bufmgr g_surf_queue_bufmgr;
58 static pthread_mutex_t tbm_surf_queue_lock;
59 void _tbm_surface_queue_mutex_unlock(void);
60
61 /* check condition */
62 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
63         if (!(cond)) {\
64                 TBM_LOG_E("'%s' failed.\n", #cond);\
65                 _tbm_surf_queue_mutex_unlock();\
66                 return;\
67         } \
68 }
69
70 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
71         if (!(cond)) {\
72                 TBM_LOG_E("'%s' failed.\n", #cond);\
73                 _tbm_surf_queue_mutex_unlock();\
74                 return val;\
75         } \
76 }
77
78 typedef enum _queue_node_type {
79         QUEUE_NODE_TYPE_NONE,
80         QUEUE_NODE_TYPE_DEQUEUE,
81         QUEUE_NODE_TYPE_ENQUEUE,
82         QUEUE_NODE_TYPE_ACQUIRE,
83         QUEUE_NODE_TYPE_RELEASE
84 } Queue_Node_Type;
85
86 typedef struct {
87         struct list_head head;
88         int count;
89 } queue;
90
91 typedef struct {
92         tbm_surface_h surface;
93
94         struct list_head item_link;
95         struct list_head link;
96
97         Queue_Node_Type type;
98
99         unsigned int priv_flags;        /*for each queue*/
100
101         int delete_pending;
102 } queue_node;
103
104 typedef struct {
105         struct list_head link;
106
107         tbm_surface_queue_notify_cb cb;
108         void *data;
109 } queue_notify;
110
111 typedef struct {
112         struct list_head link;
113
114         tbm_surface_queue_trace_cb cb;
115         void *data;
116 } queue_trace;
117
118 typedef struct _tbm_surface_queue_interface {
119         void (*init)(tbm_surface_queue_h queue);
120         void (*reset)(tbm_surface_queue_h queue);
121         void (*destroy)(tbm_surface_queue_h queue);
122         void (*need_attach)(tbm_surface_queue_h queue);
123
124         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
125         void (*release)(tbm_surface_queue_h queue, queue_node *node);
126         queue_node *(*dequeue)(tbm_surface_queue_h queue);
127         queue_node *(*acquire)(tbm_surface_queue_h queue);
128         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
129 } tbm_surface_queue_interface;
130
131 struct _tbm_surface_queue {
132         int width;
133         int height;
134         int format;
135         int queue_size;
136         int num_attached;
137
138         queue free_queue;
139         queue dirty_queue;
140         struct list_head list;
141
142         struct list_head destory_noti;
143         struct list_head dequeuable_noti;
144         struct list_head dequeue_noti;
145         struct list_head can_dequeue_noti;
146         struct list_head acquirable_noti;
147         struct list_head reset_noti;
148         struct list_head trace_noti;
149
150         pthread_mutex_t lock;
151         pthread_cond_t free_cond;
152         pthread_cond_t dirty_cond;
153
154         const tbm_surface_queue_interface *impl;
155         void *impl_data;
156
157         //For external buffer allocation
158         tbm_surface_alloc_cb alloc_cb;
159         tbm_surface_free_cb free_cb;
160         void *alloc_cb_data;
161
162         struct list_head item_link; /* link of surface queue */
163
164         int modes;
165 };
166
167 static bool
168 _tbm_surf_queue_mutex_init(void)
169 {
170         static bool tbm_surf_queue_mutex_init = false;
171
172         if (tbm_surf_queue_mutex_init)
173                 return true;
174
175         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
176                 TBM_LOG_E("fail: pthread_mutex_init\n");
177                 return false;
178         }
179
180         tbm_surf_queue_mutex_init = true;
181
182         return true;
183 }
184
185 static void
186 _tbm_surf_queue_mutex_lock(void)
187 {
188         if (!_tbm_surf_queue_mutex_init()) {
189                 TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
190                 return;
191         }
192
193         pthread_mutex_lock(&tbm_surf_queue_lock);
194 }
195
196 static void
197 _tbm_surf_queue_mutex_unlock(void)
198 {
199         pthread_mutex_unlock(&tbm_surf_queue_lock);
200 }
201
202 static void
203 _init_tbm_surf_queue_bufmgr(void)
204 {
205         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
206 }
207
208 static void
209 _deinit_tbm_surf_queue_bufmgr(void)
210 {
211         if (!g_surf_queue_bufmgr)
212                 return;
213
214         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
215         g_surf_queue_bufmgr = NULL;
216 }
217
218 static int
219 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
220 {
221         tbm_surface_queue_h old_data = NULL;
222
223         if (surface_queue == NULL) {
224                 TBM_LOG_E("error: surface_queue is NULL.\n");
225                 return 0;
226         }
227
228         if (g_surf_queue_bufmgr == NULL) {
229                 TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
230                 return 0;
231         }
232
233         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
234                 TBM_LOG_E("error: surf_queue_list is empty\n");
235                 return 0;
236         }
237
238         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
239                                 item_link) {
240                 if (old_data == surface_queue) {
241                         TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
242                         return 1;
243                 }
244         }
245
246         TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
247
248         return 0;
249 }
250
251 static queue_node *
252 _queue_node_create(void)
253 {
254         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
255
256         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
257
258         return node;
259 }
260
261 static void
262 _queue_node_delete(queue_node *node)
263 {
264         LIST_DEL(&node->item_link);
265         LIST_DEL(&node->link);
266         free(node);
267 }
268
269 static int
270 _queue_is_empty(queue *queue)
271 {
272         if (LIST_IS_EMPTY(&queue->head))
273                 return 1;
274
275         return 0;
276 }
277
278 static void
279 _queue_node_push_back(queue *queue, queue_node *node)
280 {
281         LIST_ADDTAIL(&node->item_link, &queue->head);
282         queue->count++;
283 }
284
285 static void
286 _queue_node_push_front(queue *queue, queue_node *node)
287 {
288         LIST_ADD(&node->item_link, &queue->head);
289         queue->count++;
290 }
291
292 static queue_node *
293 _queue_node_pop_front(queue *queue)
294 {
295         queue_node *node;
296
297         if (!queue->head.next) return NULL;
298         if (!queue->count) return NULL;
299
300         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
301
302         LIST_DELINIT(&node->item_link);
303         queue->count--;
304
305         return node;
306 }
307
308 static queue_node *
309 _queue_node_pop(queue *queue, queue_node *node)
310 {
311         LIST_DELINIT(&node->item_link);
312         queue->count--;
313
314         return node;
315 }
316
317 static queue_node *
318 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
319                 tbm_surface_h surface, int *out_type)
320 {
321         queue_node *node = NULL;
322
323         if (type == 0)
324                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
325         if (out_type)
326                 *out_type = 0;
327
328         if (type & FREE_QUEUE) {
329                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
330                                          item_link) {
331                         if (node->surface == surface) {
332                                 if (out_type)
333                                         *out_type = FREE_QUEUE;
334
335                                 return node;
336                         }
337                 }
338         }
339
340         if (type & DIRTY_QUEUE) {
341                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
342                                          item_link) {
343                         if (node->surface == surface) {
344                                 if (out_type)
345                                         *out_type = DIRTY_QUEUE;
346
347                                 return node;
348                         }
349                 }
350         }
351
352         if (type & NODE_LIST) {
353                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
354                         if (node->surface == surface) {
355                                 if (out_type)
356                                         *out_type = NODE_LIST;
357
358                                 return node;
359                         }
360                 }
361         }
362
363         TBM_LOG_E("fail to get the queue_node.\n");
364
365         return NULL;
366 }
367
368 static void
369 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
370 {
371         if (node->surface) {
372                 if (surface_queue->free_cb) {
373                         surface_queue->free_cb(surface_queue,
374                                         surface_queue->alloc_cb_data,
375                                         node->surface);
376                 }
377
378                 tbm_surface_destroy(node->surface);
379         }
380
381         _queue_node_delete(node);
382 }
383
384 static void
385 _queue_init(queue *queue)
386 {
387         LIST_INITHEAD(&queue->head);
388
389         queue->count = 0;
390 }
391
392 static void
393 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
394             void *data)
395 {
396         TBM_RETURN_IF_FAIL(cb != NULL);
397
398         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
399
400         TBM_RETURN_IF_FAIL(item != NULL);
401
402         LIST_INITHEAD(&item->link);
403         item->cb = cb;
404         item->data = data;
405
406         LIST_ADDTAIL(&item->link, list);
407 }
408
409 static void
410 _notify_remove(struct list_head *list,
411                tbm_surface_queue_notify_cb cb, void *data)
412 {
413         queue_notify *item = NULL, *tmp;
414
415         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
416                 if (item->cb == cb && item->data == data) {
417                         LIST_DEL(&item->link);
418                         free(item);
419                         return;
420                 }
421         }
422
423         TBM_LOG_E("Cannot find notifiy\n");
424 }
425
426 static void
427 _notify_remove_all(struct list_head *list)
428 {
429         queue_notify *item = NULL, *tmp;
430
431         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
432                 LIST_DEL(&item->link);
433                 free(item);
434         }
435 }
436
437 static void
438 _notify_emit(tbm_surface_queue_h surface_queue,
439              struct list_head *list)
440 {
441         queue_notify *item = NULL, *tmp;;
442
443         /*
444                 The item->cb is the outside function of the libtbm.
445                 The tbm user may/can remove the item of the list,
446                 so we have to use the LIST_FOR_EACH_ENTRY_SAFE.
447         */
448         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
449                 item->cb(surface_queue, item->data);
450 }
451
452 static void
453 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
454             void *data)
455 {
456         TBM_RETURN_IF_FAIL(cb != NULL);
457
458         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
459
460         TBM_RETURN_IF_FAIL(item != NULL);
461
462         LIST_INITHEAD(&item->link);
463         item->cb = cb;
464         item->data = data;
465
466         LIST_ADDTAIL(&item->link, list);
467 }
468
469 static void
470 _trace_remove(struct list_head *list,
471                tbm_surface_queue_trace_cb cb, void *data)
472 {
473         queue_trace *item = NULL, *tmp;
474
475         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
476                 if (item->cb == cb && item->data == data) {
477                         LIST_DEL(&item->link);
478                         free(item);
479                         return;
480                 }
481         }
482
483         TBM_LOG_E("Cannot find notifiy\n");
484 }
485
486 static void
487 _trace_remove_all(struct list_head *list)
488 {
489         queue_trace *item = NULL, *tmp;
490
491         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
492                 LIST_DEL(&item->link);
493                 free(item);
494         }
495 }
496
497 static void
498 _trace_emit(tbm_surface_queue_h surface_queue,
499              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
500 {
501         queue_trace *item = NULL, *tmp;;
502
503         /*
504                 The item->cb is the outside function of the libtbm.
505                 The tbm user may/can remove the item of the list,
506                 so we have to use the LIST_FOR_EACH_ENTRY_SAFE.
507         */
508         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
509                 item->cb(surface_queue, surface, trace, item->data);
510 }
511
512 static int
513 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
514 {
515         queue_node *node = NULL;
516         int count = 0;
517
518         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
519                 if (node->type == type)
520                         count++;
521         }
522
523         return count;
524 }
525
526 static void
527 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
528                           tbm_surface_h surface)
529 {
530         queue_node *node;
531
532         node = _queue_node_create();
533         TBM_RETURN_IF_FAIL(node != NULL);
534
535         tbm_surface_internal_ref(surface);
536         node->surface = surface;
537
538         LIST_ADDTAIL(&node->link, &surface_queue->list);
539         surface_queue->num_attached++;
540         _queue_node_push_back(&surface_queue->free_queue, node);
541 }
542
543 static void
544 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
545                           tbm_surface_h surface)
546 {
547         queue_node *node;
548         int queue_type;
549
550         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
551         if (node) {
552                 _queue_delete_node(surface_queue, node);
553                 surface_queue->num_attached--;
554         }
555 }
556
557 static void
558 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
559                            queue_node *node, int push_back)
560 {
561         if (push_back)
562                 _queue_node_push_back(&surface_queue->dirty_queue, node);
563         else
564                 _queue_node_push_front(&surface_queue->dirty_queue, node);
565 }
566
567 static queue_node *
568 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
569 {
570         queue_node *node;
571
572         node = _queue_node_pop_front(&surface_queue->free_queue);
573
574         return node;
575 }
576
577 static queue_node *
578 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
579 {
580         queue_node *node;
581
582         if (_queue_is_empty(&surface_queue->dirty_queue))
583                 return NULL;
584
585         node = _queue_node_pop_front(&surface_queue->dirty_queue);
586
587         return node;
588 }
589
590 static void
591 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
592                            queue_node *node, int push_back)
593 {
594         if (push_back)
595                 _queue_node_push_back(&surface_queue->free_queue, node);
596         else
597                 _queue_node_push_front(&surface_queue->free_queue, node);
598 }
599
600 static void
601 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
602                         int queue_size,
603                         int width, int height, int format,
604                         const tbm_surface_queue_interface *impl, void *data)
605 {
606         TBM_RETURN_IF_FAIL(surface_queue != NULL);
607         TBM_RETURN_IF_FAIL(impl != NULL);
608
609         if (!g_surf_queue_bufmgr)
610                 _init_tbm_surf_queue_bufmgr();
611
612         pthread_mutex_init(&surface_queue->lock, NULL);
613         pthread_cond_init(&surface_queue->free_cond, NULL);
614         pthread_cond_init(&surface_queue->dirty_cond, NULL);
615
616         surface_queue->queue_size = queue_size;
617         surface_queue->width = width;
618         surface_queue->height = height;
619         surface_queue->format = format;
620         surface_queue->impl = impl;
621         surface_queue->impl_data = data;
622         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
623
624         _queue_init(&surface_queue->free_queue);
625         _queue_init(&surface_queue->dirty_queue);
626         LIST_INITHEAD(&surface_queue->list);
627
628         LIST_INITHEAD(&surface_queue->destory_noti);
629         LIST_INITHEAD(&surface_queue->dequeuable_noti);
630         LIST_INITHEAD(&surface_queue->dequeue_noti);
631         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
632         LIST_INITHEAD(&surface_queue->acquirable_noti);
633         LIST_INITHEAD(&surface_queue->reset_noti);
634         LIST_INITHEAD(&surface_queue->trace_noti);
635
636         if (surface_queue->impl && surface_queue->impl->init)
637                 surface_queue->impl->init(surface_queue);
638
639         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
640 }
641
642 tbm_surface_queue_error_e
643 tbm_surface_queue_add_destroy_cb(
644         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
645         void *data)
646 {
647         _tbm_surf_queue_mutex_lock();
648
649         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
650                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
651
652         pthread_mutex_lock(&surface_queue->lock);
653
654         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
655
656         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
657
658         pthread_mutex_unlock(&surface_queue->lock);
659
660         _tbm_surf_queue_mutex_unlock();
661
662         return TBM_SURFACE_QUEUE_ERROR_NONE;
663 }
664
665 tbm_surface_queue_error_e
666 tbm_surface_queue_remove_destroy_cb(
667         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
668         void *data)
669 {
670         _tbm_surf_queue_mutex_lock();
671
672         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
673                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
674
675         pthread_mutex_lock(&surface_queue->lock);
676
677         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
678
679         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
680
681         pthread_mutex_unlock(&surface_queue->lock);
682
683         _tbm_surf_queue_mutex_unlock();
684
685         return TBM_SURFACE_QUEUE_ERROR_NONE;
686 }
687
688 tbm_surface_queue_error_e
689 tbm_surface_queue_add_dequeuable_cb(
690         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
691         void *data)
692 {
693         _tbm_surf_queue_mutex_lock();
694
695         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
696                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
697
698         pthread_mutex_lock(&surface_queue->lock);
699
700         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
701
702         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
703
704         pthread_mutex_unlock(&surface_queue->lock);
705
706         _tbm_surf_queue_mutex_unlock();
707
708         return TBM_SURFACE_QUEUE_ERROR_NONE;
709 }
710
711 tbm_surface_queue_error_e
712 tbm_surface_queue_remove_dequeuable_cb(
713         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
714         void *data)
715 {
716         _tbm_surf_queue_mutex_lock();
717
718         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
719                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
720
721         pthread_mutex_lock(&surface_queue->lock);
722
723         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
724
725         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
726
727         pthread_mutex_unlock(&surface_queue->lock);
728
729         _tbm_surf_queue_mutex_unlock();
730
731         return TBM_SURFACE_QUEUE_ERROR_NONE;
732 }
733
734 tbm_surface_queue_error_e
735 tbm_surface_queue_add_dequeue_cb(
736         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
737         void *data)
738 {
739         _tbm_surf_queue_mutex_lock();
740
741         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
742                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
743
744         pthread_mutex_lock(&surface_queue->lock);
745
746         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
747
748         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
749
750         pthread_mutex_unlock(&surface_queue->lock);
751
752         _tbm_surf_queue_mutex_unlock();
753
754         return TBM_SURFACE_QUEUE_ERROR_NONE;
755 }
756
757 tbm_surface_queue_error_e
758 tbm_surface_queue_remove_dequeue_cb(
759         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
760         void *data)
761 {
762         _tbm_surf_queue_mutex_lock();
763
764         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
765                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
766
767         pthread_mutex_lock(&surface_queue->lock);
768
769         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
770
771         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
772
773         pthread_mutex_unlock(&surface_queue->lock);
774
775         _tbm_surf_queue_mutex_unlock();
776
777         return TBM_SURFACE_QUEUE_ERROR_NONE;
778 }
779
780 tbm_surface_queue_error_e
781 tbm_surface_queue_add_can_dequeue_cb(
782         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
783         void *data)
784 {
785         _tbm_surf_queue_mutex_lock();
786
787         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
788                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
789
790         pthread_mutex_lock(&surface_queue->lock);
791
792         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
793
794         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
795
796         pthread_mutex_unlock(&surface_queue->lock);
797
798         _tbm_surf_queue_mutex_unlock();
799
800         return TBM_SURFACE_QUEUE_ERROR_NONE;
801 }
802
803 tbm_surface_queue_error_e
804 tbm_surface_queue_remove_can_dequeue_cb(
805         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
806         void *data)
807 {
808         _tbm_surf_queue_mutex_lock();
809
810         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
811                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
812
813         pthread_mutex_lock(&surface_queue->lock);
814
815         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
816
817         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
818
819         pthread_mutex_unlock(&surface_queue->lock);
820
821         _tbm_surf_queue_mutex_unlock();
822
823         return TBM_SURFACE_QUEUE_ERROR_NONE;
824 }
825
826 tbm_surface_queue_error_e
827 tbm_surface_queue_add_acquirable_cb(
828         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
829         void *data)
830 {
831         _tbm_surf_queue_mutex_lock();
832
833         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
834                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
835
836         pthread_mutex_lock(&surface_queue->lock);
837
838         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
839
840         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
841
842         pthread_mutex_unlock(&surface_queue->lock);
843
844         _tbm_surf_queue_mutex_unlock();
845
846         return TBM_SURFACE_QUEUE_ERROR_NONE;
847 }
848
849 tbm_surface_queue_error_e
850 tbm_surface_queue_remove_acquirable_cb(
851         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
852         void *data)
853 {
854         _tbm_surf_queue_mutex_lock();
855
856         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
857                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
858
859         pthread_mutex_lock(&surface_queue->lock);
860
861         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
862
863         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
864
865         pthread_mutex_unlock(&surface_queue->lock);
866
867         _tbm_surf_queue_mutex_unlock();
868
869         return TBM_SURFACE_QUEUE_ERROR_NONE;
870 }
871
872 tbm_surface_queue_error_e
873 tbm_surface_queue_add_trace_cb(
874         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
875         void *data)
876 {
877         _tbm_surf_queue_mutex_lock();
878
879         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
880                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
881
882         pthread_mutex_lock(&surface_queue->lock);
883
884         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
885
886         _trace_add(&surface_queue->trace_noti, trace_cb, data);
887
888         pthread_mutex_unlock(&surface_queue->lock);
889
890         _tbm_surf_queue_mutex_unlock();
891
892         return TBM_SURFACE_QUEUE_ERROR_NONE;
893 }
894
895 tbm_surface_queue_error_e
896 tbm_surface_queue_remove_trace_cb(
897         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
898         void *data)
899 {
900         _tbm_surf_queue_mutex_lock();
901
902         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
903                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
904
905         pthread_mutex_lock(&surface_queue->lock);
906
907         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
908
909         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
910
911         pthread_mutex_unlock(&surface_queue->lock);
912
913         _tbm_surf_queue_mutex_unlock();
914
915         return TBM_SURFACE_QUEUE_ERROR_NONE;
916 }
917
918 tbm_surface_queue_error_e
919 tbm_surface_queue_set_alloc_cb(
920         tbm_surface_queue_h surface_queue,
921         tbm_surface_alloc_cb alloc_cb,
922         tbm_surface_free_cb free_cb,
923         void *data)
924 {
925         _tbm_surf_queue_mutex_lock();
926
927         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
928                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
929
930         pthread_mutex_lock(&surface_queue->lock);
931
932         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
933
934         surface_queue->alloc_cb = alloc_cb;
935         surface_queue->free_cb = free_cb;
936         surface_queue->alloc_cb_data = data;
937
938         pthread_mutex_unlock(&surface_queue->lock);
939
940         _tbm_surf_queue_mutex_unlock();
941
942         return TBM_SURFACE_QUEUE_ERROR_NONE;
943 }
944
945 int
946 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
947 {
948         int width;
949
950         _tbm_surf_queue_mutex_lock();
951
952         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
953
954         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
955
956         width = surface_queue->width;
957
958         _tbm_surf_queue_mutex_unlock();
959
960         return width;
961 }
962
963 int
964 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
965 {
966         int height;
967
968         _tbm_surf_queue_mutex_lock();
969
970         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
971
972         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
973
974         height = surface_queue->height;
975
976         _tbm_surf_queue_mutex_unlock();
977
978         return height;
979 }
980
981 int
982 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
983 {
984         int format;
985
986         _tbm_surf_queue_mutex_lock();
987
988         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
989
990         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
991
992         format = surface_queue->format;
993
994         _tbm_surf_queue_mutex_unlock();
995
996         return format;
997 }
998
999 int
1000 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1001 {
1002         int queue_size;
1003
1004         _tbm_surf_queue_mutex_lock();
1005
1006         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1007
1008         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1009
1010         queue_size = surface_queue->queue_size;
1011
1012         _tbm_surf_queue_mutex_unlock();
1013
1014         return queue_size;
1015 }
1016
/**
 * Register @reset_cb (with user @data) on the queue's reset notification list.
 *
 * The callback list is emitted by tbm_surface_queue_reset(),
 * tbm_surface_queue_flush() and tbm_surface_queue_notify_reset().
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE if @surface_queue is invalid
 */
tbm_surface_queue_error_e
tbm_surface_queue_add_reset_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	/* the notification list is protected by the per-queue lock */
	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->reset_noti, reset_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1039
/**
 * Unregister a previously added reset callback.
 *
 * The (@reset_cb, @data) pair must match the one passed to
 * tbm_surface_queue_add_reset_cb().
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE if @surface_queue is invalid
 */
tbm_surface_queue_error_e
tbm_surface_queue_remove_reset_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	/* the notification list is protected by the per-queue lock */
	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->reset_noti, reset_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1062
/**
 * Hand a dequeued surface back to the queue as a dirty (renderable-done) buffer.
 *
 * The surface must currently be in the node list (i.e. dequeued); a surface
 * already sitting in the free or dirty queue is rejected.  On success the
 * node moves to the dirty queue, dirty_cond waiters are woken and the
 * acquirable notification is emitted.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success, or an error describing
 *         why the surface could not be enqueued
 */
tbm_surface_queue_error_e
tbm_surface_queue_enqueue(tbm_surface_queue_h
			  surface_queue, tbm_surface_h surface)
{
	queue_node *node;
	int queue_type;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	if (b_dump_queue)
		tbm_surface_internal_dump_buffer(surface, "enqueue");

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	/* the surface must be tracked (node != NULL) and must not already be
	 * queued in the free or dirty queue (queue_type must be NODE_LIST) */
	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
			node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		if (!node)
			return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
		else
			return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
	}

	/* backend may override the enqueue policy */
	if (surface_queue->impl && surface_queue->impl->enqueue)
		surface_queue->impl->enqueue(surface_queue, node);
	else
		_tbm_surface_queue_enqueue(surface_queue, node, 1);

	/* sanity check: the enqueue hook must have placed it in the dirty queue */
	if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
		TBM_LOG_E("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_ERROR_INVALID_OPERATION;
	}

	node->type = QUEUE_NODE_TYPE_ENQUEUE;

	/* wake can_acquire() waiters; notifications run unlocked to avoid
	 * re-entrancy deadlocks in user callbacks */
	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->dirty_cond);

	_tbm_surf_queue_mutex_unlock();

	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);

	_notify_emit(surface_queue, &surface_queue->acquirable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1124
/**
 * Return a dequeued surface to the free queue without rendering it.
 *
 * Handles three cases: a delete-pending node is destroyed outright; if more
 * buffers are attached than the queue size allows, the surface is detached;
 * otherwise it is released back into the free queue and free_cond waiters
 * are woken.  A cancel-dequeue trace event is emitted in every success path.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success, or an error code
 */
tbm_surface_queue_error_e
tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
			  surface_queue, tbm_surface_h surface)
{
	queue_node *node;
	int queue_type;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	/* only a surface currently in the node list (dequeued) may be cancelled */
	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_cancel_dequeue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
			node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
	}

	/* a reset/flush marked this node for deletion while it was dequeued */
	if (node->delete_pending) {
		TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

		_queue_delete_node(surface_queue, node);

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	/* over-attached (e.g. after a shrink): detach instead of releasing */
	if (surface_queue->queue_size < surface_queue->num_attached) {
		TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

		if (surface_queue->impl && surface_queue->impl->need_detach)
			surface_queue->impl->need_detach(surface_queue, node);
		else
			_tbm_surface_queue_detach(surface_queue, surface);

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	/* normal path: put the node back into the free queue */
	if (surface_queue->impl && surface_queue->impl->release)
		surface_queue->impl->release(surface_queue, node);
	else
		_tbm_surface_queue_release(surface_queue, node, 1);

	if (_queue_is_empty(&surface_queue->free_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);

		TBM_LOG_E("surface_queue->free_queue is empty.\n");
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
	}

	node->type = QUEUE_NODE_TYPE_RELEASE;

	/* wake can_dequeue() waiters */
	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1208
/**
 * Take a free surface out of the queue for the client to render into.
 *
 * If the free queue is empty the backend's need_attach hook is given a
 * chance to attach more buffers first.  On success *surface points at the
 * dequeued buffer and the dequeue notification is emitted.
 *
 * @param surface out-parameter; set to NULL on entry and to the dequeued
 *        surface on success
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_EMPTY if no buffer is available
 */
tbm_surface_queue_error_e
tbm_surface_queue_dequeue(tbm_surface_queue_h
			  surface_queue, tbm_surface_h *surface)
{
	queue_node *node;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	*surface = NULL;

	pthread_mutex_lock(&surface_queue->lock);

	if (_queue_is_empty(&surface_queue->free_queue)) {
		/* let the backend attach new buffers on demand */
		if (surface_queue->impl && surface_queue->impl->need_attach)
			surface_queue->impl->need_attach(surface_queue);

		/* need_attach may call back into user code that destroys the queue */
		if (!_tbm_surface_queue_is_valid(surface_queue)) {
			TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
			pthread_mutex_unlock(&surface_queue->lock);
			_tbm_surf_queue_mutex_unlock();
			return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
		}
	}

	if (surface_queue->impl && surface_queue->impl->dequeue)
		node = surface_queue->impl->dequeue(surface_queue);
	else
		node = _tbm_surface_queue_dequeue(surface_queue);

	if (node == NULL || node->surface == NULL) {
		TBM_LOG_E("_queue_node_pop_front failed\n");
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_EMPTY;
	}

	node->type = QUEUE_NODE_TYPE_DEQUEUE;
	*surface = node->surface;

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	/* notifications run unlocked to avoid re-entrancy deadlocks */
	_trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);

	_notify_emit(surface_queue, &surface_queue->dequeue_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1266
/**
 * Check whether a surface can currently be dequeued.
 *
 * Emits the can_dequeue notification first (with all locks dropped, since
 * the callback may call back into the queue), gives the backend a chance to
 * attach buffers, and — when @wait is set and buffers are held in ACQUIRE
 * state — blocks on free_cond until one is released.
 *
 * @param wait if non-zero, block until a buffer becomes free when one is
 *        expected to return (an acquired buffer exists)
 * @return 1 if dequeue is possible, 0 otherwise
 */
int
tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	/* drop the global lock across the user callback, then re-validate:
	 * the callback may have destroyed the queue */
	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->can_dequeue_noti);

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (_queue_is_empty(&surface_queue->free_queue)) {
		if (surface_queue->impl && surface_queue->impl->need_attach)
			surface_queue->impl->need_attach(surface_queue);

		/* need_attach may invalidate the queue via user callbacks */
		if (!_tbm_surface_queue_is_valid(surface_queue)) {
			TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
			pthread_mutex_unlock(&surface_queue->lock);
			_tbm_surf_queue_mutex_unlock();
			return 0;
		}
	}

	if (!_queue_is_empty(&surface_queue->free_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		return 1;
	}

	/* only wait if a buffer is held acquired, i.e. a release is expected;
	 * the global mutex is dropped first so releasers can make progress */
	if (wait && _tbm_surface_queue_get_node_count(surface_queue,
						QUEUE_NODE_TYPE_ACQUIRE)) {
		_tbm_surf_queue_mutex_unlock();
		pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
		pthread_mutex_unlock(&surface_queue->lock);
		return 1;
	}

	pthread_mutex_unlock(&surface_queue->lock);
	_tbm_surf_queue_mutex_unlock();
	return 0;
}
1316
/**
 * Release an acquired surface back to the queue's free queue.
 *
 * Mirrors tbm_surface_queue_cancel_dequeue(): delete-pending nodes are
 * destroyed, over-attached buffers are detached, otherwise the node is
 * released into the free queue, free_cond waiters are woken and the
 * dequeuable notification is emitted.  A release trace event is emitted in
 * every success path.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success, or an error code
 */
tbm_surface_queue_error_e
tbm_surface_queue_release(tbm_surface_queue_h
			  surface_queue, tbm_surface_h surface)
{
	queue_node *node;
	int queue_type;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	/* only a surface currently in the node list (acquired) may be released */
	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
			node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		if (!node)
			return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
		else
			return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
	}

	/* a reset/flush marked this node for deletion while it was out */
	if (node->delete_pending) {
		TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

		_queue_delete_node(surface_queue, node);

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	/* over-attached (e.g. after a shrink): detach instead of releasing */
	if (surface_queue->queue_size < surface_queue->num_attached) {
		TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

		if (surface_queue->impl && surface_queue->impl->need_detach)
			surface_queue->impl->need_detach(surface_queue, node);
		else
			_tbm_surface_queue_detach(surface_queue, surface);

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	/* normal path: backend may override the release policy */
	if (surface_queue->impl && surface_queue->impl->release)
		surface_queue->impl->release(surface_queue, node);
	else
		_tbm_surface_queue_release(surface_queue, node, 1);

	/* sanity check: the release hook must have placed it in the free queue */
	if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
		TBM_LOG_E("release surface(%p) but surface isn't present in the free_queue\n", surface);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_ERROR_INVALID_OPERATION;
	}

	node->type = QUEUE_NODE_TYPE_RELEASE;

	/* wake can_dequeue() waiters; notifications run unlocked */
	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);

	_notify_emit(surface_queue, &surface_queue->dequeuable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1406
/**
 * Return an acquired surface to the dirty queue without consuming it.
 *
 * The surface goes back through the enqueue path so it can be acquired
 * again later; dirty_cond waiters are woken and the acquirable notification
 * is emitted.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success, or an error code
 */
tbm_surface_queue_error_e
tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
			surface_queue, tbm_surface_h surface)
{
	queue_node *node;
	int queue_type;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	/* only a surface currently in the node list (acquired) may be cancelled */
	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_cancel_acquire::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
			node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
	}

	/* re-enqueue: put the node back into the dirty queue */
	if (surface_queue->impl && surface_queue->impl->enqueue)
		surface_queue->impl->enqueue(surface_queue, node);
	else
		_tbm_surface_queue_enqueue(surface_queue, node, 1);

	/* sanity check: the enqueue hook must have filled the dirty queue */
	if (_queue_is_empty(&surface_queue->dirty_queue)) {
		TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
	}

	node->type = QUEUE_NODE_TYPE_ENQUEUE;

	/* wake can_acquire() waiters; notifications run unlocked */
	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->dirty_cond);

	_tbm_surf_queue_mutex_unlock();

	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);

	_notify_emit(surface_queue, &surface_queue->acquirable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1461
1462 tbm_surface_queue_error_e
1463 tbm_surface_queue_acquire(tbm_surface_queue_h
1464                           surface_queue, tbm_surface_h *surface)
1465 {
1466         queue_node *node;
1467
1468         _tbm_surf_queue_mutex_lock();
1469
1470         *surface = NULL;
1471
1472         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1473                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1474         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1475                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1476
1477         pthread_mutex_lock(&surface_queue->lock);
1478
1479         if (surface_queue->impl && surface_queue->impl->acquire)
1480                 node = surface_queue->impl->acquire(surface_queue);
1481         else
1482                 node = _tbm_surface_queue_acquire(surface_queue);
1483
1484         if (node == NULL || node->surface == NULL) {
1485                 TBM_LOG_E("_queue_node_pop_front failed\n");
1486                 pthread_mutex_unlock(&surface_queue->lock);
1487
1488                 _tbm_surf_queue_mutex_unlock();
1489                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1490         }
1491
1492         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1493
1494         *surface = node->surface;
1495
1496         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1497
1498         pthread_mutex_unlock(&surface_queue->lock);
1499
1500         _tbm_surf_queue_mutex_unlock();
1501
1502         if (b_dump_queue)
1503                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1504
1505         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1506
1507         return TBM_SURFACE_QUEUE_ERROR_NONE;
1508 }
1509
/**
 * Check whether a surface can currently be acquired.
 *
 * @param wait if non-zero, block on dirty_cond while a buffer is held in
 *        DEQUEUE state (i.e. an enqueue is expected to arrive)
 * @return 1 if acquire is possible, 0 otherwise
 */
int
tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (!_queue_is_empty(&surface_queue->dirty_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		return 1;
	}

	/* only wait if a buffer is dequeued, i.e. an enqueue is expected;
	 * the global mutex is dropped first so enqueuers can make progress */
	if (wait && _tbm_surface_queue_get_node_count(surface_queue,
						QUEUE_NODE_TYPE_DEQUEUE)) {
		_tbm_surf_queue_mutex_unlock();
		pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
		pthread_mutex_unlock(&surface_queue->lock);
		return 1;
	}

	pthread_mutex_unlock(&surface_queue->lock);
	_tbm_surf_queue_mutex_unlock();
	return 0;
}
1539
/**
 * Destroy the surface queue and free all of its resources.
 *
 * Deletes every tracked node, runs the backend destroy hook, emits the
 * destroy notification, clears all notification lists, and frees the queue.
 * When the last queue is gone the global bufmgr bookkeeping is torn down.
 */
void
tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* unlink from the global queue list first so it is no longer "valid" */
	LIST_DEL(&surface_queue->item_link);

	LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
		_queue_delete_node(surface_queue, node);

	if (surface_queue->impl && surface_queue->impl->destroy)
		surface_queue->impl->destroy(surface_queue);

	/* notify listeners before tearing the callback lists down */
	_notify_emit(surface_queue, &surface_queue->destory_noti);

	_notify_remove_all(&surface_queue->destory_noti);
	_notify_remove_all(&surface_queue->dequeuable_noti);
	_notify_remove_all(&surface_queue->dequeue_noti);
	_notify_remove_all(&surface_queue->can_dequeue_noti);
	_notify_remove_all(&surface_queue->acquirable_noti);
	_notify_remove_all(&surface_queue->reset_noti);
	_trace_remove_all(&surface_queue->trace_noti);

	pthread_mutex_destroy(&surface_queue->lock);

	free(surface_queue);

	/* last queue gone: release the global bufmgr bookkeeping */
	if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
		_deinit_tbm_surf_queue_bufmgr();

	_tbm_surf_queue_mutex_unlock();
}
1578
1579 tbm_surface_queue_error_e
1580 tbm_surface_queue_reset(tbm_surface_queue_h
1581                         surface_queue, int width, int height, int format)
1582 {
1583         queue_node *node = NULL, *tmp;
1584
1585         _tbm_surf_queue_mutex_lock();
1586
1587         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1588                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1589
1590         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1591
1592         if (width == surface_queue->width && height == surface_queue->height &&
1593                 format == surface_queue->format) {
1594                 _tbm_surf_queue_mutex_unlock();
1595                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1596         }
1597
1598         pthread_mutex_lock(&surface_queue->lock);
1599
1600         surface_queue->width = width;
1601         surface_queue->height = height;
1602         surface_queue->format = format;
1603
1604         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1605                 /* Destory surface and Push to free_queue */
1606                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1607                         _queue_delete_node(surface_queue, node);
1608
1609                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1610                         node->delete_pending = 1;
1611         } else {
1612                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1613                         _queue_delete_node(surface_queue, node);
1614
1615                 _queue_init(&surface_queue->dirty_queue);
1616                 LIST_INITHEAD(&surface_queue->list);
1617         }
1618
1619         /* Reset queue */
1620         _queue_init(&surface_queue->free_queue);
1621
1622         surface_queue->num_attached = 0;
1623
1624         if (surface_queue->impl && surface_queue->impl->reset)
1625                 surface_queue->impl->reset(surface_queue);
1626
1627         pthread_mutex_unlock(&surface_queue->lock);
1628         pthread_cond_signal(&surface_queue->free_cond);
1629
1630         _tbm_surf_queue_mutex_unlock();
1631
1632         _notify_emit(surface_queue, &surface_queue->reset_noti);
1633
1634         return TBM_SURFACE_QUEUE_ERROR_NONE;
1635 }
1636
/**
 * Emit the reset notification without actually resetting the queue.
 *
 * The global mutex is dropped before the emit so callbacks may safely
 * re-enter queue APIs.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE if @surface_queue is invalid
 */
tbm_surface_queue_error_e
tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1651
1652 tbm_surface_queue_error_e
1653 tbm_surface_queue_set_size(tbm_surface_queue_h
1654                         surface_queue, int queue_size, int flush)
1655 {
1656         queue_node *node = NULL, *tmp;
1657
1658         _tbm_surf_queue_mutex_lock();
1659
1660         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1661                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1662         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1663                                         TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1664
1665         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1666
1667         if ((surface_queue->queue_size == queue_size) && !flush) {
1668                 _tbm_surf_queue_mutex_unlock();
1669                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1670         }
1671
1672         pthread_mutex_lock(&surface_queue->lock);
1673
1674         if (flush) {
1675                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1676                         /* Destory surface and Push to free_queue */
1677                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1678                                 _queue_delete_node(surface_queue, node);
1679
1680                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1681                                 node->delete_pending = 1;
1682                 } else {
1683                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1684                                 _queue_delete_node(surface_queue, node);
1685
1686                         _queue_init(&surface_queue->dirty_queue);
1687                         LIST_INITHEAD(&surface_queue->list);
1688                 }
1689
1690                 /* Reset queue */
1691                 _queue_init(&surface_queue->free_queue);
1692
1693                 surface_queue->num_attached = 0;
1694                 surface_queue->queue_size = queue_size;
1695
1696                 if (surface_queue->impl && surface_queue->impl->reset)
1697                         surface_queue->impl->reset(surface_queue);
1698
1699                 pthread_mutex_unlock(&surface_queue->lock);
1700                 pthread_cond_signal(&surface_queue->free_cond);
1701
1702                 _tbm_surf_queue_mutex_unlock();
1703
1704                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1705
1706                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1707         } else {
1708                 if (surface_queue->queue_size > queue_size) {
1709                         int need_del = surface_queue->queue_size - queue_size;
1710
1711                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1712                                 TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1713
1714                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1715                                         surface_queue->impl->need_detach(surface_queue, node);
1716                                 else
1717                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1718
1719                                 need_del--;
1720                                 if (need_del == 0)
1721                                         break;
1722                         }
1723                 }
1724
1725                 surface_queue->queue_size = queue_size;
1726
1727                 pthread_mutex_unlock(&surface_queue->lock);
1728
1729                 _tbm_surf_queue_mutex_unlock();
1730
1731                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1732         }
1733 }
1734
1735 tbm_surface_queue_error_e
1736 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1737 {
1738         queue_node *node = NULL;
1739
1740         _tbm_surf_queue_mutex_lock();
1741
1742         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1743                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1744
1745         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1746
1747         if (surface_queue->num_attached == 0) {
1748                 _tbm_surf_queue_mutex_unlock();
1749                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1750         }
1751
1752         pthread_mutex_lock(&surface_queue->lock);
1753
1754         /* Destory surface in free_queue */
1755         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1756                 if (surface_queue->impl && surface_queue->impl->need_detach)
1757                         surface_queue->impl->need_detach(surface_queue, node);
1758                 else
1759                         _tbm_surface_queue_detach(surface_queue, node->surface);
1760         }
1761
1762         /* Reset queue */
1763         _queue_init(&surface_queue->free_queue);
1764
1765         pthread_mutex_unlock(&surface_queue->lock);
1766         _tbm_surf_queue_mutex_unlock();
1767
1768         return TBM_SURFACE_QUEUE_ERROR_NONE;
1769 }
1770
1771 tbm_surface_queue_error_e
1772 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1773 {
1774         queue_node *node = NULL, *tmp;
1775
1776         _tbm_surf_queue_mutex_lock();
1777
1778         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1779                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1780
1781         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1782
1783         if (surface_queue->num_attached == 0) {
1784                 _tbm_surf_queue_mutex_unlock();
1785                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1786         }
1787
1788         pthread_mutex_lock(&surface_queue->lock);
1789
1790         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1791                 /* Destory surface and Push to free_queue */
1792                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1793                         _queue_delete_node(surface_queue, node);
1794
1795                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1796                         node->delete_pending = 1;
1797         } else {
1798                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1799                         _queue_delete_node(surface_queue, node);
1800
1801                 _queue_init(&surface_queue->dirty_queue);
1802                 LIST_INITHEAD(&surface_queue->list);
1803         }
1804
1805         /* Reset queue */
1806         _queue_init(&surface_queue->free_queue);
1807
1808         surface_queue->num_attached = 0;
1809
1810         if (surface_queue->impl && surface_queue->impl->reset)
1811                 surface_queue->impl->reset(surface_queue);
1812
1813         pthread_mutex_unlock(&surface_queue->lock);
1814         pthread_cond_signal(&surface_queue->free_cond);
1815
1816         _tbm_surf_queue_mutex_unlock();
1817
1818         _notify_emit(surface_queue, &surface_queue->reset_noti);
1819
1820         return TBM_SURFACE_QUEUE_ERROR_NONE;
1821 }
1822
1823 tbm_surface_queue_error_e
1824 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1825                         tbm_surface_h *surfaces, int *num)
1826 {
1827         queue_node *node = NULL;
1828
1829         _tbm_surf_queue_mutex_lock();
1830
1831         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1832                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1833         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1834                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1835
1836         *num = 0;
1837
1838         pthread_mutex_lock(&surface_queue->lock);
1839
1840         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1841                 if (surfaces)
1842                         surfaces[*num] = node->surface;
1843
1844                 *num = *num + 1;
1845         }
1846
1847         pthread_mutex_unlock(&surface_queue->lock);
1848
1849         _tbm_surf_queue_mutex_unlock();
1850
1851         return TBM_SURFACE_QUEUE_ERROR_NONE;
1852 }
1853
1854 tbm_surface_queue_error_e
1855 tbm_surface_queue_get_trace_surface_num(
1856                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1857 {
1858         _tbm_surf_queue_mutex_lock();
1859
1860         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1861                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1862         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1863                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1864
1865         *num = 0;
1866
1867         pthread_mutex_lock(&surface_queue->lock);
1868
1869         switch (trace) {
1870         case TBM_SURFACE_QUEUE_TRACE_NONE:
1871                 *num = 0;
1872                 break;
1873         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1874                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1875                 break;
1876         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1877                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1878                 break;
1879         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1880                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1881                 break;
1882         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1883                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1884                 break;
1885         default:
1886                 break;
1887         }
1888
1889         pthread_mutex_unlock(&surface_queue->lock);
1890
1891         _tbm_surf_queue_mutex_unlock();
1892
1893         return TBM_SURFACE_QUEUE_ERROR_NONE;
1894 }
1895
/* Backend-private data for the default queue implementation. */
typedef struct {
	int flags;	/* passed to tbm_surface_internal_create_with_flags() */
} tbm_queue_default;
1899
1900 static void
1901 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1902 {
1903         free(surface_queue->impl_data);
1904 }
1905
/* need_attach hook: attach one more surface unless the queue already holds
 * queue_size buffers.  Entered with both the global surf-queue mutex and
 * surface_queue->lock held (they are released around the user callback
 * below). */
static void
__tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
	tbm_surface_h surface;

	/* queue fully populated: nothing to attach */
	if (surface_queue->queue_size == surface_queue->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		/* Drop both locks around the user-supplied allocator so it
		 * can call back into tbm_surface_queue APIs without
		 * deadlocking; reacquire in the original order afterwards. */
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		_tbm_surf_queue_mutex_lock();
		pthread_mutex_lock(&surface_queue->lock);

		/* silent return */
		if (!surface)
			return;

		/* hold a reference across attach; dropped at the end */
		tbm_surface_internal_ref(surface);
	} else {
		/* No allocator callback: create the surface ourselves with
		 * the flags recorded at queue creation. */
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
}
1938
/* Default backend vtable: only destroy and need_attach are provided; the
 * NULL slots fall back to the generic queue behaviour. */
static const tbm_surface_queue_interface tbm_queue_default_impl = {
	NULL,				/*__tbm_queue_default_init*/
	NULL,				/*__tbm_queue_default_reset*/
	__tbm_queue_default_destroy,
	__tbm_queue_default_need_attach,
	NULL,				/*__tbm_queue_default_enqueue*/
	NULL,				/*__tbm_queue_default_release*/
	NULL,				/*__tbm_queue_default_dequeue*/
	NULL,				/*__tbm_queue_default_acquire*/
	NULL,				/*__tbm_queue_default_need_detach*/
};
1950
1951 tbm_surface_queue_h
1952 tbm_surface_queue_create(int queue_size, int width,
1953                          int height, int format, int flags)
1954 {
1955         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1956         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1957         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1958         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1959
1960         _tbm_surf_queue_mutex_lock();
1961
1962         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1963                                             sizeof(struct _tbm_surface_queue));
1964         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1965
1966         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1967
1968         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
1969                                   sizeof(tbm_queue_default));
1970         if (data == NULL) {
1971                 TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
1972                 free(surface_queue);
1973                 _tbm_surf_queue_mutex_unlock();
1974                 return NULL;
1975         }
1976
1977         data->flags = flags;
1978         _tbm_surface_queue_init(surface_queue,
1979                                 queue_size,
1980                                 width, height, format,
1981                                 &tbm_queue_default_impl, data);
1982
1983         _tbm_surf_queue_mutex_unlock();
1984
1985         return surface_queue;
1986 }
1987
/* Backend-private data for the sequence queue implementation. */
typedef struct {
	int flags;		/* passed to tbm_surface_internal_create_with_flags() */
	queue dequeue_list;	/* nodes in dequeue order, used to force in-order enqueue */
} tbm_queue_sequence;
1992
1993 static void
1994 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
1995 {
1996         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1997
1998         _queue_init(&data->dequeue_list);
1999 }
2000
2001 static void
2002 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2003 {
2004         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2005
2006         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2007                 return;
2008
2009         _queue_init(&data->dequeue_list);
2010 }
2011
2012 static void
2013 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2014 {
2015         free(surface_queue->impl_data);
2016 }
2017
/* need_attach hook: attach one more surface unless the queue already holds
 * queue_size buffers.  Entered with both the global surf-queue mutex and
 * surface_queue->lock held (they are released around the user callback
 * below).  Identical to the default backend's need_attach. */
static void
__tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
	tbm_surface_h surface;

	/* queue fully populated: nothing to attach */
	if (surface_queue->queue_size == surface_queue->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		/* Drop both locks around the user-supplied allocator so it
		 * can call back into tbm_surface_queue APIs without
		 * deadlocking; reacquire in the original order afterwards. */
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		_tbm_surf_queue_mutex_lock();
		pthread_mutex_lock(&surface_queue->lock);

		/* silent return */
		if (!surface)
			return;

		/* hold a reference across attach; dropped at the end */
		tbm_surface_internal_ref(surface);
	} else {
		/* No allocator callback: create the surface ourselves with
		 * the flags recorded at queue creation. */
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
}
2050
/* enqueue hook: enforce that buffers are enqueued in the same order they
 * were dequeued.  Only the node at the head of dequeue_list may proceed;
 * an out-of-order enqueue silently returns and the node stays pending
 * until its turn. */
static void
__tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
			     queue_node *node)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
	queue_node *first = NULL;

	/* NOTE(review): if dequeue_list is empty, head.next points back at
	 * the sentinel and container_of yields an out-of-band pointer; it is
	 * only compared, never dereferenced, but confirm callers guarantee a
	 * non-empty list here. */
	first = container_of(data->dequeue_list.head.next, first, item_link);
	if (first != node) {
		return;
	}

	/* node is the oldest dequeued buffer: clear its pending mark, drop
	 * it from the tracking list and hand it to the generic enqueue. */
	node->priv_flags = 0;

	_queue_node_pop(&data->dequeue_list, node);
	_tbm_surface_queue_enqueue(surface_queue, node, 1);
}
2068
2069 static void
2070 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2071                                 queue_node *node)
2072 {
2073         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2074
2075         if (node->priv_flags) {
2076                 node->priv_flags = 0;
2077                 _queue_node_pop(&data->dequeue_list, node);
2078         }
2079
2080         _tbm_surface_queue_release(surface_queue, node, 1);
2081 }
2082
2083 static queue_node *
2084 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2085                              surface_queue)
2086 {
2087         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2088         queue_node *node;
2089
2090         node = _tbm_surface_queue_dequeue(surface_queue);
2091         if (node) {
2092                 _queue_node_push_back(&data->dequeue_list, node);
2093                 node->priv_flags = 1;
2094         }
2095
2096         return node;
2097 }
2098
/* Sequence backend vtable: adds FIFO dequeue-order enforcement on top of
 * the generic queue; NULL slots fall back to the generic behaviour. */
static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
	__tbm_queue_sequence_init,
	__tbm_queue_sequence_reset,
	__tbm_queue_sequence_destroy,
	__tbm_queue_sequence_need_attach,
	__tbm_queue_sequence_enqueue,
	__tbm_queue_sequence_release,
	__tbm_queue_sequence_dequeue,
	NULL,					/*__tbm_queue_sequence_acquire*/
	NULL,					/*__tbm_queue_sequence_need_dettach*/
};
2110
2111 tbm_surface_queue_h
2112 tbm_surface_queue_sequence_create(int queue_size, int width,
2113                                   int height, int format, int flags)
2114 {
2115         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2116         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
2117         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
2118         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2119
2120         _tbm_surf_queue_mutex_lock();
2121
2122         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2123                                             sizeof(struct _tbm_surface_queue));
2124         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2125
2126         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
2127
2128         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2129                                    sizeof(tbm_queue_sequence));
2130         if (data == NULL) {
2131                 TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
2132                 free(surface_queue);
2133                 _tbm_surf_queue_mutex_unlock();
2134                 return NULL;
2135         }
2136
2137         data->flags = flags;
2138         _tbm_surface_queue_init(surface_queue,
2139                                 queue_size,
2140                                 width, height, format,
2141                                 &tbm_queue_sequence_impl, data);
2142
2143         _tbm_surf_queue_mutex_unlock();
2144
2145         return surface_queue;
2146 }
2147
2148 tbm_surface_queue_error_e
2149 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2150                                   int modes)
2151 {
2152         _tbm_surf_queue_mutex_lock();
2153
2154         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2155                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2156
2157         pthread_mutex_lock(&surface_queue->lock);
2158
2159         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2160                 modes = TBM_SURFACE_QUEUE_MODE_NONE;
2161         else
2162                 surface_queue->modes |= modes;
2163
2164         pthread_mutex_unlock(&surface_queue->lock);
2165
2166         _tbm_surf_queue_mutex_unlock();
2167
2168         return TBM_SURFACE_QUEUE_ERROR_NONE;
2169 }