tbm_surface_queue: keep queue_node deletion pending until the node is released back to the queue
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 #define TBM_QUEUE_DEBUG 0
42
43 #ifdef TRACE
44 #define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
45 #else
46 #define TBM_QUEUE_TRACE(fmt, ...)
47 #endif /* TRACE */
48
49 #if TBM_QUEUE_DEBUG
50 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
51 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
52 #else
53 #define TBM_LOCK()
54 #define TBM_UNLOCK()
55 #endif
56
57 static tbm_bufmgr g_surf_queue_bufmgr;
58 static pthread_mutex_t tbm_surf_queue_lock;
59 static void _tbm_surf_queue_mutex_unlock(void);
60
61 /* check condition */
62 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
63         if (!(cond)) {\
64                 TBM_LOG_E("'%s' failed.\n", #cond);\
65                 _tbm_surf_queue_mutex_unlock();\
66                 return;\
67         } \
68 }
69
70 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
71         if (!(cond)) {\
72                 TBM_LOG_E("'%s' failed.\n", #cond);\
73                 _tbm_surf_queue_mutex_unlock();\
74                 return val;\
75         } \
76 }
77
78 typedef enum _queue_node_type {
79         QUEUE_NODE_TYPE_NONE,
80         QUEUE_NODE_TYPE_DEQUEUE,
81         QUEUE_NODE_TYPE_ENQUEUE,
82         QUEUE_NODE_TYPE_ACQUIRE,
83         QUEUE_NODE_TYPE_RELEASE
84 } Queue_Node_Type;
85
86 typedef struct {
87         struct list_head head;
88         int count;
89 } queue;
90
91 typedef struct {
92         tbm_surface_h surface;
93
94         struct list_head item_link;
95         struct list_head link;
96
97         Queue_Node_Type type;
98
99         unsigned int priv_flags;        /*for each queue*/
100
101         int delete_pending;
102 } queue_node;
103
104 typedef struct {
105         struct list_head link;
106
107         tbm_surface_queue_notify_cb cb;
108         void *data;
109 } queue_notify;
110
111 typedef struct {
112         struct list_head link;
113
114         tbm_surface_queue_trace_cb cb;
115         void *data;
116 } queue_trace;
117
118 typedef struct _tbm_surface_queue_interface {
119         void (*init)(tbm_surface_queue_h queue);
120         void (*reset)(tbm_surface_queue_h queue);
121         void (*destroy)(tbm_surface_queue_h queue);
122         void (*need_attach)(tbm_surface_queue_h queue);
123
124         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
125         void (*release)(tbm_surface_queue_h queue, queue_node *node);
126         queue_node *(*dequeue)(tbm_surface_queue_h queue);
127         queue_node *(*acquire)(tbm_surface_queue_h queue);
128         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
129 } tbm_surface_queue_interface;
130
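/*
 * Hedged sketch of a backend vtable (illustrative only, not one of the real
 * backends). It assumes the code lives in this file so it can reuse the
 * static helpers defined below; reference/ownership handling of the created
 * surface is elided. Hooks left NULL are either skipped or replaced by the
 * default _tbm_surface_queue_* paths at the call sites below.
 *
 *	static void
 *	__example_need_attach(tbm_surface_queue_h surface_queue)
 *	{
 *		tbm_surface_h surface;
 *
 *		if (surface_queue->num_attached >= surface_queue->queue_size)
 *			return;
 *
 *		surface = tbm_surface_create(surface_queue->width,
 *					     surface_queue->height,
 *					     surface_queue->format);
 *		if (surface)
 *			_tbm_surface_queue_attach(surface_queue, surface);
 *	}
 *
 *	static const tbm_surface_queue_interface example_impl = {
 *		.need_attach = __example_need_attach,
 *	};
 */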
131 struct _tbm_surface_queue {
132         int width;
133         int height;
134         int format;
135         int queue_size;
136         int num_attached;
137
138         queue free_queue;
139         queue dirty_queue;
140         struct list_head list;
141
142         struct list_head destory_noti;
143         struct list_head dequeuable_noti;
144         struct list_head dequeue_noti;
145         struct list_head can_dequeue_noti;
146         struct list_head acquirable_noti;
147         struct list_head reset_noti;
148         struct list_head trace_noti;
149
150         pthread_mutex_t lock;
151         pthread_cond_t free_cond;
152         pthread_cond_t dirty_cond;
153
154         const tbm_surface_queue_interface *impl;
155         void *impl_data;
156
157         //For external buffer allocation
158         tbm_surface_alloc_cb alloc_cb;
159         tbm_surface_free_cb free_cb;
160         void *alloc_cb_data;
161
162         struct list_head item_link; /* link of surface queue */
163 };
164
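/*
 * Hedged overview of the bookkeeping above: every attached buffer owns one
 * queue_node that always sits on 'list' (via queue_node.link). The same node
 * additionally sits on free_queue while it is dequeuable and on dirty_queue
 * while it waits to be acquired (via queue_node.item_link); while the user
 * holds the surface (dequeued or acquired) it is on neither queue. node->type
 * records the last transition, and delete_pending marks nodes that must be
 * destroyed once the user hands the surface back.
 */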
165 /* LCOV_EXCL_START */
166
167 static bool
168 _tbm_surf_queue_mutex_init(void)
169 {
170         static bool tbm_surf_queue_mutex_init = false;
171
172         if (tbm_surf_queue_mutex_init)
173                 return true;
174
175         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
176                 TBM_LOG_E("fail: pthread_mutex_init\n");
177                 return false;
178         }
179
180         tbm_surf_queue_mutex_init = true;
181
182         return true;
183 }
184
185 static void
186 _tbm_surf_queue_mutex_lock(void)
187 {
188         if (!_tbm_surf_queue_mutex_init()) {
189                 TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
190                 return;
191         }
192
193         pthread_mutex_lock(&tbm_surf_queue_lock);
194 }
195
196 static void
197 _tbm_surf_queue_mutex_unlock(void)
198 {
199         pthread_mutex_unlock(&tbm_surf_queue_lock);
200 }
201
202 static void
203 _init_tbm_surf_queue_bufmgr(void)
204 {
205         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
206 }
207
208 static void
209 _deinit_tbm_surf_queue_bufmgr(void)
210 {
211         if (!g_surf_queue_bufmgr)
212                 return;
213
214         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
215         g_surf_queue_bufmgr = NULL;
216 }
217
218 static int
219 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
220 {
221         tbm_surface_queue_h old_data = NULL;
222
223         if (surface_queue == NULL) {
224                 TBM_LOG_E("error: surface_queue is NULL.\n");
225                 return 0;
226         }
227
228         if (g_surf_queue_bufmgr == NULL) {
229                 TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
230                 return 0;
231         }
232
233         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
234                 TBM_LOG_E("error: surf_queue_list is empty\n");
235                 return 0;
236         }
237
238         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
239                                 item_link) {
240                 if (old_data == surface_queue) {
241                         TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
242                         return 1;
243                 }
244         }
245
246         TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
247
248         return 0;
249 }
250
251 static queue_node *
252 _queue_node_create(void)
253 {
254         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
255
256         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
257
258         return node;
259 }
260
261 static void
262 _queue_node_delete(queue_node *node)
263 {
264         LIST_DEL(&node->item_link);
265         LIST_DEL(&node->link);
266         free(node);
267 }
268
269 static int
270 _queue_is_empty(queue *queue)
271 {
272         if (LIST_IS_EMPTY(&queue->head))
273                 return 1;
274
275         return 0;
276 }
277
278 static void
279 _queue_node_push_back(queue *queue, queue_node *node)
280 {
281         LIST_ADDTAIL(&node->item_link, &queue->head);
282         queue->count++;
283 }
284
285 static void
286 _queue_node_push_front(queue *queue, queue_node *node)
287 {
288         LIST_ADD(&node->item_link, &queue->head);
289         queue->count++;
290 }
291
292 static queue_node *
293 _queue_node_pop_front(queue *queue)
294 {
295         queue_node *node;
296
297         if (!queue->head.next) return NULL;
298         if (!queue->count) return NULL;
299
300         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
301
302         LIST_DELINIT(&node->item_link);
303         queue->count--;
304
305         return node;
306 }
307
308 static queue_node *
309 _queue_node_pop(queue *queue, queue_node *node)
310 {
311         LIST_DELINIT(&node->item_link);
312         queue->count--;
313
314         return node;
315 }
316
317 static queue_node *
318 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
319                 tbm_surface_h surface, int *out_type)
320 {
321         queue_node *node = NULL;
322
323         if (type == 0)
324                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
325         if (out_type)
326                 *out_type = 0;
327
328         if (type & FREE_QUEUE) {
329                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
330                                          item_link) {
331                         if (node->surface == surface) {
332                                 if (out_type)
333                                         *out_type = FREE_QUEUE;
334
335                                 return node;
336                         }
337                 }
338         }
339
340         if (type & DIRTY_QUEUE) {
341                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
342                                          item_link) {
343                         if (node->surface == surface) {
344                                 if (out_type)
345                                         *out_type = DIRTY_QUEUE;
346
347                                 return node;
348                         }
349                 }
350         }
351
352         if (type & NODE_LIST) {
353                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
354                         if (node->surface == surface) {
355                                 if (out_type)
356                                         *out_type = NODE_LIST;
357
358                                 return node;
359                         }
360                 }
361         }
362
363         TBM_LOG_E("fail to get the queue_node.\n");
364
365         return NULL;
366 }
367
368 static void
369 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
370 {
371         if (node->surface) {
372                 if (surface_queue->free_cb) {
373                         surface_queue->free_cb(surface_queue,
374                                         surface_queue->alloc_cb_data,
375                                         node->surface);
376                 }
377
378                 tbm_surface_destroy(node->surface);
379         }
380
381         _queue_node_delete(node);
382 }
383
384 static void
385 _queue_init(queue *queue)
386 {
387         LIST_INITHEAD(&queue->head);
388
389         queue->count = 0;
390 }
391
392 static void
393 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
394             void *data)
395 {
396         TBM_RETURN_IF_FAIL(cb != NULL);
397
398         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
399
400         TBM_RETURN_IF_FAIL(item != NULL);
401
402         LIST_INITHEAD(&item->link);
403         item->cb = cb;
404         item->data = data;
405
406         LIST_ADDTAIL(&item->link, list);
407 }
408
409 static void
410 _notify_remove(struct list_head *list,
411                tbm_surface_queue_notify_cb cb, void *data)
412 {
413         queue_notify *item = NULL, *tmp;
414
415         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
416                 if (item->cb == cb && item->data == data) {
417                         LIST_DEL(&item->link);
418                         free(item);
419                         return;
420                 }
421         }
422
423         TBM_LOG_E("Cannot find notify\n");
424 }
425
426 static void
427 _notify_remove_all(struct list_head *list)
428 {
429         queue_notify *item = NULL, *tmp;
430
431         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
432                 LIST_DEL(&item->link);
433                 free(item);
434         }
435 }
436
437 static void
438 _notify_emit(tbm_surface_queue_h surface_queue,
439              struct list_head *list)
440 {
441         queue_notify *item = NULL, *tmp;
442
443         /*
444                 item->cb is a callback that lives outside libtbm.
445                 The tbm user may remove items from this list inside the callback,
446                 so we have to iterate with LIST_FOR_EACH_ENTRY_SAFE.
447         */
448         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
449                 item->cb(surface_queue, item->data);
450 }
451
452 static void
453 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
454             void *data)
455 {
456         TBM_RETURN_IF_FAIL(cb != NULL);
457
458         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
459
460         TBM_RETURN_IF_FAIL(item != NULL);
461
462         LIST_INITHEAD(&item->link);
463         item->cb = cb;
464         item->data = data;
465
466         LIST_ADDTAIL(&item->link, list);
467 }
468
469 static void
470 _trace_remove(struct list_head *list,
471                tbm_surface_queue_trace_cb cb, void *data)
472 {
473         queue_trace *item = NULL, *tmp;
474
475         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
476                 if (item->cb == cb && item->data == data) {
477                         LIST_DEL(&item->link);
478                         free(item);
479                         return;
480                 }
481         }
482
483         TBM_LOG_E("Cannot find notify\n");
484 }
485
486 static void
487 _trace_remove_all(struct list_head *list)
488 {
489         queue_trace *item = NULL, *tmp;
490
491         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
492                 LIST_DEL(&item->link);
493                 free(item);
494         }
495 }
496
497 static void
498 _trace_emit(tbm_surface_queue_h surface_queue,
499              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
500 {
501         queue_trace *item = NULL, *tmp;
502
503         /*
504                 item->cb is a callback that lives outside libtbm.
505                 The tbm user may remove items from this list inside the callback,
506                 so we have to iterate with LIST_FOR_EACH_ENTRY_SAFE.
507         */
508         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
509                 item->cb(surface_queue, surface, trace, item->data);
510 }
511
512 static int
513 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
514 {
515         queue_node *node = NULL;
516         int count = 0;
517
518         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
519                 if (node->type == type)
520                         count++;
521         }
522
523         return count;
524 }
525
526 static void
527 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
528                           tbm_surface_h surface)
529 {
530         queue_node *node;
531
532         node = _queue_node_create();
533         TBM_RETURN_IF_FAIL(node != NULL);
534
535         tbm_surface_internal_ref(surface);
536         node->surface = surface;
537
538         LIST_ADDTAIL(&node->link, &surface_queue->list);
539         surface_queue->num_attached++;
540         _queue_node_push_back(&surface_queue->free_queue, node);
541 }
542
543 static void
544 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
545                           tbm_surface_h surface)
546 {
547         queue_node *node;
548         int queue_type;
549
550         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
551         if (node) {
552                 _queue_delete_node(surface_queue, node);
553                 surface_queue->num_attached--;
554         }
555 }
556
557 static void
558 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
559                            queue_node *node, int push_back)
560 {
561         if (push_back)
562                 _queue_node_push_back(&surface_queue->dirty_queue, node);
563         else
564                 _queue_node_push_front(&surface_queue->dirty_queue, node);
565 }
566
567 static queue_node *
568 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
569 {
570         queue_node *node;
571
572         node = _queue_node_pop_front(&surface_queue->free_queue);
573
574         return node;
575 }
576
577 static queue_node *
578 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
579 {
580         queue_node *node;
581
582         if (_queue_is_empty(&surface_queue->dirty_queue))
583                 return NULL;
584
585         node = _queue_node_pop_front(&surface_queue->dirty_queue);
586
587         return node;
588 }
589
590 static void
591 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
592                            queue_node *node, int push_back)
593 {
594         if (push_back)
595                 _queue_node_push_back(&surface_queue->free_queue, node);
596         else
597                 _queue_node_push_front(&surface_queue->free_queue, node);
598 }
599
600 static void
601 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
602                         int queue_size,
603                         int width, int height, int format,
604                         const tbm_surface_queue_interface *impl, void *data)
605 {
606         TBM_RETURN_IF_FAIL(surface_queue != NULL);
607         TBM_RETURN_IF_FAIL(impl != NULL);
608
609         if (!g_surf_queue_bufmgr)
610                 _init_tbm_surf_queue_bufmgr();
611
612         pthread_mutex_init(&surface_queue->lock, NULL);
613         pthread_cond_init(&surface_queue->free_cond, NULL);
614         pthread_cond_init(&surface_queue->dirty_cond, NULL);
615
616         surface_queue->queue_size = queue_size;
617         surface_queue->width = width;
618         surface_queue->height = height;
619         surface_queue->format = format;
620         surface_queue->impl = impl;
621         surface_queue->impl_data = data;
622
623         _queue_init(&surface_queue->free_queue);
624         _queue_init(&surface_queue->dirty_queue);
625         LIST_INITHEAD(&surface_queue->list);
626
627         LIST_INITHEAD(&surface_queue->destory_noti);
628         LIST_INITHEAD(&surface_queue->dequeuable_noti);
629         LIST_INITHEAD(&surface_queue->dequeue_noti);
630         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
631         LIST_INITHEAD(&surface_queue->acquirable_noti);
632         LIST_INITHEAD(&surface_queue->reset_noti);
633         LIST_INITHEAD(&surface_queue->trace_noti);
634
635         if (surface_queue->impl && surface_queue->impl->init)
636                 surface_queue->impl->init(surface_queue);
637
638         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
639 }
640
641 tbm_surface_queue_error_e
642 tbm_surface_queue_add_destroy_cb(
643         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
644         void *data)
645 {
646         _tbm_surf_queue_mutex_lock();
647
648         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
649                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
650
651         pthread_mutex_lock(&surface_queue->lock);
652
653         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
654
655         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
656
657         pthread_mutex_unlock(&surface_queue->lock);
658
659         _tbm_surf_queue_mutex_unlock();
660
661         return TBM_SURFACE_QUEUE_ERROR_NONE;
662 }
663
664 tbm_surface_queue_error_e
665 tbm_surface_queue_remove_destroy_cb(
666         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
667         void *data)
668 {
669         _tbm_surf_queue_mutex_lock();
670
671         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
672                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
673
674         pthread_mutex_lock(&surface_queue->lock);
675
676         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
677
678         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
679
680         pthread_mutex_unlock(&surface_queue->lock);
681
682         _tbm_surf_queue_mutex_unlock();
683
684         return TBM_SURFACE_QUEUE_ERROR_NONE;
685 }
686
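/*
 * Hedged usage sketch for the notify callbacks above; on_queue_destroy and
 * app_data are hypothetical application names. Note that removal matches on
 * the (cb, data) pair, so the same pointers must be passed again.
 *
 *	static void
 *	on_queue_destroy(tbm_surface_queue_h queue, void *data)
 *	{
 *		// The queue is being torn down; drop any cached references to it.
 *	}
 *
 *	tbm_surface_queue_add_destroy_cb(queue, on_queue_destroy, app_data);
 *	...
 *	tbm_surface_queue_remove_destroy_cb(queue, on_queue_destroy, app_data);
 */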
687 tbm_surface_queue_error_e
688 tbm_surface_queue_add_dequeuable_cb(
689         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
690         void *data)
691 {
692         _tbm_surf_queue_mutex_lock();
693
694         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
695                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
696
697         pthread_mutex_lock(&surface_queue->lock);
698
699         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
700
701         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
702
703         pthread_mutex_unlock(&surface_queue->lock);
704
705         _tbm_surf_queue_mutex_unlock();
706
707         return TBM_SURFACE_QUEUE_ERROR_NONE;
708 }
709
710 tbm_surface_queue_error_e
711 tbm_surface_queue_remove_dequeuable_cb(
712         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
713         void *data)
714 {
715         _tbm_surf_queue_mutex_lock();
716
717         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
718                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
719
720         pthread_mutex_lock(&surface_queue->lock);
721
722         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
723
724         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
725
726         pthread_mutex_unlock(&surface_queue->lock);
727
728         _tbm_surf_queue_mutex_unlock();
729
730         return TBM_SURFACE_QUEUE_ERROR_NONE;
731 }
732
733 tbm_surface_queue_error_e
734 tbm_surface_queue_add_dequeue_cb(
735         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
736         void *data)
737 {
738         _tbm_surf_queue_mutex_lock();
739
740         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
741                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
742
743         pthread_mutex_lock(&surface_queue->lock);
744
745         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
746
747         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
748
749         pthread_mutex_unlock(&surface_queue->lock);
750
751         _tbm_surf_queue_mutex_unlock();
752
753         return TBM_SURFACE_QUEUE_ERROR_NONE;
754 }
755
756 tbm_surface_queue_error_e
757 tbm_surface_queue_remove_dequeue_cb(
758         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
759         void *data)
760 {
761         _tbm_surf_queue_mutex_lock();
762
763         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
764                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
765
766         pthread_mutex_lock(&surface_queue->lock);
767
768         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
769
770         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
771
772         pthread_mutex_unlock(&surface_queue->lock);
773
774         _tbm_surf_queue_mutex_unlock();
775
776         return TBM_SURFACE_QUEUE_ERROR_NONE;
777 }
778
779 tbm_surface_queue_error_e
780 tbm_surface_queue_add_can_dequeue_cb(
781         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
782         void *data)
783 {
784         _tbm_surf_queue_mutex_lock();
785
786         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
787                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
788
789         pthread_mutex_lock(&surface_queue->lock);
790
791         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
792
793         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
794
795         pthread_mutex_unlock(&surface_queue->lock);
796
797         _tbm_surf_queue_mutex_unlock();
798
799         return TBM_SURFACE_QUEUE_ERROR_NONE;
800 }
801
802 tbm_surface_queue_error_e
803 tbm_surface_queue_remove_can_dequeue_cb(
804         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
805         void *data)
806 {
807         _tbm_surf_queue_mutex_lock();
808
809         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
810                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
811
812         pthread_mutex_lock(&surface_queue->lock);
813
814         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
815
816         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
817
818         pthread_mutex_unlock(&surface_queue->lock);
819
820         _tbm_surf_queue_mutex_unlock();
821
822         return TBM_SURFACE_QUEUE_ERROR_NONE;
823 }
824
825 tbm_surface_queue_error_e
826 tbm_surface_queue_add_acquirable_cb(
827         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
828         void *data)
829 {
830         _tbm_surf_queue_mutex_lock();
831
832         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
833                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
834
835         pthread_mutex_lock(&surface_queue->lock);
836
837         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
838
839         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
840
841         pthread_mutex_unlock(&surface_queue->lock);
842
843         _tbm_surf_queue_mutex_unlock();
844
845         return TBM_SURFACE_QUEUE_ERROR_NONE;
846 }
847
848 tbm_surface_queue_error_e
849 tbm_surface_queue_remove_acquirable_cb(
850         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
851         void *data)
852 {
853         _tbm_surf_queue_mutex_lock();
854
855         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
856                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
857
858         pthread_mutex_lock(&surface_queue->lock);
859
860         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
861
862         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
863
864         pthread_mutex_unlock(&surface_queue->lock);
865
866         _tbm_surf_queue_mutex_unlock();
867
868         return TBM_SURFACE_QUEUE_ERROR_NONE;
869 }
870
871 tbm_surface_queue_error_e
872 tbm_surface_queue_add_trace_cb(
873         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
874         void *data)
875 {
876         _tbm_surf_queue_mutex_lock();
877
878         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
879                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
880
881         pthread_mutex_lock(&surface_queue->lock);
882
883         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
884
885         _trace_add(&surface_queue->trace_noti, trace_cb, data);
886
887         pthread_mutex_unlock(&surface_queue->lock);
888
889         _tbm_surf_queue_mutex_unlock();
890
891         return TBM_SURFACE_QUEUE_ERROR_NONE;
892 }
893
894 tbm_surface_queue_error_e
895 tbm_surface_queue_remove_trace_cb(
896         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
897         void *data)
898 {
899         _tbm_surf_queue_mutex_lock();
900
901         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
902                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
903
904         pthread_mutex_lock(&surface_queue->lock);
905
906         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
907
908         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
909
910         pthread_mutex_unlock(&surface_queue->lock);
911
912         _tbm_surf_queue_mutex_unlock();
913
914         return TBM_SURFACE_QUEUE_ERROR_NONE;
915 }
916
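/*
 * Hedged sketch of a trace hook; the callback shape matches the invocation in
 * _trace_emit() above (on_trace is a hypothetical name).
 *
 *	static void
 *	on_trace(tbm_surface_queue_h queue, tbm_surface_h surface,
 *		 tbm_surface_queue_trace trace, void *data)
 *	{
 *		// trace identifies the transition: dequeue, enqueue, acquire,
 *		// release, or one of the cancel events.
 *	}
 *
 *	tbm_surface_queue_add_trace_cb(queue, on_trace, NULL);
 */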
917 tbm_surface_queue_error_e
918 tbm_surface_queue_set_alloc_cb(
919         tbm_surface_queue_h surface_queue,
920         tbm_surface_alloc_cb alloc_cb,
921         tbm_surface_free_cb free_cb,
922         void *data)
923 {
924         _tbm_surf_queue_mutex_lock();
925
926         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
927                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
928
929         pthread_mutex_lock(&surface_queue->lock);
930
931         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
932
933         surface_queue->alloc_cb = alloc_cb;
934         surface_queue->free_cb = free_cb;
935         surface_queue->alloc_cb_data = data;
936
937         pthread_mutex_unlock(&surface_queue->lock);
938
939         _tbm_surf_queue_mutex_unlock();
940
941         return TBM_SURFACE_QUEUE_ERROR_NONE;
942 }
943
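/*
 * Hedged sketch of the external allocation hooks. The free_cb shape matches
 * the call in _queue_delete_node() above; the alloc_cb shape (returning a
 * tbm_surface_h for the queue) is an assumption here, so check
 * tbm_surface_queue.h for the authoritative typedefs. my_alloc_data, my_alloc
 * and my_free are hypothetical names.
 *
 *	struct my_alloc_data { int width, height, format; };
 *
 *	static tbm_surface_h
 *	my_alloc(tbm_surface_queue_h queue, void *data)
 *	{
 *		struct my_alloc_data *d = data;
 *
 *		// Avoid calling back into the queue API here; the hook may run
 *		// while the queue locks are held.
 *		return tbm_surface_create(d->width, d->height, d->format);
 *	}
 *
 *	static void
 *	my_free(tbm_surface_queue_h queue, void *data, tbm_surface_h surface)
 *	{
 *		// The queue destroys the surface itself in _queue_delete_node();
 *		// only release allocator-side bookkeeping here.
 *	}
 *
 *	tbm_surface_queue_set_alloc_cb(queue, my_alloc, my_free, &alloc_data);
 */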
944 int
945 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
946 {
947         int width;
948
949         _tbm_surf_queue_mutex_lock();
950
951         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
952
953         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
954
955         width = surface_queue->width;
956
957         _tbm_surf_queue_mutex_unlock();
958
959         return width;
960 }
961
962 int
963 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
964 {
965         int height;
966
967         _tbm_surf_queue_mutex_lock();
968
969         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
970
971         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
972
973         height = surface_queue->height;
974
975         _tbm_surf_queue_mutex_unlock();
976
977         return height;
978 }
979
980 int
981 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
982 {
983         int format;
984
985         _tbm_surf_queue_mutex_lock();
986
987         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
988
989         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
990
991         format = surface_queue->format;
992
993         _tbm_surf_queue_mutex_unlock();
994
995         return format;
996 }
997
998 int
999 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1000 {
1001         int queue_size;
1002
1003         _tbm_surf_queue_mutex_lock();
1004
1005         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1006
1007         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1008
1009         queue_size = surface_queue->queue_size;
1010
1011         _tbm_surf_queue_mutex_unlock();
1012
1013         return queue_size;
1014 }
1015
1016 tbm_surface_queue_error_e
1017 tbm_surface_queue_add_reset_cb(
1018         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1019         void *data)
1020 {
1021         _tbm_surf_queue_mutex_lock();
1022
1023         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1024                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1025
1026         pthread_mutex_lock(&surface_queue->lock);
1027
1028         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1029
1030         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1031
1032         pthread_mutex_unlock(&surface_queue->lock);
1033
1034         _tbm_surf_queue_mutex_unlock();
1035
1036         return TBM_SURFACE_QUEUE_ERROR_NONE;
1037 }
1038
1039 tbm_surface_queue_error_e
1040 tbm_surface_queue_remove_reset_cb(
1041         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1042         void *data)
1043 {
1044         _tbm_surf_queue_mutex_lock();
1045
1046         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1047                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1048
1049         pthread_mutex_lock(&surface_queue->lock);
1050
1051         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1052
1053         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1054
1055         pthread_mutex_unlock(&surface_queue->lock);
1056
1057         _tbm_surf_queue_mutex_unlock();
1058
1059         return TBM_SURFACE_QUEUE_ERROR_NONE;
1060 }
1061
1062 tbm_surface_queue_error_e
1063 tbm_surface_queue_enqueue(tbm_surface_queue_h
1064                           surface_queue, tbm_surface_h surface)
1065 {
1066         queue_node *node;
1067         int queue_type;
1068
1069         _tbm_surf_queue_mutex_lock();
1070
1071         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1072                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1073         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1074                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1075
1076         if (b_dump_queue)
1077                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1078
1079         pthread_mutex_lock(&surface_queue->lock);
1080
1081         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1082
1083         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1084         if (node == NULL || queue_type != NODE_LIST) {
1085                 TBM_LOG_E("tbm_surface_queue_enqueue::Surface already exists in free_queue or dirty_queue. node:%p, type:%d\n",
1086                         node, queue_type);
1087                 pthread_mutex_unlock(&surface_queue->lock);
1088
1089                 _tbm_surf_queue_mutex_unlock();
1090
1091                 if (!node)
1092                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1093                 else
1094                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1095         }
1096
1097         if (surface_queue->impl && surface_queue->impl->enqueue)
1098                 surface_queue->impl->enqueue(surface_queue, node);
1099         else
1100                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1101
1102         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1103                 TBM_LOG_E("enqueued surface but dirty_queue is empty. node:%p\n", node);
1104                 pthread_mutex_unlock(&surface_queue->lock);
1105
1106                 _tbm_surf_queue_mutex_unlock();
1107                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1108         }
1109
1110         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1111
1112         pthread_mutex_unlock(&surface_queue->lock);
1113         pthread_cond_signal(&surface_queue->dirty_cond);
1114
1115         _tbm_surf_queue_mutex_unlock();
1116
1117         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1118
1119         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1120
1121         return TBM_SURFACE_QUEUE_ERROR_NONE;
1122 }
1123
1124 tbm_surface_queue_error_e
1125 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1126                           surface_queue, tbm_surface_h surface)
1127 {
1128         queue_node *node;
1129         int queue_type;
1130
1131         _tbm_surf_queue_mutex_lock();
1132
1133         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1134                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1135         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1136                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1137
1138         pthread_mutex_lock(&surface_queue->lock);
1139
1140         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1141
1142         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1143         if (node == NULL || queue_type != NODE_LIST) {
1144                 TBM_LOG_E("tbm_surface_queue_cancel_dequeue::Surface already exists in free_queue or dirty_queue. node:%p, type:%d\n",
1145                         node, queue_type);
1146                 pthread_mutex_unlock(&surface_queue->lock);
1147
1148                 _tbm_surf_queue_mutex_unlock();
1149                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1150         }
1151
1152         if (node->delete_pending) {
1153                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1154
1155                 _queue_delete_node(surface_queue, node);
1156
1157                 pthread_mutex_unlock(&surface_queue->lock);
1158
1159                 _tbm_surf_queue_mutex_unlock();
1160
1161                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1162
1163                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1164         }
1165
1166         if (surface_queue->queue_size < surface_queue->num_attached) {
1167                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1168
1169                 if (surface_queue->impl && surface_queue->impl->need_detach)
1170                         surface_queue->impl->need_detach(surface_queue, node);
1171                 else
1172                         _tbm_surface_queue_detach(surface_queue, surface);
1173
1174                 pthread_mutex_unlock(&surface_queue->lock);
1175
1176                 _tbm_surf_queue_mutex_unlock();
1177
1178                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1179
1180                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1181         }
1182
1183         if (surface_queue->impl && surface_queue->impl->release)
1184                 surface_queue->impl->release(surface_queue, node);
1185         else
1186                 _tbm_surface_queue_release(surface_queue, node, 1);
1187
1188         if (_queue_is_empty(&surface_queue->free_queue)) {
1189                 pthread_mutex_unlock(&surface_queue->lock);
1190
1191                 TBM_LOG_E("surface_queue->free_queue is empty.\n");
1192                 _tbm_surf_queue_mutex_unlock();
1193                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1194         }
1195
1196         node->type = QUEUE_NODE_TYPE_RELEASE;
1197
1198         pthread_mutex_unlock(&surface_queue->lock);
1199         pthread_cond_signal(&surface_queue->free_cond);
1200
1201         _tbm_surf_queue_mutex_unlock();
1202
1203         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1204
1205         return TBM_SURFACE_QUEUE_ERROR_NONE;
1206 }
1207
1208 tbm_surface_queue_error_e
1209 tbm_surface_queue_dequeue(tbm_surface_queue_h
1210                           surface_queue, tbm_surface_h *surface)
1211 {
1212         queue_node *node;
1213
1214         _tbm_surf_queue_mutex_lock();
1215
1216         *surface = NULL;
1217
1218         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1219                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1220         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1221                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1222
1223         pthread_mutex_lock(&surface_queue->lock);
1224
1225         if (_queue_is_empty(&surface_queue->free_queue)) {
1226                 if (surface_queue->impl && surface_queue->impl->need_attach)
1227                         surface_queue->impl->need_attach(surface_queue);
1228
1229                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1230                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1231                         pthread_mutex_unlock(&surface_queue->lock);
1232                         _tbm_surf_queue_mutex_unlock();
1233                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1234                 }
1235         }
1236
1237         if (surface_queue->impl && surface_queue->impl->dequeue)
1238                 node = surface_queue->impl->dequeue(surface_queue);
1239         else
1240                 node = _tbm_surface_queue_dequeue(surface_queue);
1241
1242         if (node == NULL || node->surface == NULL) {
1243                 TBM_LOG_E("_queue_node_pop_front failed\n");
1244                 pthread_mutex_unlock(&surface_queue->lock);
1245
1246                 _tbm_surf_queue_mutex_unlock();
1247                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1248         }
1249
1250         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1251         *surface = node->surface;
1252
1253         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1254
1255         pthread_mutex_unlock(&surface_queue->lock);
1256
1257         _tbm_surf_queue_mutex_unlock();
1258
1259         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1260
1261         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1262
1263         return TBM_SURFACE_QUEUE_ERROR_NONE;
1264 }
1265
1266 int
1267 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1268 {
1269         _tbm_surf_queue_mutex_lock();
1270
1271         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1272
1273         _tbm_surf_queue_mutex_unlock();
1274
1275         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1276
1277         _tbm_surf_queue_mutex_lock();
1278
1279         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1280
1281         pthread_mutex_lock(&surface_queue->lock);
1282
1283         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1284
1285         if (_queue_is_empty(&surface_queue->free_queue)) {
1286                 if (surface_queue->impl && surface_queue->impl->need_attach)
1287                         surface_queue->impl->need_attach(surface_queue);
1288
1289                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1290                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1291                         pthread_mutex_unlock(&surface_queue->lock);
1292                         _tbm_surf_queue_mutex_unlock();
1293                         return 0;
1294                 }
1295         }
1296
1297         if (!_queue_is_empty(&surface_queue->free_queue)) {
1298                 pthread_mutex_unlock(&surface_queue->lock);
1299                 _tbm_surf_queue_mutex_unlock();
1300                 return 1;
1301         }
1302
1303         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1304                                                 QUEUE_NODE_TYPE_ACQUIRE)) {
1305                 _tbm_surf_queue_mutex_unlock();
1306                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1307                 _tbm_surf_queue_mutex_lock();
1308
1309                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1310                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1311                         pthread_mutex_unlock(&surface_queue->lock);
1312                         _tbm_surf_queue_mutex_unlock();
1313                         return 0;
1314                 }
1315
1316                 pthread_mutex_unlock(&surface_queue->lock);
1317                 _tbm_surf_queue_mutex_unlock();
1318                 return 1;
1319         }
1320
1321         pthread_mutex_unlock(&surface_queue->lock);
1322         _tbm_surf_queue_mutex_unlock();
1323         return 0;
1324 }
1325
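/*
 * Hedged producer-side sketch tying can_dequeue/dequeue/enqueue together;
 * render_into() is a hypothetical application function.
 *
 *	tbm_surface_h surface = NULL;
 *
 *	if (tbm_surface_queue_can_dequeue(queue, 1) &&
 *	    tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *		render_into(surface);
 *		tbm_surface_queue_enqueue(queue, surface);	// hand the frame to the consumer
 *		// or: tbm_surface_queue_cancel_dequeue(queue, surface);
 *	}
 */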
1326 tbm_surface_queue_error_e
1327 tbm_surface_queue_release(tbm_surface_queue_h
1328                           surface_queue, tbm_surface_h surface)
1329 {
1330         queue_node *node;
1331         int queue_type;
1332
1333         _tbm_surf_queue_mutex_lock();
1334
1335         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1336                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1337         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1338                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1339
1340         pthread_mutex_lock(&surface_queue->lock);
1341
1342         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1343
1344         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1345         if (node == NULL || queue_type != NODE_LIST) {
1346                 TBM_LOG_E("tbm_surface_queue_release::Surface already exists in free_queue or dirty_queue. node:%p, type:%d\n",
1347                         node, queue_type);
1348                 pthread_mutex_unlock(&surface_queue->lock);
1349
1350                 _tbm_surf_queue_mutex_unlock();
1351
1352                 if (!node)
1353                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1354                 else
1355                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1356         }
1357
1358         if (node->delete_pending) {
1359                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1360
1361                 _queue_delete_node(surface_queue, node);
1362
1363                 pthread_mutex_unlock(&surface_queue->lock);
1364
1365                 _tbm_surf_queue_mutex_unlock();
1366
1367                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1368
1369                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1370         }
1371
1372         if (surface_queue->queue_size < surface_queue->num_attached) {
1373                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1374
1375                 if (surface_queue->impl && surface_queue->impl->need_detach)
1376                         surface_queue->impl->need_detach(surface_queue, node);
1377                 else
1378                         _tbm_surface_queue_detach(surface_queue, surface);
1379
1380                 pthread_mutex_unlock(&surface_queue->lock);
1381
1382                 _tbm_surf_queue_mutex_unlock();
1383
1384                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1385
1386                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1387         }
1388
1389         if (surface_queue->impl && surface_queue->impl->release)
1390                 surface_queue->impl->release(surface_queue, node);
1391         else
1392                 _tbm_surface_queue_release(surface_queue, node, 1);
1393
1394         if (_queue_is_empty(&surface_queue->free_queue)) {
1395                 pthread_mutex_unlock(&surface_queue->lock);
1396
1397                 TBM_LOG_E("surface_queue->free_queue is empty.\n");
1398                 _tbm_surf_queue_mutex_unlock();
1399                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1400         }
1401
1402         node->type = QUEUE_NODE_TYPE_RELEASE;
1403
1404         pthread_mutex_unlock(&surface_queue->lock);
1405         pthread_cond_signal(&surface_queue->free_cond);
1406
1407         _tbm_surf_queue_mutex_unlock();
1408
1409         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1410
1411         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1412
1413         return TBM_SURFACE_QUEUE_ERROR_NONE;
1414 }
1415
1416 tbm_surface_queue_error_e
1417 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1418                         surface_queue, tbm_surface_h surface)
1419 {
1420         queue_node *node;
1421         int queue_type;
1422
1423         _tbm_surf_queue_mutex_lock();
1424
1425         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1426                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1427         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1428                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1429
1430         pthread_mutex_lock(&surface_queue->lock);
1431
1432         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1433
1434         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1435         if (node == NULL || queue_type != NODE_LIST) {
1436                 TBM_LOG_E("tbm_surface_queue_cancel_acquire::Surface already exists in free_queue or dirty_queue. node:%p, type:%d\n",
1437                         node, queue_type);
1438                 pthread_mutex_unlock(&surface_queue->lock);
1439
1440                 _tbm_surf_queue_mutex_unlock();
1441                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1442         }
1443
1444         if (surface_queue->impl && surface_queue->impl->enqueue)
1445                 surface_queue->impl->enqueue(surface_queue, node);
1446         else
1447                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1448
1449         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1450                 TBM_LOG_E("enqueued surface but dirty_queue is empty. node:%p\n", node);
1451                 pthread_mutex_unlock(&surface_queue->lock);
1452
1453                 _tbm_surf_queue_mutex_unlock();
1454                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1455         }
1456
1457         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1458
1459         pthread_mutex_unlock(&surface_queue->lock);
1460         pthread_cond_signal(&surface_queue->dirty_cond);
1461
1462         _tbm_surf_queue_mutex_unlock();
1463
1464         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1465
1466         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1467
1468         return TBM_SURFACE_QUEUE_ERROR_NONE;
1469 }
1470
1471 tbm_surface_queue_error_e
1472 tbm_surface_queue_acquire(tbm_surface_queue_h
1473                           surface_queue, tbm_surface_h *surface)
1474 {
1475         queue_node *node;
1476
1477         _tbm_surf_queue_mutex_lock();
1478
1479         *surface = NULL;
1480
1481         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1482                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1483         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1484                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1485
1486         pthread_mutex_lock(&surface_queue->lock);
1487
1488         if (surface_queue->impl && surface_queue->impl->acquire)
1489                 node = surface_queue->impl->acquire(surface_queue);
1490         else
1491                 node = _tbm_surface_queue_acquire(surface_queue);
1492
1493         if (node == NULL || node->surface == NULL) {
1494                 TBM_LOG_E("_queue_node_pop_front failed\n");
1495                 pthread_mutex_unlock(&surface_queue->lock);
1496
1497                 _tbm_surf_queue_mutex_unlock();
1498                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1499         }
1500
1501         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1502
1503         *surface = node->surface;
1504
1505         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1506
1507         pthread_mutex_unlock(&surface_queue->lock);
1508
1509         _tbm_surf_queue_mutex_unlock();
1510
1511         if (b_dump_queue)
1512                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1513
1514         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1515
1516         return TBM_SURFACE_QUEUE_ERROR_NONE;
1517 }
1518
1519 int
1520 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1521 {
1522         _tbm_surf_queue_mutex_lock();
1523
1524         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1525
1526         pthread_mutex_lock(&surface_queue->lock);
1527
1528         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1529
1530         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1531                 pthread_mutex_unlock(&surface_queue->lock);
1532                 _tbm_surf_queue_mutex_unlock();
1533                 return 1;
1534         }
1535
1536         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1537                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1538                 _tbm_surf_queue_mutex_unlock();
1539                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1540                 _tbm_surf_queue_mutex_lock();
1541
1542                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1543                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1544                         pthread_mutex_unlock(&surface_queue->lock);
1545                         _tbm_surf_queue_mutex_unlock();
1546                         return 0;
1547                 }
1548
1549                 pthread_mutex_unlock(&surface_queue->lock);
1550                 _tbm_surf_queue_mutex_unlock();
1551                 return 1;
1552         }
1553
1554         pthread_mutex_unlock(&surface_queue->lock);
1555         _tbm_surf_queue_mutex_unlock();
1556         return 0;
1557 }
1558
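/*
 * Hedged consumer-side sketch mirroring the producer loop above;
 * present_on_screen() is a hypothetical application function.
 *
 *	tbm_surface_h surface = NULL;
 *
 *	if (tbm_surface_queue_can_acquire(queue, 1) &&
 *	    tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *		present_on_screen(surface);
 *		tbm_surface_queue_release(queue, surface);	// let the producer dequeue it again
 *	}
 */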
1559 void
1560 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1561 {
1562         queue_node *node = NULL, *tmp;
1563
1564         _tbm_surf_queue_mutex_lock();
1565
1566         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1567
1568         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1569
1570         LIST_DEL(&surface_queue->item_link);
1571
1572         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1573                 _queue_delete_node(surface_queue, node);
1574
1575         if (surface_queue->impl && surface_queue->impl->destroy)
1576                 surface_queue->impl->destroy(surface_queue);
1577
1578         _notify_emit(surface_queue, &surface_queue->destory_noti);
1579
1580         _notify_remove_all(&surface_queue->destory_noti);
1581         _notify_remove_all(&surface_queue->dequeuable_noti);
1582         _notify_remove_all(&surface_queue->dequeue_noti);
1583         _notify_remove_all(&surface_queue->can_dequeue_noti);
1584         _notify_remove_all(&surface_queue->acquirable_noti);
1585         _notify_remove_all(&surface_queue->reset_noti);
1586         _trace_remove_all(&surface_queue->trace_noti);
1587
1588         pthread_mutex_destroy(&surface_queue->lock);
1589
1590         free(surface_queue);
1591
1592         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1593                 _deinit_tbm_surf_queue_bufmgr();
1594
1595         _tbm_surf_queue_mutex_unlock();
1596 }
1597
1598 tbm_surface_queue_error_e
1599 tbm_surface_queue_reset(tbm_surface_queue_h
1600                         surface_queue, int width, int height, int format)
1601 {
1602         queue_node *node = NULL, *tmp;
1603
1604         _tbm_surf_queue_mutex_lock();
1605
1606         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1607                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1608
1609         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1610
1611         if (width == surface_queue->width && height == surface_queue->height &&
1612                 format == surface_queue->format) {
1613                 _tbm_surf_queue_mutex_unlock();
1614                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1615         }
1616
1617         pthread_mutex_lock(&surface_queue->lock);
1618
1619         surface_queue->width = width;
1620         surface_queue->height = height;
1621         surface_queue->format = format;
1622
1623         /* Destroy the surfaces in free_queue and mark the remaining nodes delete-pending */
1624         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1625                 _queue_delete_node(surface_queue, node);
1626
1627         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1628                 node->delete_pending = 1;
1629
1630         /* Reset queue */
1631         _queue_init(&surface_queue->free_queue);
1632
1633         surface_queue->num_attached = 0;
1634
1635         if (surface_queue->impl && surface_queue->impl->reset)
1636                 surface_queue->impl->reset(surface_queue);
1637
1638         pthread_mutex_unlock(&surface_queue->lock);
1639         pthread_cond_signal(&surface_queue->free_cond);
1640
1641         _tbm_surf_queue_mutex_unlock();
1642
1643         _notify_emit(surface_queue, &surface_queue->reset_noti);
1644
1645         return TBM_SURFACE_QUEUE_ERROR_NONE;
1646 }
1647
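/*
 * tbm_surface_queue_notify_reset():
 * Emits the reset notification without touching any buffer, letting the
 * owner make listeners react as if a reset had happened.
 */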
1648 tbm_surface_queue_error_e
1649 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1650 {
1651         _tbm_surf_queue_mutex_lock();
1652
1653         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1654                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1655
1656         _tbm_surf_queue_mutex_unlock();
1657
1658         _notify_emit(surface_queue, &surface_queue->reset_noti);
1659
1660         return TBM_SURFACE_QUEUE_ERROR_NONE;
1661 }
1662
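/*
 * tbm_surface_queue_set_size():
 * Grows or shrinks the queue.  With 'flush' set this behaves like a flush
 * with a new size: free buffers are destroyed, in-flight ones are marked
 * delete-pending and the reset notification fires.  Without 'flush', only
 * as many free buffers as needed are detached when shrinking.
 * Illustrative sketch:
 *
 *   tbm_surface_queue_set_size(queue, 4, 0);  // grow, keep current buffers
 *   tbm_surface_queue_set_size(queue, 2, 1);  // shrink and flush everything
 */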
1663 tbm_surface_queue_error_e
1664 tbm_surface_queue_set_size(tbm_surface_queue_h
1665                         surface_queue, int queue_size, int flush)
1666 {
1667         queue_node *node = NULL, *tmp;
1668
1669         _tbm_surf_queue_mutex_lock();
1670
1671         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1672                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1673         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1674                                         TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1675
1676         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1677
1678         if ((surface_queue->queue_size == queue_size) && !flush) {
1679                 _tbm_surf_queue_mutex_unlock();
1680                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1681         }
1682
1683         pthread_mutex_lock(&surface_queue->lock);
1684
1685         if (flush) {
1686                 /* Destroy the surfaces in free_queue and mark the remaining nodes delete-pending */
1687                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1688                         _queue_delete_node(surface_queue, node);
1689
1690                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1691                         node->delete_pending = 1;
1692
1693                 /* Reset queue */
1694                 _queue_init(&surface_queue->free_queue);
1695
1696                 surface_queue->num_attached = 0;
1697                 surface_queue->queue_size = queue_size;
1698
1699                 if (surface_queue->impl && surface_queue->impl->reset)
1700                         surface_queue->impl->reset(surface_queue);
1701
1702                 pthread_mutex_unlock(&surface_queue->lock);
1703                 pthread_cond_signal(&surface_queue->free_cond);
1704
1705                 _tbm_surf_queue_mutex_unlock();
1706
1707                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1708
1709                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1710         } else {
1711                 if (surface_queue->queue_size > queue_size) {
1712                         int need_del = surface_queue->queue_size - queue_size;
1713
1714                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1715                                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1716
1717                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1718                                         surface_queue->impl->need_detach(surface_queue, node);
1719                                 else
1720                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1721
1722                                 need_del--;
1723                                 if (need_del == 0)
1724                                         break;
1725                         }
1726                 }
1727
1728                 surface_queue->queue_size = queue_size;
1729
1730                 pthread_mutex_unlock(&surface_queue->lock);
1731
1732                 _tbm_surf_queue_mutex_unlock();
1733
1734                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1735         }
1736 }
1737
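/*
 * tbm_surface_queue_free_flush():
 * Detaches only the buffers currently parked in free_queue; buffers that
 * are dequeued, enqueued or acquired are left untouched and no reset
 * notification is emitted.
 */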
1738 tbm_surface_queue_error_e
1739 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1740 {
1741         queue_node *node = NULL;
1742
1743         _tbm_surf_queue_mutex_lock();
1744
1745         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1746                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1747
1748         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1749
1750         if (surface_queue->num_attached == 0) {
1751                 _tbm_surf_queue_mutex_unlock();
1752                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1753         }
1754
1755         pthread_mutex_lock(&surface_queue->lock);
1756
1757         /* Destroy the surfaces in free_queue */
1758         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1759                 if (surface_queue->impl && surface_queue->impl->need_detach)
1760                         surface_queue->impl->need_detach(surface_queue, node);
1761                 else
1762                         _tbm_surface_queue_detach(surface_queue, node->surface);
1763         }
1764
1765         /* Reset queue */
1766         _queue_init(&surface_queue->free_queue);
1767
1768         pthread_mutex_unlock(&surface_queue->lock);
1769         _tbm_surf_queue_mutex_unlock();
1770
1771         return TBM_SURFACE_QUEUE_ERROR_NONE;
1772 }
1773
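/*
 * tbm_surface_queue_flush():
 * Full flush: free buffers are destroyed right away, buffers still out with
 * the producer or consumer are marked delete-pending, num_attached is reset
 * and the reset notification is emitted.  Width, height and format stay as
 * they are (compare tbm_surface_queue_reset()).
 */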
1774 tbm_surface_queue_error_e
1775 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1776 {
1777         queue_node *node = NULL, *tmp;
1778
1779         _tbm_surf_queue_mutex_lock();
1780
1781         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1782                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1783
1784         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1785
1786         if (surface_queue->num_attached == 0) {
1787                 _tbm_surf_queue_mutex_unlock();
1788                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1789         }
1790
1791         pthread_mutex_lock(&surface_queue->lock);
1792
1793         /* Destroy the surfaces in free_queue and mark the remaining nodes delete-pending */
1794         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1795                 _queue_delete_node(surface_queue, node);
1796
1797         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1798                 node->delete_pending = 1;
1799
1800         /* Reset queue */
1801         _queue_init(&surface_queue->free_queue);
1802
1803         surface_queue->num_attached = 0;
1804
1805         if (surface_queue->impl && surface_queue->impl->reset)
1806                 surface_queue->impl->reset(surface_queue);
1807
1808         pthread_mutex_unlock(&surface_queue->lock);
1809         pthread_cond_signal(&surface_queue->free_cond);
1810
1811         _tbm_surf_queue_mutex_unlock();
1812
1813         _notify_emit(surface_queue, &surface_queue->reset_noti);
1814
1815         return TBM_SURFACE_QUEUE_ERROR_NONE;
1816 }
1817
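/*
 * tbm_surface_queue_get_surfaces():
 * Writes the tbm_surface_h of every node currently tracked by the queue
 * into 'surfaces' (when non-NULL) and reports the count through 'num';
 * passing NULL for 'surfaces' only counts.  Illustrative sketch; the array
 * size is an arbitrary choice and must cover every tracked buffer:
 *
 *   tbm_surface_h surfaces[16];
 *   int num = 0;
 *   if (tbm_surface_queue_get_surfaces(queue, surfaces, &num) ==
 *       TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           for (int i = 0; i < num; i++)
 *                   ; // inspect surfaces[i]
 *   }
 */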
1818 tbm_surface_queue_error_e
1819 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1820                         tbm_surface_h *surfaces, int *num)
1821 {
1822         queue_node *node = NULL;
1823
1824         _tbm_surf_queue_mutex_lock();
1825
1826         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1827                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1828         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1829                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1830
1831         *num = 0;
1832
1833         pthread_mutex_lock(&surface_queue->lock);
1834
1835         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1836                 if (surfaces)
1837                         surfaces[*num] = node->surface;
1838
1839                 *num = *num + 1;
1840         }
1841
1842         pthread_mutex_unlock(&surface_queue->lock);
1843
1844         _tbm_surf_queue_mutex_unlock();
1845
1846         return TBM_SURFACE_QUEUE_ERROR_NONE;
1847 }
1848
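/*
 * tbm_surface_queue_get_trace_surface_num():
 * Reports how many buffers currently sit in the given state
 * (dequeued, enqueued, acquired or released).  Illustrative sketch:
 *
 *   int dequeued = 0;
 *   tbm_surface_queue_get_trace_surface_num(queue,
 *                   TBM_SURFACE_QUEUE_TRACE_DEQUEUE, &dequeued);
 */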
1849 tbm_surface_queue_error_e
1850 tbm_surface_queue_get_trace_surface_num(
1851                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1852 {
1853         _tbm_surf_queue_mutex_lock();
1854
1855         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1856                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1857         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1858                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1859
1860         *num = 0;
1861
1862         pthread_mutex_lock(&surface_queue->lock);
1863
1864         switch (trace) {
1865         case TBM_SURFACE_QUEUE_TRACE_NONE:
1866                 *num = 0;
1867                 break;
1868         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1869                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1870                 break;
1871         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1872                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1873                 break;
1874         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1875                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1876                 break;
1877         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1878                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1879                 break;
1880         default:
1881                 break;
1882         }
1883
1884         pthread_mutex_unlock(&surface_queue->lock);
1885
1886         _tbm_surf_queue_mutex_unlock();
1887
1888         return TBM_SURFACE_QUEUE_ERROR_NONE;
1889 }
1890
1891 typedef struct {
1892         int flags;
1893 } tbm_queue_default;
1894
1895 static void
1896 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1897 {
1898         free(surface_queue->impl_data);
1899 }
1900
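/*
 * need_attach hook of the default queue: attach one more surface as long as
 * the queue is not full.  When an alloc_cb is installed, both locks are
 * dropped around the callback so the allocator may call back into the
 * surface-queue API; otherwise the surface is created directly with the
 * flags given at create time.
 */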
1901 static void
1902 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
1903 {
1904         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
1905         tbm_surface_h surface;
1906
1907         if (surface_queue->queue_size == surface_queue->num_attached)
1908                 return;
1909
1910         if (surface_queue->alloc_cb) {
1911                 pthread_mutex_unlock(&surface_queue->lock);
1912                 _tbm_surf_queue_mutex_unlock();
1913                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1914                 _tbm_surf_queue_mutex_lock();
1915                 pthread_mutex_lock(&surface_queue->lock);
1916
1917                 /* silent return */
1918                 if (!surface)
1919                         return;
1920
1921                 tbm_surface_internal_ref(surface);
1922         } else {
1923                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1924                                 surface_queue->height,
1925                                 surface_queue->format,
1926                                 data->flags);
1927                 TBM_RETURN_IF_FAIL(surface != NULL);
1928         }
1929
1930         _tbm_surface_queue_attach(surface_queue, surface);
1931         tbm_surface_internal_unref(surface);
1932 }
1933
1934 static const tbm_surface_queue_interface tbm_queue_default_impl = {
1935         NULL,                           /*__tbm_queue_default_init*/
1936         NULL,                           /*__tbm_queue_default_reset*/
1937         __tbm_queue_default_destroy,
1938         __tbm_queue_default_need_attach,
1939         NULL,                           /*__tbm_queue_default_enqueue*/
1940         NULL,                           /*__tbm_queue_default_release*/
1941         NULL,                           /*__tbm_queue_default_dequeue*/
1942         NULL,                           /*__tbm_queue_default_acquire*/
1943         NULL,                           /*__tbm_queue_default_need_detach*/
1944 };
1945
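/*
 * tbm_surface_queue_create():
 * Creates the default queue: buffers are attached lazily through
 * __tbm_queue_default_need_attach() above and the generic enqueue/acquire
 * paths are used unchanged.  Illustrative producer-side sketch, assuming
 * the usual TBM_FORMAT_/TBM_BO_ definitions:
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_create(3, 720, 1280,
 *                                    TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   tbm_surface_h surface = NULL;
 *   if (tbm_surface_queue_dequeue(queue, &surface) ==
 *       TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           ... render into 'surface' ...
 *           tbm_surface_queue_enqueue(queue, surface);
 *   }
 */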
1946 tbm_surface_queue_h
1947 tbm_surface_queue_create(int queue_size, int width,
1948                          int height, int format, int flags)
1949 {
1950         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1951         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1952         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1953         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1954
1955         _tbm_surf_queue_mutex_lock();
1956
1957         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1958                                             sizeof(struct _tbm_surface_queue));
1959         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1960
1961         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1962
1963         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
1964                                   sizeof(tbm_queue_default));
1965         if (data == NULL) {
1966                 TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
1967                 free(surface_queue);
1968                 _tbm_surf_queue_mutex_unlock();
1969                 return NULL;
1970         }
1971
1972         data->flags = flags;
1973         _tbm_surface_queue_init(surface_queue,
1974                                 queue_size,
1975                                 width, height, format,
1976                                 &tbm_queue_default_impl, data);
1977
1978         _tbm_surf_queue_mutex_unlock();
1979
1980         return surface_queue;
1981 }
1982
1983 typedef struct {
1984         int flags;
1985         queue dequeue_list;
1986 } tbm_queue_sequence;
1987
1988 static void
1989 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
1990 {
1991         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1992
1993         _queue_init(&data->dequeue_list);
1994 }
1995
1996 static void
1997 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
1998 {
1999         free(surface_queue->impl_data);
2000 }
2001
2002 static void
2003 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
2004 {
2005         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2006         tbm_surface_h surface;
2007
2008         if (surface_queue->queue_size == surface_queue->num_attached)
2009                 return;
2010
2011         if (surface_queue->alloc_cb) {
2012                 pthread_mutex_unlock(&surface_queue->lock);
2013                 _tbm_surf_queue_mutex_unlock();
2014                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2015                 _tbm_surf_queue_mutex_lock();
2016                 pthread_mutex_lock(&surface_queue->lock);
2017
2018                 /* silent return */
2019                 if (!surface)
2020                         return;
2021
2022                 tbm_surface_internal_ref(surface);
2023         } else {
2024                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2025                                 surface_queue->height,
2026                                 surface_queue->format,
2027                                 data->flags);
2028                 TBM_RETURN_IF_FAIL(surface != NULL);
2029         }
2030
2031         _tbm_surface_queue_attach(surface_queue, surface);
2032         tbm_surface_internal_unref(surface);
2033 }
2034
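/*
 * enqueue hook of the sequence queue: a buffer becomes acquirable only when
 * every buffer dequeued before it has been enqueued or released as well.
 * priv_flags marks a node as "still dequeued", so clearing it here lets the
 * loop below drain dequeue_list in order up to the first node that is still
 * outstanding.
 */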
2035 static void
2036 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2037                              queue_node *node)
2038 {
2039         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2040         queue_node *next = NULL, *tmp;
2041
2042         node->priv_flags = 0;
2043
2044         LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
2045                 if (next->priv_flags)
2046                         break;
2047                 _queue_node_pop(&data->dequeue_list, next);
2048                 _tbm_surface_queue_enqueue(surface_queue, next, 1);
2049         }
2050 }
2051
2052 static void
2053 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2054                                 queue_node *node)
2055 {
2056         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2057
2058         if (node->priv_flags) {
2059                 node->priv_flags = 0;
2060                 _queue_node_pop(&data->dequeue_list, node);
2061         }
2062
2063         _tbm_surface_queue_release(surface_queue, node, 1);
2064 }
2065
2066 static queue_node *
2067 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2068                              surface_queue)
2069 {
2070         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2071         queue_node *node;
2072
2073         node = _tbm_surface_queue_dequeue(surface_queue);
2074         if (node) {
2075                 _queue_node_push_back(&data->dequeue_list, node);
2076                 node->priv_flags = 1;
2077         }
2078
2079         return node;
2080 }
2081
2082 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2083         __tbm_queue_sequence_init,
2084         NULL,
2085         __tbm_queue_sequence_destroy,
2086         __tbm_queue_sequence_need_attach,
2087         __tbm_queue_sequence_enqueue,
2088         __tbm_queue_sequence_release,
2089         __tbm_queue_sequence_dequeue,
2090         NULL,                                   /*__tbm_queue_sequence_acquire*/
2091         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2092 };
2093
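/*
 * tbm_surface_queue_sequence_create():
 * Same allocation behaviour as the default queue, but the sequence hooks
 * above hand buffers to the consumer in the order they were dequeued, no
 * matter in which order the producer enqueues them.  Illustrative sketch:
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_sequence_create(3, 720, 1280,
 *                                   TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 */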
2094 tbm_surface_queue_h
2095 tbm_surface_queue_sequence_create(int queue_size, int width,
2096                                   int height, int format, int flags)
2097 {
2098         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2099         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
2100         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
2101         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2102
2103         _tbm_surf_queue_mutex_lock();
2104
2105         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2106                                             sizeof(struct _tbm_surface_queue));
2107         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2108
2109         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
2110
2111         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2112                                    sizeof(tbm_queue_sequence));
2113         if (data == NULL) {
2114                 TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
2115                 free(surface_queue);
2116                 _tbm_surf_queue_mutex_unlock();
2117                 return NULL;
2118         }
2119
2120         data->flags = flags;
2121         _tbm_surface_queue_init(surface_queue,
2122                                 queue_size,
2123                                 width, height, format,
2124                                 &tbm_queue_sequence_impl, data);
2125
2126         _tbm_surf_queue_mutex_unlock();
2127
2128         return surface_queue;
2129 }
2130 /* LCOV_EXCL_STOP */