[platform/core/uifw/libtbm.git] / src/tbm_surface_queue.c @ commit 0896edcbe3a13dd8c25c004a1c237cab959bfa9c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 static tbm_bufmgr g_surf_queue_bufmgr;
42 static pthread_mutex_t tbm_surf_queue_lock;
43 void _tbm_surface_queue_mutex_unlock(void);
44
45 /* check condition */
46 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
47         if (!(cond)) {\
48                 TBM_ERR("'%s' failed.\n", #cond);\
49                 _tbm_surf_queue_mutex_unlock();\
50                 return;\
51         } \
52 }
53
54 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
55         if (!(cond)) {\
56                 TBM_ERR("'%s' failed.\n", #cond);\
57                 _tbm_surf_queue_mutex_unlock();\
58                 return val;\
59         } \
60 }
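/*
 * Validation guards for the public entry points: the caller must already
 * hold the global tbm_surf_queue_lock. On failure the failed condition is
 * logged, the global lock is dropped via _tbm_surf_queue_mutex_unlock()
 * and the function returns (with 'val' in the _VAL_ variant).
 */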
61
62 typedef enum _queue_node_type {
63         QUEUE_NODE_TYPE_NONE,
64         QUEUE_NODE_TYPE_DEQUEUE,
65         QUEUE_NODE_TYPE_ENQUEUE,
66         QUEUE_NODE_TYPE_ACQUIRE,
67         QUEUE_NODE_TYPE_RELEASE
68 } Queue_Node_Type;
69
70 typedef struct {
71         struct list_head head;
72         int count;
73 } queue;
74
75 typedef struct {
76         tbm_surface_h surface;
77
78         struct list_head item_link;
79         struct list_head link;
80
81         Queue_Node_Type type;
82
83         unsigned int priv_flags;        /*for each queue*/
84
85         int delete_pending;
86 } queue_node;
87
88 typedef struct {
89         struct list_head link;
90
91         tbm_surface_queue_notify_cb cb;
92         void *data;
93 } queue_notify;
94
95 typedef struct {
96         struct list_head link;
97
98         tbm_surface_queue_trace_cb cb;
99         void *data;
100 } queue_trace;
101
102 typedef struct _tbm_surface_queue_interface {
103         void (*init)(tbm_surface_queue_h queue);
104         void (*reset)(tbm_surface_queue_h queue);
105         void (*destroy)(tbm_surface_queue_h queue);
106         void (*need_attach)(tbm_surface_queue_h queue);
107
108         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
109         void (*release)(tbm_surface_queue_h queue, queue_node *node);
110         queue_node *(*dequeue)(tbm_surface_queue_h queue);
111         queue_node *(*acquire)(tbm_surface_queue_h queue);
112         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
113 } tbm_surface_queue_interface;
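/*
 * Backend hook table. A queue implementation may provide any subset of
 * these hooks; whenever a hook is NULL, the generic _tbm_surface_queue_*
 * helpers below are used instead (see the "impl && impl->..." checks in
 * the public functions).
 */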
114
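/*
 * A surface queue keeps every attached buffer on 'list' and moves the
 * corresponding queue_node between 'free_queue' (dequeuable buffers) and
 * 'dirty_queue' (enqueued, acquirable buffers). 'lock' protects the
 * per-queue state, while the file-global tbm_surf_queue_lock serializes
 * handle validation against g_surf_queue_bufmgr->surf_queue_list.
 */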
115 struct _tbm_surface_queue {
116         int width;
117         int height;
118         int format;
119         int queue_size;
120         int num_attached;
121
122         queue free_queue;
123         queue dirty_queue;
124         struct list_head list;
125
126         struct list_head destory_noti;
127         struct list_head dequeuable_noti;
128         struct list_head dequeue_noti;
129         struct list_head can_dequeue_noti;
130         struct list_head acquirable_noti;
131         struct list_head reset_noti;
132         struct list_head trace_noti;
133
134         pthread_mutex_t lock;
135         pthread_cond_t free_cond;
136         pthread_cond_t dirty_cond;
137
138         const tbm_surface_queue_interface *impl;
139         void *impl_data;
140
141         //For external buffer allocation
142         tbm_surface_alloc_cb alloc_cb;
143         tbm_surface_free_cb free_cb;
144         void *alloc_cb_data;
145
146         struct list_head item_link; /* link of surface queue */
147
148         int modes;
149         unsigned int enqueue_sync_count;
150         unsigned int acquire_sync_count;
151 };
152
153 static bool
154 _tbm_surf_queue_mutex_init(void)
155 {
156         static bool tbm_surf_queue_mutex_init = false;
157
158         if (tbm_surf_queue_mutex_init)
159                 return true;
160
161         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
162                 TBM_ERR("fail: pthread_mutex_init\n");
163                 return false;
164         }
165
166         tbm_surf_queue_mutex_init = true;
167
168         return true;
169 }
170
171 static void
172 _tbm_surf_queue_mutex_lock(void)
173 {
174         if (!_tbm_surf_queue_mutex_init()) {
175                 TBM_ERR("fail: _tbm_surf_queue_mutex_init\n");
176                 return;
177         }
178
179         pthread_mutex_lock(&tbm_surf_queue_lock);
180 }
181
182 static void
183 _tbm_surf_queue_mutex_unlock(void)
184 {
185         pthread_mutex_unlock(&tbm_surf_queue_lock);
186 }
187
188 static void
189 _init_tbm_surf_queue_bufmgr(void)
190 {
191         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
192 }
193
194 static void
195 _deinit_tbm_surf_queue_bufmgr(void)
196 {
197         if (!g_surf_queue_bufmgr)
198                 return;
199
200         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
201         g_surf_queue_bufmgr = NULL;
202 }
203
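/*
 * A handle is considered valid only if it is still registered on the
 * global surf_queue_list; the pointer itself is never dereferenced, so
 * stale or foreign handles are rejected safely. Must be called with the
 * global tbm_surf_queue_lock held.
 */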
204 static int
205 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
206 {
207         tbm_surface_queue_h old_data = NULL;
208
209         if (surface_queue == NULL) {
210                 TBM_ERR("error: surface_queue is NULL.\n");
211                 return 0;
212         }
213
214         if (g_surf_queue_bufmgr == NULL) {
215                 TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n");
216                 return 0;
217         }
218
219         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
220                 TBM_ERR("error: surf_queue_list is empty\n");
221                 return 0;
222         }
223
224         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
225                                 item_link) {
226                 if (old_data == surface_queue) {
227                         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
228                         return 1;
229                 }
230         }
231
232         TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
233
234         return 0;
235 }
236
237 static queue_node *
238 _queue_node_create(void)
239 {
240         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
241
242         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
243
244         return node;
245 }
246
247 static void
248 _queue_node_delete(queue_node *node)
249 {
250         LIST_DEL(&node->item_link);
251         LIST_DEL(&node->link);
252         free(node);
253 }
254
255 static int
256 _queue_is_empty(queue *queue)
257 {
258         if (LIST_IS_EMPTY(&queue->head))
259                 return 1;
260
261         return 0;
262 }
263
264 static void
265 _queue_node_push_back(queue *queue, queue_node *node)
266 {
267         LIST_ADDTAIL(&node->item_link, &queue->head);
268         queue->count++;
269 }
270
271 static void
272 _queue_node_push_front(queue *queue, queue_node *node)
273 {
274         LIST_ADD(&node->item_link, &queue->head);
275         queue->count++;
276 }
277
278 static queue_node *
279 _queue_node_pop_front(queue *queue)
280 {
281         queue_node *node;
282
283         if (!queue->head.next) return NULL;
284         if (!queue->count) return NULL;
285
286         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
287
288         LIST_DELINIT(&node->item_link);
289         queue->count--;
290
291         return node;
292 }
293
294 static queue_node *
295 _queue_node_pop(queue *queue, queue_node *node)
296 {
297         LIST_DELINIT(&node->item_link);
298         queue->count--;
299
300         return node;
301 }
302
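/*
 * Find the queue_node that owns 'surface'. 'type' is a bitmask of
 * FREE_QUEUE, DIRTY_QUEUE and NODE_LIST selecting which containers to
 * search; 0 means "search all". The container that matched is reported
 * through 'out_type'.
 */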
303 static queue_node *
304 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
305                 tbm_surface_h surface, int *out_type)
306 {
307         queue_node *node = NULL;
308
309         if (type == 0)
310                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
311         if (out_type)
312                 *out_type = 0;
313
314         if (type & FREE_QUEUE) {
315                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
316                                          item_link) {
317                         if (node->surface == surface) {
318                                 if (out_type)
319                                         *out_type = FREE_QUEUE;
320
321                                 return node;
322                         }
323                 }
324         }
325
326         if (type & DIRTY_QUEUE) {
327                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
328                                          item_link) {
329                         if (node->surface == surface) {
330                                 if (out_type)
331                                         *out_type = DIRTY_QUEUE;
332
333                                 return node;
334                         }
335                 }
336         }
337
338         if (type & NODE_LIST) {
339                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
340                         if (node->surface == surface) {
341                                 if (out_type)
342                                         *out_type = NODE_LIST;
343
344                                 return node;
345                         }
346                 }
347         }
348
349         TBM_ERR("fail to get the queue_node.\n");
350
351         return NULL;
352 }
353
354 static void
355 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
356 {
357         if (node->surface) {
358                 if (surface_queue->free_cb) {
359                         surface_queue->free_cb(surface_queue,
360                                         surface_queue->alloc_cb_data,
361                                         node->surface);
362                 }
363
364                 tbm_surface_destroy(node->surface);
365         }
366
367         _queue_node_delete(node);
368 }
369
370 static void
371 _queue_init(queue *queue)
372 {
373         LIST_INITHEAD(&queue->head);
374
375         queue->count = 0;
376 }
377
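/*
 * The _notify_*() and _trace_*() helpers manage the per-event callback
 * lists (destroy, dequeuable, dequeue, can_dequeue, acquirable, reset,
 * trace). Emission walks the list with LIST_FOR_EACH_ENTRY_SAFE because
 * a callback may remove itself while it is being dispatched.
 */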
378 static void
379 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
380             void *data)
381 {
382         TBM_RETURN_IF_FAIL(cb != NULL);
383
384         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
385
386         TBM_RETURN_IF_FAIL(item != NULL);
387
388         LIST_INITHEAD(&item->link);
389         item->cb = cb;
390         item->data = data;
391
392         LIST_ADDTAIL(&item->link, list);
393 }
394
395 static void
396 _notify_remove(struct list_head *list,
397                tbm_surface_queue_notify_cb cb, void *data)
398 {
399         queue_notify *item = NULL, *tmp;
400
401         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
402                 if (item->cb == cb && item->data == data) {
403                         LIST_DEL(&item->link);
404                         free(item);
405                         return;
406                 }
407         }
408
409         TBM_ERR("Cannot find the notify callback\n");
410 }
411
412 static void
413 _notify_remove_all(struct list_head *list)
414 {
415         queue_notify *item = NULL, *tmp;
416
417         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
418                 LIST_DEL(&item->link);
419                 free(item);
420         }
421 }
422
423 static void
424 _notify_emit(tbm_surface_queue_h surface_queue,
425              struct list_head *list)
426 {
427         queue_notify *item = NULL, *tmp;
428
429         /*
430                 item->cb is a callback registered from outside libtbm.
431                 The user may remove entries from this list inside the callback,
432                 so LIST_FOR_EACH_ENTRY_SAFE is required here.
433         */
434         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
435                 item->cb(surface_queue, item->data);
436 }
437
438 static void
439 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
440             void *data)
441 {
442         TBM_RETURN_IF_FAIL(cb != NULL);
443
444         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
445
446         TBM_RETURN_IF_FAIL(item != NULL);
447
448         LIST_INITHEAD(&item->link);
449         item->cb = cb;
450         item->data = data;
451
452         LIST_ADDTAIL(&item->link, list);
453 }
454
455 static void
456 _trace_remove(struct list_head *list,
457                tbm_surface_queue_trace_cb cb, void *data)
458 {
459         queue_trace *item = NULL, *tmp;
460
461         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
462                 if (item->cb == cb && item->data == data) {
463                         LIST_DEL(&item->link);
464                         free(item);
465                         return;
466                 }
467         }
468
469         TBM_ERR("Cannot find the trace callback\n");
470 }
471
472 static void
473 _trace_remove_all(struct list_head *list)
474 {
475         queue_trace *item = NULL, *tmp;
476
477         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
478                 LIST_DEL(&item->link);
479                 free(item);
480         }
481 }
482
483 static void
484 _trace_emit(tbm_surface_queue_h surface_queue,
485              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
486 {
487         queue_trace *item = NULL, *tmp;
488
489         /*
490                 item->cb is a callback registered from outside libtbm.
491                 The user may remove entries from this list inside the callback,
492                 so LIST_FOR_EACH_ENTRY_SAFE is required here.
493         */
494         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
495                 item->cb(surface_queue, surface, trace, item->data);
496 }
497
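/*
 * Count the attached buffers that are currently in the given lifecycle
 * state; used by can_dequeue()/can_acquire() to decide whether blocking
 * on the condition variable can ever be woken up.
 */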
498 static int
499 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
500 {
501         queue_node *node = NULL;
502         int count = 0;
503
504         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
505                 if (node->type == type)
506                         count++;
507         }
508
509         return count;
510 }
511
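/*
 * Attach takes a reference on the surface, links a new node into the
 * attached-buffer list and makes it immediately dequeuable by pushing it
 * onto the free_queue; detach (below) drops the node again, invoking the
 * user's free_cb, if set, before the surface is destroyed.
 */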
512 static void
513 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
514                           tbm_surface_h surface)
515 {
516         queue_node *node;
517
518         node = _queue_node_create();
519         TBM_RETURN_IF_FAIL(node != NULL);
520
521         tbm_surface_internal_ref(surface);
522         node->surface = surface;
523
524         LIST_ADDTAIL(&node->link, &surface_queue->list);
525         surface_queue->num_attached++;
526         _queue_node_push_back(&surface_queue->free_queue, node);
527 }
528
529 static void
530 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
531                           tbm_surface_h surface)
532 {
533         queue_node *node;
534         int queue_type;
535
536         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
537         if (node) {
538                 _queue_delete_node(surface_queue, node);
539                 surface_queue->num_attached--;
540         }
541 }
542
543 static void
544 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
545                            queue_node *node, int push_back)
546 {
547         if (push_back)
548                 _queue_node_push_back(&surface_queue->dirty_queue, node);
549         else
550                 _queue_node_push_front(&surface_queue->dirty_queue, node);
551 }
552
553 static queue_node *
554 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
555 {
556         queue_node *node;
557
558         node = _queue_node_pop_front(&surface_queue->free_queue);
559
560         return node;
561 }
562
563 static queue_node *
564 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
565 {
566         queue_node *node;
567
568         if (_queue_is_empty(&surface_queue->dirty_queue))
569                 return NULL;
570
571         node = _queue_node_pop_front(&surface_queue->dirty_queue);
572
573         return node;
574 }
575
576 static void
577 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
578                            queue_node *node, int push_back)
579 {
580         if (push_back)
581                 _queue_node_push_back(&surface_queue->free_queue, node);
582         else
583                 _queue_node_push_front(&surface_queue->free_queue, node);
584 }
585
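/*
 * Common constructor used by the queue implementations: initializes the
 * locks, geometry, node containers and notification lists, runs the
 * backend's init() hook and registers the new queue on the global
 * surf_queue_list so that _tbm_surface_queue_is_valid() can find it.
 */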
586 static void
587 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
588                         int queue_size,
589                         int width, int height, int format,
590                         const tbm_surface_queue_interface *impl, void *data)
591 {
592         TBM_RETURN_IF_FAIL(surface_queue != NULL);
593         TBM_RETURN_IF_FAIL(impl != NULL);
594
595         if (!g_surf_queue_bufmgr)
596                 _init_tbm_surf_queue_bufmgr();
597
598         pthread_mutex_init(&surface_queue->lock, NULL);
599         pthread_cond_init(&surface_queue->free_cond, NULL);
600         pthread_cond_init(&surface_queue->dirty_cond, NULL);
601
602         surface_queue->queue_size = queue_size;
603         surface_queue->width = width;
604         surface_queue->height = height;
605         surface_queue->format = format;
606         surface_queue->impl = impl;
607         surface_queue->impl_data = data;
608         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
609
610         _queue_init(&surface_queue->free_queue);
611         _queue_init(&surface_queue->dirty_queue);
612         LIST_INITHEAD(&surface_queue->list);
613
614         LIST_INITHEAD(&surface_queue->destory_noti);
615         LIST_INITHEAD(&surface_queue->dequeuable_noti);
616         LIST_INITHEAD(&surface_queue->dequeue_noti);
617         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
618         LIST_INITHEAD(&surface_queue->acquirable_noti);
619         LIST_INITHEAD(&surface_queue->reset_noti);
620         LIST_INITHEAD(&surface_queue->trace_noti);
621
622         if (surface_queue->impl && surface_queue->impl->init)
623                 surface_queue->impl->init(surface_queue);
624
625         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
626 }
627
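/*
 * Illustrative buffer life-cycle, as a minimal sketch only (not part of
 * the library; error handling omitted). It assumes a queue created with
 * tbm_surface_queue_create() as declared in tbm_surface_queue.h:
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_create(3, 640, 480, TBM_FORMAT_ARGB8888, 0);
 *   tbm_surface_h surface;
 *
 *   // producer
 *   if (tbm_surface_queue_can_dequeue(queue, 0)) {
 *           tbm_surface_queue_dequeue(queue, &surface);  // free_queue -> DEQUEUE
 *           // ... render into the buffer ...
 *           tbm_surface_queue_enqueue(queue, surface);   // -> dirty_queue, acquirable
 *   }
 *
 *   // consumer
 *   if (tbm_surface_queue_can_acquire(queue, 0)) {
 *           tbm_surface_queue_acquire(queue, &surface);  // dirty_queue -> ACQUIRE
 *           // ... composite / scan out ...
 *           tbm_surface_queue_release(queue, surface);   // -> free_queue, dequeuable
 *   }
 *
 *   tbm_surface_queue_destroy(queue);
 */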
628 tbm_surface_queue_error_e
629 tbm_surface_queue_add_destroy_cb(
630         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
631         void *data)
632 {
633         _tbm_surf_queue_mutex_lock();
634
635         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
636                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
637         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
638                                TBM_ERROR_INVALID_PARAMETER);
639
640         pthread_mutex_lock(&surface_queue->lock);
641
642         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
643
644         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
645
646         pthread_mutex_unlock(&surface_queue->lock);
647
648         _tbm_surf_queue_mutex_unlock();
649
650         return TBM_SURFACE_QUEUE_ERROR_NONE;
651 }
652
653 tbm_surface_queue_error_e
654 tbm_surface_queue_remove_destroy_cb(
655         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
656         void *data)
657 {
658         _tbm_surf_queue_mutex_lock();
659
660         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
661                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
662
663         pthread_mutex_lock(&surface_queue->lock);
664
665         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
666
667         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
668
669         pthread_mutex_unlock(&surface_queue->lock);
670
671         _tbm_surf_queue_mutex_unlock();
672
673         return TBM_SURFACE_QUEUE_ERROR_NONE;
674 }
675
676 tbm_surface_queue_error_e
677 tbm_surface_queue_add_dequeuable_cb(
678         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
679         void *data)
680 {
681         _tbm_surf_queue_mutex_lock();
682
683         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
684                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
685         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
686                                TBM_ERROR_INVALID_PARAMETER);
687
688         pthread_mutex_lock(&surface_queue->lock);
689
690         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
691
692         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
693
694         pthread_mutex_unlock(&surface_queue->lock);
695
696         _tbm_surf_queue_mutex_unlock();
697
698         return TBM_SURFACE_QUEUE_ERROR_NONE;
699 }
700
701 tbm_surface_queue_error_e
702 tbm_surface_queue_remove_dequeuable_cb(
703         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
704         void *data)
705 {
706         _tbm_surf_queue_mutex_lock();
707
708         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
709                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
710
711         pthread_mutex_lock(&surface_queue->lock);
712
713         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
714
715         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
716
717         pthread_mutex_unlock(&surface_queue->lock);
718
719         _tbm_surf_queue_mutex_unlock();
720
721         return TBM_SURFACE_QUEUE_ERROR_NONE;
722 }
723
724 tbm_surface_queue_error_e
725 tbm_surface_queue_add_dequeue_cb(
726         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
727         void *data)
728 {
729         _tbm_surf_queue_mutex_lock();
730
731         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
732                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
733         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
734                                TBM_ERROR_INVALID_PARAMETER);
735
736         pthread_mutex_lock(&surface_queue->lock);
737
738         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
739
740         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
741
742         pthread_mutex_unlock(&surface_queue->lock);
743
744         _tbm_surf_queue_mutex_unlock();
745
746         return TBM_SURFACE_QUEUE_ERROR_NONE;
747 }
748
749 tbm_surface_queue_error_e
750 tbm_surface_queue_remove_dequeue_cb(
751         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
752         void *data)
753 {
754         _tbm_surf_queue_mutex_lock();
755
756         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
757                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
758
759         pthread_mutex_lock(&surface_queue->lock);
760
761         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
762
763         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
764
765         pthread_mutex_unlock(&surface_queue->lock);
766
767         _tbm_surf_queue_mutex_unlock();
768
769         return TBM_SURFACE_QUEUE_ERROR_NONE;
770 }
771
772 tbm_surface_queue_error_e
773 tbm_surface_queue_add_can_dequeue_cb(
774         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
775         void *data)
776 {
777         _tbm_surf_queue_mutex_lock();
778
779         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
780                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
781         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
782                                TBM_ERROR_INVALID_PARAMETER);
783
784         pthread_mutex_lock(&surface_queue->lock);
785
786         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
787
788         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
789
790         pthread_mutex_unlock(&surface_queue->lock);
791
792         _tbm_surf_queue_mutex_unlock();
793
794         return TBM_SURFACE_QUEUE_ERROR_NONE;
795 }
796
797 tbm_surface_queue_error_e
798 tbm_surface_queue_remove_can_dequeue_cb(
799         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
800         void *data)
801 {
802         _tbm_surf_queue_mutex_lock();
803
804         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
805                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
806
807         pthread_mutex_lock(&surface_queue->lock);
808
809         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
810
811         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
812
813         pthread_mutex_unlock(&surface_queue->lock);
814
815         _tbm_surf_queue_mutex_unlock();
816
817         return TBM_SURFACE_QUEUE_ERROR_NONE;
818 }
819
820 tbm_surface_queue_error_e
821 tbm_surface_queue_add_acquirable_cb(
822         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
823         void *data)
824 {
825         _tbm_surf_queue_mutex_lock();
826
827         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
828                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
829         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
830                                TBM_ERROR_INVALID_PARAMETER);
831
832         pthread_mutex_lock(&surface_queue->lock);
833
834         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
835
836         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
837
838         pthread_mutex_unlock(&surface_queue->lock);
839
840         _tbm_surf_queue_mutex_unlock();
841
842         return TBM_SURFACE_QUEUE_ERROR_NONE;
843 }
844
845 tbm_surface_queue_error_e
846 tbm_surface_queue_remove_acquirable_cb(
847         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
848         void *data)
849 {
850         _tbm_surf_queue_mutex_lock();
851
852         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
853                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
854
855         pthread_mutex_lock(&surface_queue->lock);
856
857         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
858
859         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
860
861         pthread_mutex_unlock(&surface_queue->lock);
862
863         _tbm_surf_queue_mutex_unlock();
864
865         return TBM_SURFACE_QUEUE_ERROR_NONE;
866 }
867
868 tbm_surface_queue_error_e
869 tbm_surface_queue_add_trace_cb(
870         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
871         void *data)
872 {
873         _tbm_surf_queue_mutex_lock();
874
875         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
876                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
877         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
878                                TBM_ERROR_INVALID_PARAMETER);
879
880         pthread_mutex_lock(&surface_queue->lock);
881
882         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
883
884         _trace_add(&surface_queue->trace_noti, trace_cb, data);
885
886         pthread_mutex_unlock(&surface_queue->lock);
887
888         _tbm_surf_queue_mutex_unlock();
889
890         return TBM_SURFACE_QUEUE_ERROR_NONE;
891 }
892
893 tbm_surface_queue_error_e
894 tbm_surface_queue_remove_trace_cb(
895         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
896         void *data)
897 {
898         _tbm_surf_queue_mutex_lock();
899
900         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
901                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
902
903         pthread_mutex_lock(&surface_queue->lock);
904
905         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
906
907         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
908
909         pthread_mutex_unlock(&surface_queue->lock);
910
911         _tbm_surf_queue_mutex_unlock();
912
913         return TBM_SURFACE_QUEUE_ERROR_NONE;
914 }
915
916 tbm_surface_queue_error_e
917 tbm_surface_queue_set_alloc_cb(
918         tbm_surface_queue_h surface_queue,
919         tbm_surface_alloc_cb alloc_cb,
920         tbm_surface_free_cb free_cb,
921         void *data)
922 {
923         _tbm_surf_queue_mutex_lock();
924
925         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
926                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
927
928         pthread_mutex_lock(&surface_queue->lock);
929
930         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
931
932         surface_queue->alloc_cb = alloc_cb;
933         surface_queue->free_cb = free_cb;
934         surface_queue->alloc_cb_data = data;
935
936         pthread_mutex_unlock(&surface_queue->lock);
937
938         _tbm_surf_queue_mutex_unlock();
939
940         return TBM_SURFACE_QUEUE_ERROR_NONE;
941 }
942
943 int
944 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
945 {
946         int width;
947
948         _tbm_surf_queue_mutex_lock();
949
950         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
951
952         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
953
954         width = surface_queue->width;
955
956         _tbm_surf_queue_mutex_unlock();
957
958         return width;
959 }
960
961 int
962 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
963 {
964         int height;
965
966         _tbm_surf_queue_mutex_lock();
967
968         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
969
970         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
971
972         height = surface_queue->height;
973
974         _tbm_surf_queue_mutex_unlock();
975
976         return height;
977 }
978
979 int
980 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
981 {
982         int format;
983
984         _tbm_surf_queue_mutex_lock();
985
986         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
987
988         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
989
990         format = surface_queue->format;
991
992         _tbm_surf_queue_mutex_unlock();
993
994         return format;
995 }
996
997 int
998 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
999 {
1000         int queue_size;
1001
1002         _tbm_surf_queue_mutex_lock();
1003
1004         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1005
1006         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1007
1008         queue_size = surface_queue->queue_size;
1009
1010         _tbm_surf_queue_mutex_unlock();
1011
1012         return queue_size;
1013 }
1014
1015 tbm_surface_queue_error_e
1016 tbm_surface_queue_add_reset_cb(
1017         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1018         void *data)
1019 {
1020         _tbm_surf_queue_mutex_lock();
1021
1022         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1023                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1024         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
1025                                TBM_ERROR_INVALID_PARAMETER);
1026
1027         pthread_mutex_lock(&surface_queue->lock);
1028
1029         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1030
1031         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1032
1033         pthread_mutex_unlock(&surface_queue->lock);
1034
1035         _tbm_surf_queue_mutex_unlock();
1036
1037         return TBM_SURFACE_QUEUE_ERROR_NONE;
1038 }
1039
1040 tbm_surface_queue_error_e
1041 tbm_surface_queue_remove_reset_cb(
1042         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1043         void *data)
1044 {
1045         _tbm_surf_queue_mutex_lock();
1046
1047         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1048                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1049
1050         pthread_mutex_lock(&surface_queue->lock);
1051
1052         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1053
1054         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1055
1056         pthread_mutex_unlock(&surface_queue->lock);
1057
1058         _tbm_surf_queue_mutex_unlock();
1059
1060         return TBM_SURFACE_QUEUE_ERROR_NONE;
1061 }
1062
1063 tbm_surface_queue_error_e
1064 tbm_surface_queue_enqueue(tbm_surface_queue_h
1065                           surface_queue, tbm_surface_h surface)
1066 {
1067         queue_node *node;
1068         int queue_type;
1069
1070         _tbm_surf_queue_mutex_lock();
1071
1072         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1073                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1074         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1075                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1076
1077         if (b_dump_queue)
1078                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1079
1080         pthread_mutex_lock(&surface_queue->lock);
1081
1082         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1083
1084         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1085         if (node == NULL || queue_type != NODE_LIST) {
1086                 TBM_ERR("tbm_surface_queue_enqueue::Surface is unknown or already in the free_queue or dirty_queue. node:%p, type:%d\n",
1087                         node, queue_type);
1088                 pthread_mutex_unlock(&surface_queue->lock);
1089
1090                 _tbm_surf_queue_mutex_unlock();
1091
1092                 if (!node)
1093                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1094                 else
1095                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1096         }
1097
1098         if (surface_queue->impl && surface_queue->impl->enqueue)
1099                 surface_queue->impl->enqueue(surface_queue, node);
1100         else
1101                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1102
1103         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1104                 TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1105                 pthread_mutex_unlock(&surface_queue->lock);
1106
1107                 _tbm_surf_queue_mutex_unlock();
1108                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1109         }
1110
1111         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1112
1113         if (surface_queue->enqueue_sync_count == 1) {
1114                 tbm_surface_info_s info;
1115                 int ret;
1116
1117                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1118                 if (ret == TBM_SURFACE_ERROR_NONE)
1119                         tbm_surface_unmap(surface);
1120         }
1121
1122         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1123
1124         pthread_mutex_unlock(&surface_queue->lock);
1125         pthread_cond_signal(&surface_queue->dirty_cond);
1126
1127         _tbm_surf_queue_mutex_unlock();
1128
1129         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1130
1131         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1132
1133         return TBM_SURFACE_QUEUE_ERROR_NONE;
1134 }
1135
1136 tbm_surface_queue_error_e
1137 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1138                           surface_queue, tbm_surface_h surface)
1139 {
1140         queue_node *node;
1141         int queue_type;
1142
1143         _tbm_surf_queue_mutex_lock();
1144
1145         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1146                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1147         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1148                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1149
1150         pthread_mutex_lock(&surface_queue->lock);
1151
1152         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1153
1154         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1155         if (node == NULL || queue_type != NODE_LIST) {
1156                 TBM_ERR("tbm_surface_queue_cancel_dequeue::Surface is unknown or already in the free_queue or dirty_queue. node:%p, type:%d\n",
1157                         node, queue_type);
1158                 pthread_mutex_unlock(&surface_queue->lock);
1159
1160                 _tbm_surf_queue_mutex_unlock();
1161                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1162         }
1163
1164         if (node->delete_pending) {
1165                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1166
1167                 _queue_delete_node(surface_queue, node);
1168
1169                 pthread_mutex_unlock(&surface_queue->lock);
1170
1171                 _tbm_surf_queue_mutex_unlock();
1172
1173                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1174
1175                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1176         }
1177
1178         if (surface_queue->queue_size < surface_queue->num_attached) {
1179                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1180
1181                 if (surface_queue->impl && surface_queue->impl->need_detach)
1182                         surface_queue->impl->need_detach(surface_queue, node);
1183                 else
1184                         _tbm_surface_queue_detach(surface_queue, surface);
1185
1186                 pthread_mutex_unlock(&surface_queue->lock);
1187
1188                 _tbm_surf_queue_mutex_unlock();
1189
1190                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1191
1192                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1193         }
1194
1195         if (surface_queue->impl && surface_queue->impl->release)
1196                 surface_queue->impl->release(surface_queue, node);
1197         else
1198                 _tbm_surface_queue_release(surface_queue, node, 1);
1199
1200         if (_queue_is_empty(&surface_queue->free_queue)) {
1201                 pthread_mutex_unlock(&surface_queue->lock);
1202
1203                 TBM_ERR("surface_queue->free_queue is empty.\n");
1204                 _tbm_surf_queue_mutex_unlock();
1205                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1206         }
1207
1208         node->type = QUEUE_NODE_TYPE_RELEASE;
1209
1210         pthread_mutex_unlock(&surface_queue->lock);
1211         pthread_cond_signal(&surface_queue->free_cond);
1212
1213         _tbm_surf_queue_mutex_unlock();
1214
1215         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1216
1217         return TBM_SURFACE_QUEUE_ERROR_NONE;
1218 }
1219
1220 tbm_surface_queue_error_e
1221 tbm_surface_queue_dequeue(tbm_surface_queue_h
1222                           surface_queue, tbm_surface_h *surface)
1223 {
1224         queue_node *node;
1225
1226         _tbm_surf_queue_mutex_lock();
1227
1228         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1229                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1230         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1231                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1232
1233         *surface = NULL;
1234
1235         pthread_mutex_lock(&surface_queue->lock);
1236
1237         if (_queue_is_empty(&surface_queue->free_queue)) {
1238                 if (surface_queue->impl && surface_queue->impl->need_attach)
1239                         surface_queue->impl->need_attach(surface_queue);
1240
1241                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1242                         TBM_ERR("surface_queue:%p is invalid", surface_queue);
1243                         pthread_mutex_unlock(&surface_queue->lock);
1244                         _tbm_surf_queue_mutex_unlock();
1245                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1246                 }
1247         }
1248
1249         if (surface_queue->impl && surface_queue->impl->dequeue)
1250                 node = surface_queue->impl->dequeue(surface_queue);
1251         else
1252                 node = _tbm_surface_queue_dequeue(surface_queue);
1253
1254         if (node == NULL || node->surface == NULL) {
1255                 TBM_ERR("_queue_node_pop_front failed\n");
1256                 pthread_mutex_unlock(&surface_queue->lock);
1257
1258                 _tbm_surf_queue_mutex_unlock();
1259                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1260         }
1261
1262         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1263         *surface = node->surface;
1264
1265         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1266
1267         pthread_mutex_unlock(&surface_queue->lock);
1268
1269         _tbm_surf_queue_mutex_unlock();
1270
1271         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1272
1273         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1274
1275         return TBM_SURFACE_QUEUE_ERROR_NONE;
1276 }
1277
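/*
 * can_dequeue() first emits the can_dequeue callbacks without holding any
 * queue lock (the callback may call back into the queue API, which is why
 * the handle is re-validated afterwards), and then checks the free_queue.
 * When 'wait' is set, it blocks on free_cond only while at least one
 * buffer is in the ACQUIRE state, since otherwise no release() could ever
 * signal the condition.
 */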
1278 int
1279 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1280 {
1281         _tbm_surf_queue_mutex_lock();
1282
1283         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1284
1285         _tbm_surf_queue_mutex_unlock();
1286
1287         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1288
1289         _tbm_surf_queue_mutex_lock();
1290
1291         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1292
1293         pthread_mutex_lock(&surface_queue->lock);
1294
1295         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1296
1297         if (_queue_is_empty(&surface_queue->free_queue)) {
1298                 if (surface_queue->impl && surface_queue->impl->need_attach)
1299                         surface_queue->impl->need_attach(surface_queue);
1300
1301                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1302                         TBM_ERR("surface_queue:%p is invalid", surface_queue);
1303                         pthread_mutex_unlock(&surface_queue->lock);
1304                         _tbm_surf_queue_mutex_unlock();
1305                         return 0;
1306                 }
1307         }
1308
1309         if (!_queue_is_empty(&surface_queue->free_queue)) {
1310                 pthread_mutex_unlock(&surface_queue->lock);
1311                 _tbm_surf_queue_mutex_unlock();
1312                 return 1;
1313         }
1314
1315         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1316                                                 QUEUE_NODE_TYPE_ACQUIRE)) {
1317                 _tbm_surf_queue_mutex_unlock();
1318                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1319                 pthread_mutex_unlock(&surface_queue->lock);
1320                 return 1;
1321         }
1322
1323         pthread_mutex_unlock(&surface_queue->lock);
1324         _tbm_surf_queue_mutex_unlock();
1325         return 0;
1326 }
1327
1328 tbm_surface_queue_error_e
1329 tbm_surface_queue_release(tbm_surface_queue_h
1330                           surface_queue, tbm_surface_h surface)
1331 {
1332         queue_node *node;
1333         int queue_type;
1334
1335         _tbm_surf_queue_mutex_lock();
1336
1337         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1338                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1339         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1340                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1341
1342         pthread_mutex_lock(&surface_queue->lock);
1343
1344         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1345
1346         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1347         if (node == NULL || queue_type != NODE_LIST) {
1348                 TBM_ERR("tbm_surface_queue_release::Surface is unknown or already in the free_queue or dirty_queue. node:%p, type:%d\n",
1349                         node, queue_type);
1350                 pthread_mutex_unlock(&surface_queue->lock);
1351
1352                 _tbm_surf_queue_mutex_unlock();
1353
1354                 if (!node)
1355                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1356                 else
1357                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1358         }
1359
1360         if (node->delete_pending) {
1361                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1362
1363                 _queue_delete_node(surface_queue, node);
1364
1365                 pthread_mutex_unlock(&surface_queue->lock);
1366
1367                 _tbm_surf_queue_mutex_unlock();
1368
1369                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1370
1371                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1372         }
1373
1374         if (surface_queue->queue_size < surface_queue->num_attached) {
1375                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1376
1377                 if (surface_queue->impl && surface_queue->impl->need_detach)
1378                         surface_queue->impl->need_detach(surface_queue, node);
1379                 else
1380                         _tbm_surface_queue_detach(surface_queue, surface);
1381
1382                 pthread_mutex_unlock(&surface_queue->lock);
1383
1384                 _tbm_surf_queue_mutex_unlock();
1385
1386                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1387
1388                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1389         }
1390
1391         if (surface_queue->impl && surface_queue->impl->release)
1392                 surface_queue->impl->release(surface_queue, node);
1393         else
1394                 _tbm_surface_queue_release(surface_queue, node, 1);
1395
1396         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1397                 TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
1398                 pthread_mutex_unlock(&surface_queue->lock);
1399
1400                 _tbm_surf_queue_mutex_unlock();
1401                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1402         }
1403
1404         node->type = QUEUE_NODE_TYPE_RELEASE;
1405
1406         pthread_mutex_unlock(&surface_queue->lock);
1407         pthread_cond_signal(&surface_queue->free_cond);
1408
1409         _tbm_surf_queue_mutex_unlock();
1410
1411         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1412
1413         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1414
1415         return TBM_SURFACE_QUEUE_ERROR_NONE;
1416 }
1417
1418 tbm_surface_queue_error_e
1419 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1420                         surface_queue, tbm_surface_h surface)
1421 {
1422         queue_node *node;
1423         int queue_type;
1424
1425         _tbm_surf_queue_mutex_lock();
1426
1427         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1428                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1429         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1430                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1431
1432         pthread_mutex_lock(&surface_queue->lock);
1433
1434         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1435
1436         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1437         if (node == NULL || queue_type != NODE_LIST) {
1438                 TBM_ERR("tbm_surface_queue_cancel_acquire::Surface is unknown or already in the free_queue or dirty_queue. node:%p, type:%d\n",
1439                         node, queue_type);
1440                 pthread_mutex_unlock(&surface_queue->lock);
1441
1442                 _tbm_surf_queue_mutex_unlock();
1443                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1444         }
1445
1446         if (surface_queue->impl && surface_queue->impl->enqueue)
1447                 surface_queue->impl->enqueue(surface_queue, node);
1448         else
1449                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1450
1451         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1452                 TBM_ERR("enqueued the surface but the dirty_queue is empty. node:%p\n", node);
1453                 pthread_mutex_unlock(&surface_queue->lock);
1454
1455                 _tbm_surf_queue_mutex_unlock();
1456                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1457         }
1458
1459         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1460
1461         pthread_mutex_unlock(&surface_queue->lock);
1462         pthread_cond_signal(&surface_queue->dirty_cond);
1463
1464         _tbm_surf_queue_mutex_unlock();
1465
1466         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1467
1468         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1469
1470         return TBM_SURFACE_QUEUE_ERROR_NONE;
1471 }
1472
1473 tbm_surface_queue_error_e
1474 tbm_surface_queue_acquire(tbm_surface_queue_h
1475                           surface_queue, tbm_surface_h *surface)
1476 {
1477         queue_node *node;
1478
1479         _tbm_surf_queue_mutex_lock();
1480
1481         *surface = NULL;
1482
1483         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1484                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1485         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1486                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1487
1488         pthread_mutex_lock(&surface_queue->lock);
1489
1490         if (surface_queue->impl && surface_queue->impl->acquire)
1491                 node = surface_queue->impl->acquire(surface_queue);
1492         else
1493                 node = _tbm_surface_queue_acquire(surface_queue);
1494
1495         if (node == NULL || node->surface == NULL) {
1496                 TBM_ERR("_queue_node_pop_front failed\n");
1497                 pthread_mutex_unlock(&surface_queue->lock);
1498
1499                 _tbm_surf_queue_mutex_unlock();
1500                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1501         }
1502
1503         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1504
1505         *surface = node->surface;
1506
1507         if (surface_queue->acquire_sync_count == 1) {
1508                 tbm_surface_info_s info;
1509                 int ret;
1510
1511                 TBM_ERR("start map surface:%p", *surface);
1512                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1513                 TBM_ERR("end map surface:%p", *surface);
1514                 if (ret == TBM_SURFACE_ERROR_NONE)
1515                         tbm_surface_unmap(*surface);
1516         }
1517
1518         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1519
1520         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1521
1522         pthread_mutex_unlock(&surface_queue->lock);
1523
1524         _tbm_surf_queue_mutex_unlock();
1525
1526         if (b_dump_queue)
1527                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1528
1529         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1530
1531         return TBM_SURFACE_QUEUE_ERROR_NONE;
1532 }
1533
1534 int
1535 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1536 {
1537         _tbm_surf_queue_mutex_lock();
1538
1539         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1540
1541         pthread_mutex_lock(&surface_queue->lock);
1542
1543         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1544
1545         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1546                 pthread_mutex_unlock(&surface_queue->lock);
1547                 _tbm_surf_queue_mutex_unlock();
1548                 return 1;
1549         }
1550
1551         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1552                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1553                 _tbm_surf_queue_mutex_unlock();
1554                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1555                 pthread_mutex_unlock(&surface_queue->lock);
1556                 return 1;
1557         }
1558
1559         pthread_mutex_unlock(&surface_queue->lock);
1560         _tbm_surf_queue_mutex_unlock();
1561         return 0;
1562 }
1563
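/*
 * destroy() unregisters the queue from the global list, destroys every
 * attached surface, runs the backend destroy() hook, emits the destroy
 * callbacks, releases all callback lists and finally tears down the
 * shared bufmgr once the last queue is gone.
 */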
1564 void
1565 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1566 {
1567         queue_node *node = NULL, *tmp;
1568
1569         _tbm_surf_queue_mutex_lock();
1570
1571         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1572
1573         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1574
1575         LIST_DEL(&surface_queue->item_link);
1576
1577         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1578                 _queue_delete_node(surface_queue, node);
1579
1580         if (surface_queue->impl && surface_queue->impl->destroy)
1581                 surface_queue->impl->destroy(surface_queue);
1582
1583         _notify_emit(surface_queue, &surface_queue->destory_noti);
1584
1585         _notify_remove_all(&surface_queue->destory_noti);
1586         _notify_remove_all(&surface_queue->dequeuable_noti);
1587         _notify_remove_all(&surface_queue->dequeue_noti);
1588         _notify_remove_all(&surface_queue->can_dequeue_noti);
1589         _notify_remove_all(&surface_queue->acquirable_noti);
1590         _notify_remove_all(&surface_queue->reset_noti);
1591         _trace_remove_all(&surface_queue->trace_noti);
1592
1593         pthread_mutex_destroy(&surface_queue->lock);
1594
1595         free(surface_queue);
1596
1597         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1598                 _deinit_tbm_surf_queue_bufmgr();
1599
1600         _tbm_surf_queue_mutex_unlock();
1601 }
1602
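/*
 * Changes the geometry/format of the buffers managed by the queue.  If
 * nothing changes this is a no-op.  Otherwise the free_queue is dropped; in
 * TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE mode in-flight buffers are only
 * marked delete_pending, while in the default mode every attached buffer is
 * deleted.  Waiters on free_cond are woken and the reset notification is
 * emitted.
 */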
1603 tbm_surface_queue_error_e
1604 tbm_surface_queue_reset(tbm_surface_queue_h
1605                         surface_queue, int width, int height, int format)
1606 {
1607         queue_node *node = NULL, *tmp;
1608
1609         _tbm_surf_queue_mutex_lock();
1610
1611         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1612                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1613
1614         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1615
1616         if (width == surface_queue->width && height == surface_queue->height &&
1617                 format == surface_queue->format) {
1618                 _tbm_surf_queue_mutex_unlock();
1619                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1620         }
1621
1622         pthread_mutex_lock(&surface_queue->lock);
1623
1624         surface_queue->width = width;
1625         surface_queue->height = height;
1626         surface_queue->format = format;
1627
1628         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1629                 /* Destory surface and Push to free_queue */
1630                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1631                         _queue_delete_node(surface_queue, node);
1632
1633                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1634                         node->delete_pending = 1;
1635         } else {
1636                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1637                         _queue_delete_node(surface_queue, node);
1638
1639                 _queue_init(&surface_queue->dirty_queue);
1640                 LIST_INITHEAD(&surface_queue->list);
1641         }
1642
1643         /* Reset queue */
1644         _queue_init(&surface_queue->free_queue);
1645
1646         surface_queue->num_attached = 0;
1647
1648         if (surface_queue->impl && surface_queue->impl->reset)
1649                 surface_queue->impl->reset(surface_queue);
1650
1651         pthread_mutex_unlock(&surface_queue->lock);
1652         pthread_cond_signal(&surface_queue->free_cond);
1653
1654         _tbm_surf_queue_mutex_unlock();
1655
1656         _notify_emit(surface_queue, &surface_queue->reset_noti);
1657
1658         return TBM_SURFACE_QUEUE_ERROR_NONE;
1659 }
1660
1661 tbm_surface_queue_error_e
1662 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1663 {
1664         _tbm_surf_queue_mutex_lock();
1665
1666         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1667                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1668
1669         _tbm_surf_queue_mutex_unlock();
1670
1671         _notify_emit(surface_queue, &surface_queue->reset_noti);
1672
1673         return TBM_SURFACE_QUEUE_ERROR_NONE;
1674 }
1675
1676 tbm_surface_queue_error_e
1677 tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
1678 {
1679         _tbm_surf_queue_mutex_lock();
1680
1681         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1682                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1683
1684         _tbm_surf_queue_mutex_unlock();
1685
1686         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1687
1688         return TBM_SURFACE_QUEUE_ERROR_NONE;
1689 }
1690
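/*
 * Resizes the queue.  With @flush set this behaves like a reset (buffers are
 * dropped or marked delete_pending, the backend reset() hook runs and the
 * reset notification is emitted) before the new size takes effect.  Without
 * @flush, shrinking only detaches surplus buffers from the free_queue.
 */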
1691 tbm_surface_queue_error_e
1692 tbm_surface_queue_set_size(tbm_surface_queue_h
1693                         surface_queue, int queue_size, int flush)
1694 {
1695         queue_node *node = NULL, *tmp;
1696
1697         _tbm_surf_queue_mutex_lock();
1698
1699         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1700                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1701         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1702                                         TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1703
1704         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1705
1706         if ((surface_queue->queue_size == queue_size) && !flush) {
1707                 _tbm_surf_queue_mutex_unlock();
1708                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1709         }
1710
1711         pthread_mutex_lock(&surface_queue->lock);
1712
1713         if (flush) {
1714                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1715                         /* Destroy the surfaces in the free_queue; in-flight nodes are only marked delete_pending */
1716                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1717                                 _queue_delete_node(surface_queue, node);
1718
1719                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1720                                 node->delete_pending = 1;
1721                 } else {
1722                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1723                                 _queue_delete_node(surface_queue, node);
1724
1725                         _queue_init(&surface_queue->dirty_queue);
1726                         LIST_INITHEAD(&surface_queue->list);
1727                 }
1728
1729                 /* Reset queue */
1730                 _queue_init(&surface_queue->free_queue);
1731
1732                 surface_queue->num_attached = 0;
1733                 surface_queue->queue_size = queue_size;
1734
1735                 if (surface_queue->impl && surface_queue->impl->reset)
1736                         surface_queue->impl->reset(surface_queue);
1737
1738                 pthread_mutex_unlock(&surface_queue->lock);
1739                 pthread_cond_signal(&surface_queue->free_cond);
1740
1741                 _tbm_surf_queue_mutex_unlock();
1742
1743                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1744
1745                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1746         } else {
1747                 if (surface_queue->queue_size > queue_size) {
1748                         int need_del = surface_queue->queue_size - queue_size;
1749
1750                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1751                                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1752
1753                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1754                                         surface_queue->impl->need_detach(surface_queue, node);
1755                                 else
1756                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1757
1758                                 need_del--;
1759                                 if (need_del == 0)
1760                                         break;
1761                         }
1762                 }
1763
1764                 surface_queue->queue_size = queue_size;
1765
1766                 pthread_mutex_unlock(&surface_queue->lock);
1767
1768                 _tbm_surf_queue_mutex_unlock();
1769
1770                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1771         }
1772 }
1773
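/*
 * Detaches every buffer currently sitting in the free_queue.  Buffers that
 * are dequeued, enqueued or acquired are left untouched, so this only trims
 * the idle part of the queue.
 */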
1774 tbm_surface_queue_error_e
1775 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1776 {
1777         queue_node *node = NULL;
1778
1779         _tbm_surf_queue_mutex_lock();
1780
1781         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1782                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1783
1784         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1785
1786         if (surface_queue->num_attached == 0) {
1787                 _tbm_surf_queue_mutex_unlock();
1788                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1789         }
1790
1791         pthread_mutex_lock(&surface_queue->lock);
1792
1793         /* Detach every surface currently held in the free_queue */
1794         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1795                 if (surface_queue->impl && surface_queue->impl->need_detach)
1796                         surface_queue->impl->need_detach(surface_queue, node);
1797                 else
1798                         _tbm_surface_queue_detach(surface_queue, node->surface);
1799         }
1800
1801         /* Reset queue */
1802         _queue_init(&surface_queue->free_queue);
1803
1804         pthread_mutex_unlock(&surface_queue->lock);
1805         _tbm_surf_queue_mutex_unlock();
1806
1807         return TBM_SURFACE_QUEUE_ERROR_NONE;
1808 }
1809
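/*
 * Drops every attached buffer (or, in TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE
 * mode, destroys only the free ones and marks the in-flight ones
 * delete_pending) while keeping the current width/height/format, then emits
 * the reset notification.
 */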
1810 tbm_surface_queue_error_e
1811 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1812 {
1813         queue_node *node = NULL, *tmp;
1814
1815         _tbm_surf_queue_mutex_lock();
1816
1817         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1818                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1819
1820         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1821
1822         if (surface_queue->num_attached == 0) {
1823                 _tbm_surf_queue_mutex_unlock();
1824                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1825         }
1826
1827         pthread_mutex_lock(&surface_queue->lock);
1828
1829         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1830                 /* Destroy the surfaces in the free_queue; in-flight nodes are only marked delete_pending */
1831                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1832                         _queue_delete_node(surface_queue, node);
1833
1834                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1835                         node->delete_pending = 1;
1836         } else {
1837                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1838                         _queue_delete_node(surface_queue, node);
1839
1840                 _queue_init(&surface_queue->dirty_queue);
1841                 LIST_INITHEAD(&surface_queue->list);
1842         }
1843
1844         /* Reset queue */
1845         _queue_init(&surface_queue->free_queue);
1846
1847         surface_queue->num_attached = 0;
1848
1849         if (surface_queue->impl && surface_queue->impl->reset)
1850                 surface_queue->impl->reset(surface_queue);
1851
1852         pthread_mutex_unlock(&surface_queue->lock);
1853         pthread_cond_signal(&surface_queue->free_cond);
1854
1855         _tbm_surf_queue_mutex_unlock();
1856
1857         _notify_emit(surface_queue, &surface_queue->reset_noti);
1858
1859         return TBM_SURFACE_QUEUE_ERROR_NONE;
1860 }
1861
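/*
 * Reports the buffers currently attached to the queue.  @surfaces may be
 * NULL to query only the count; nodes pending deletion are skipped.  The
 * caller must provide an array large enough to hold every attached surface.
 */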
1862 tbm_surface_queue_error_e
1863 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1864                         tbm_surface_h *surfaces, int *num)
1865 {
1866         queue_node *node = NULL;
1867
1868         _tbm_surf_queue_mutex_lock();
1869
1870         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1871                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1872         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1873                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1874
1875         *num = 0;
1876
1877         pthread_mutex_lock(&surface_queue->lock);
1878
1879         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1880                 if (node->delete_pending) continue;
1881
1882                 if (surfaces)
1883                         surfaces[*num] = node->surface;
1884
1885                 *num = *num + 1;
1886         }
1887
1888         pthread_mutex_unlock(&surface_queue->lock);
1889
1890         _tbm_surf_queue_mutex_unlock();
1891
1892         return TBM_SURFACE_QUEUE_ERROR_NONE;
1893 }
1894
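/*
 * Counts the attached buffers that are currently in the given trace state
 * (dequeued, enqueued, acquired or released) and stores the result in @num.
 */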
1895 tbm_surface_queue_error_e
1896 tbm_surface_queue_get_trace_surface_num(
1897                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1898 {
1899         _tbm_surf_queue_mutex_lock();
1900
1901         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1902                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1903         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1904                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1905
1906         *num = 0;
1907
1908         pthread_mutex_lock(&surface_queue->lock);
1909
1910         switch (trace) {
1911         case TBM_SURFACE_QUEUE_TRACE_NONE:
1912                 *num = 0;
1913                 break;
1914         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1915                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1916                 break;
1917         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1918                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1919                 break;
1920         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1921                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1922                 break;
1923         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1924                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1925                 break;
1926         default:
1927                 break;
1928         }
1929
1930         pthread_mutex_unlock(&surface_queue->lock);
1931
1932         _tbm_surf_queue_mutex_unlock();
1933
1934         return TBM_SURFACE_QUEUE_ERROR_NONE;
1935 }
1936
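/*
 * Default queue backend: buffers are allocated lazily in need_attach (through
 * the user-supplied alloc_cb when one is registered, otherwise with
 * tbm_surface_internal_create_with_flags) until queue_size buffers are
 * attached.  Dequeue/enqueue/acquire/release use the generic implementations.
 */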
1937 typedef struct {
1938         int flags;
1939 } tbm_queue_default;
1940
1941 static void
1942 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1943 {
1944         free(surface_queue->impl_data);
1945 }
1946
1947 static void
1948 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
1949 {
1950         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
1951         tbm_surface_h surface;
1952
1953         if (surface_queue->queue_size == surface_queue->num_attached)
1954                 return;
1955
1956         if (surface_queue->alloc_cb) {
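		/*
		 * Drop both locks while calling out to the allocation
		 * callback so that it can safely re-enter the surface queue
		 * API without deadlocking.
		 */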
1957                 pthread_mutex_unlock(&surface_queue->lock);
1958                 _tbm_surf_queue_mutex_unlock();
1959                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1960                 _tbm_surf_queue_mutex_lock();
1961                 pthread_mutex_lock(&surface_queue->lock);
1962
1963                 /* silent return */
1964                 if (!surface)
1965                         return;
1966
1967                 tbm_surface_internal_ref(surface);
1968         } else {
1969                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1970                                 surface_queue->height,
1971                                 surface_queue->format,
1972                                 data->flags);
1973                 TBM_RETURN_IF_FAIL(surface != NULL);
1974         }
1975
1976         _tbm_surface_queue_attach(surface_queue, surface);
1977         tbm_surface_internal_unref(surface);
1978 }
1979
1980 static const tbm_surface_queue_interface tbm_queue_default_impl = {
1981         NULL,                           /*__tbm_queue_default_init*/
1982         NULL,                           /*__tbm_queue_default_reset*/
1983         __tbm_queue_default_destroy,
1984         __tbm_queue_default_need_attach,
1985         NULL,                           /*__tbm_queue_default_enqueue*/
1986         NULL,                           /*__tbm_queue_default_release*/
1987         NULL,                           /*__tbm_queue_default_dequeue*/
1988         NULL,                           /*__tbm_queue_default_acquire*/
1989         NULL,                           /*__tbm_queue_default_need_detach*/
1990 };
1991
1992 tbm_surface_queue_h
1993 tbm_surface_queue_create(int queue_size, int width,
1994                          int height, int format, int flags)
1995 {
1996         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1997         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1998         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1999         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2000
2001         _tbm_surf_queue_mutex_lock();
2002
2003         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2004                                             sizeof(struct _tbm_surface_queue));
2005         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2006
2007         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2008
2009         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
2010                                   sizeof(tbm_queue_default));
2011         if (data == NULL) {
2012                 TBM_ERR("failed to allocate tbm_queue_default.\n");
2013                 free(surface_queue);
2014                 _tbm_surf_queue_mutex_unlock();
2015                 return NULL;
2016         }
2017
2018         data->flags = flags;
2019         _tbm_surface_queue_init(surface_queue,
2020                                 queue_size,
2021                                 width, height, format,
2022                                 &tbm_queue_default_impl, data);
2023
2024         _tbm_surf_queue_mutex_unlock();
2025
2026         return surface_queue;
2027 }
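/*
 * Illustrative usage sketch (not part of this file): one producer/consumer
 * cycle with a default queue.  The size, format and flags below are
 * placeholders and error handling is omitted.
 *
 *	tbm_surface_queue_h queue =
 *		tbm_surface_queue_create(3, 720, 1280,
 *					 TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *	tbm_surface_h surface;
 *
 *	if (tbm_surface_queue_can_dequeue(queue, 1))
 *		tbm_surface_queue_dequeue(queue, &surface);	(producer draws)
 *	tbm_surface_queue_enqueue(queue, surface);		(hand to consumer)
 *	if (tbm_surface_queue_can_acquire(queue, 1))
 *		tbm_surface_queue_acquire(queue, &surface);	(consumer reads)
 *	tbm_surface_queue_release(queue, surface);		(back to free_queue)
 *	tbm_surface_queue_destroy(queue);
 */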
2028
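/*
 * Sequence queue backend: every dequeued buffer is remembered on
 * dequeue_list (priv_flags marks membership) and only the oldest outstanding
 * buffer may actually be enqueued; in this implementation an out-of-order
 * enqueue request is silently ignored.  A release removes the buffer from
 * dequeue_list again.
 */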
2029 typedef struct {
2030         int flags;
2031         queue dequeue_list;
2032 } tbm_queue_sequence;
2033
2034 static void
2035 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2036 {
2037         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2038
2039         _queue_init(&data->dequeue_list);
2040 }
2041
2042 static void
2043 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2044 {
2045         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2046
2047         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2048                 return;
2049
2050         _queue_init(&data->dequeue_list);
2051 }
2052
2053 static void
2054 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2055 {
2056         free(surface_queue->impl_data);
2057 }
2058
2059 static void
2060 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
2061 {
2062         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2063         tbm_surface_h surface;
2064
2065         if (surface_queue->queue_size == surface_queue->num_attached)
2066                 return;
2067
2068         if (surface_queue->alloc_cb) {
2069                 pthread_mutex_unlock(&surface_queue->lock);
2070                 _tbm_surf_queue_mutex_unlock();
2071                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2072                 _tbm_surf_queue_mutex_lock();
2073                 pthread_mutex_lock(&surface_queue->lock);
2074
2075                 /* silent return */
2076                 if (!surface)
2077                         return;
2078
2079                 tbm_surface_internal_ref(surface);
2080         } else {
2081                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2082                                 surface_queue->height,
2083                                 surface_queue->format,
2084                                 data->flags);
2085                 TBM_RETURN_IF_FAIL(surface != NULL);
2086         }
2087
2088         _tbm_surface_queue_attach(surface_queue, surface);
2089         tbm_surface_internal_unref(surface);
2090 }
2091
2092 static void
2093 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2094                              queue_node *node)
2095 {
2096         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2097         queue_node *first = NULL;
2098
2099         first = container_of(data->dequeue_list.head.next, first, item_link);
2100         if (first != node) {
2101                 return;
2102         }
2103
2104         node->priv_flags = 0;
2105
2106         _queue_node_pop(&data->dequeue_list, node);
2107         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2108 }
2109
2110 static void
2111 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2112                                 queue_node *node)
2113 {
2114         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2115
2116         if (node->priv_flags) {
2117                 node->priv_flags = 0;
2118                 _queue_node_pop(&data->dequeue_list, node);
2119         }
2120
2121         _tbm_surface_queue_release(surface_queue, node, 1);
2122 }
2123
2124 static queue_node *
2125 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2126                              surface_queue)
2127 {
2128         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2129         queue_node *node;
2130
2131         node = _tbm_surface_queue_dequeue(surface_queue);
2132         if (node) {
2133                 _queue_node_push_back(&data->dequeue_list, node);
2134                 node->priv_flags = 1;
2135         }
2136
2137         return node;
2138 }
2139
2140 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2141         __tbm_queue_sequence_init,
2142         __tbm_queue_sequence_reset,
2143         __tbm_queue_sequence_destroy,
2144         __tbm_queue_sequence_need_attach,
2145         __tbm_queue_sequence_enqueue,
2146         __tbm_queue_sequence_release,
2147         __tbm_queue_sequence_dequeue,
2148         NULL,                                   /*__tbm_queue_sequence_acquire*/
2149         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2150 };
2151
2152 tbm_surface_queue_h
2153 tbm_surface_queue_sequence_create(int queue_size, int width,
2154                                   int height, int format, int flags)
2155 {
2156         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2157         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
2158         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
2159         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2160
2161         _tbm_surf_queue_mutex_lock();
2162
2163         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2164                                             sizeof(struct _tbm_surface_queue));
2165         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2166
2167         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2168
2169         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2170                                    sizeof(tbm_queue_sequence));
2171         if (data == NULL) {
2172                 TBM_ERR("failed to allocate tbm_queue_sequence.\n");
2173                 free(surface_queue);
2174                 _tbm_surf_queue_mutex_unlock();
2175                 return NULL;
2176         }
2177
2178         data->flags = flags;
2179         _tbm_surface_queue_init(surface_queue,
2180                                 queue_size,
2181                                 width, height, format,
2182                                 &tbm_queue_sequence_impl, data);
2183
2184         _tbm_surf_queue_mutex_unlock();
2185
2186         return surface_queue;
2187 }
2188
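/*
 * ORs additional mode bits (e.g. TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE,
 * which makes reset/flush keep in-flight buffers alive) into the queue.
 */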
2189 tbm_surface_queue_error_e
2190 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2191                                   int modes)
2192 {
2193         _tbm_surf_queue_mutex_lock();
2194
2195         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2196                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2197
2198         pthread_mutex_lock(&surface_queue->lock);
2199
2200         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2201                 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
2202         else
2203                 surface_queue->modes |= modes;
2204
2205         pthread_mutex_unlock(&surface_queue->lock);
2206
2207         _tbm_surf_queue_mutex_unlock();
2208
2209         return TBM_SURFACE_QUEUE_ERROR_NONE;
2210 }
2211
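/*
 * Re-arms the enqueue/acquire sync counters from the current queue state:
 * if @sync_count plus the number of currently dequeued buffers is zero,
 * acquire_sync_count is set to the number of already enqueued buffers;
 * otherwise enqueue_sync_count is set to (dequeued + sync_count).
 * acquire_sync_count is decremented again when buffers are acquired
 * (see tbm_surface_queue_acquire above).
 */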
2212 tbm_surface_queue_error_e
2213 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2214                                   unsigned int sync_count)
2215 {
2216         int dequeue_num, enqueue_num;
2217
2218         _tbm_surf_queue_mutex_lock();
2219
2220         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2221                                    TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2222
2223         pthread_mutex_lock(&surface_queue->lock);
2224
2225         dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2226         enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2227
2228         if (dequeue_num + sync_count == 0)
2229                 surface_queue->acquire_sync_count = enqueue_num;
2230         else
2231                 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2232
2233         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2234                                 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2235
2236         pthread_mutex_unlock(&surface_queue->lock);
2237
2238         _tbm_surf_queue_mutex_unlock();
2239
2240         return TBM_SURFACE_QUEUE_ERROR_NONE;
2241 }