queue: emit signal free_cond in queue_notify_dequeuable
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
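/* Location flags for _queue_get_node(): a surface's node can be looked up
 * in the free_queue, in the dirty_queue and/or in the per-queue node list.
 * The flags are combined as a bitmask; passing 0 searches all three. */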
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 static tbm_bufmgr g_surf_queue_bufmgr;
42 static pthread_mutex_t tbm_surf_queue_lock;
43 void _tbm_surface_queue_mutex_unlock(void);
44
45 /* check condition */
46 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
47         if (!(cond)) {\
48                 TBM_ERR("'%s' failed.\n", #cond);\
49                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
50                 _tbm_surf_queue_mutex_unlock();\
51                 return;\
52         } \
53 }
54
55 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
56         if (!(cond)) {\
57                 TBM_ERR("'%s' failed.\n", #cond);\
58                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
59                 _tbm_surf_queue_mutex_unlock();\
60                 return val;\
61         } \
62 }
63
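/* Tracks the last queue operation applied to a node's surface.
 * QUEUE_NODE_TYPE_NONE is the state of a freshly attached node; the others
 * mark a surface as dequeued by the producer, enqueued with new content,
 * acquired by the consumer, or released back to the free_queue. */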
64 typedef enum _queue_node_type {
65         QUEUE_NODE_TYPE_NONE,
66         QUEUE_NODE_TYPE_DEQUEUE,
67         QUEUE_NODE_TYPE_ENQUEUE,
68         QUEUE_NODE_TYPE_ACQUIRE,
69         QUEUE_NODE_TYPE_RELEASE
70 } Queue_Node_Type;
71
72 typedef struct {
73         struct list_head head;
74         int count;
75 } queue;
76
77 typedef struct {
78         tbm_surface_h surface;
79
80         struct list_head item_link;
81         struct list_head link;
82
83         Queue_Node_Type type;
84
85         unsigned int priv_flags;        /*for each queue*/
86
87         int delete_pending;
88 } queue_node;
89
90 typedef struct {
91         struct list_head link;
92
93         tbm_surface_queue_notify_cb cb;
94         void *data;
95 } queue_notify;
96
97 typedef struct {
98         struct list_head link;
99
100         tbm_surface_queue_trace_cb cb;
101         void *data;
102 } queue_trace;
103
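/* Backend hooks that specialize the queue behavior. Every hook is optional
 * at the call sites: when enqueue/release/dequeue/acquire are NULL, the
 * generic _tbm_surface_queue_* helpers below are used instead. */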
104 typedef struct _tbm_surface_queue_interface {
105         void (*init)(tbm_surface_queue_h queue);
106         void (*reset)(tbm_surface_queue_h queue);
107         void (*destroy)(tbm_surface_queue_h queue);
108         void (*need_attach)(tbm_surface_queue_h queue);
109
110         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
111         void (*release)(tbm_surface_queue_h queue, queue_node *node);
112         queue_node *(*dequeue)(tbm_surface_queue_h queue);
113         queue_node *(*acquire)(tbm_surface_queue_h queue);
114         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
115 } tbm_surface_queue_interface;
116
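/* Locking scheme: the global tbm_surf_queue_lock guards the bufmgr's
 * surf_queue_list and every validity check, while the per-queue 'lock'
 * protects the free/dirty queues, the node list and the notification lists.
 * free_cond is signalled when a buffer becomes dequeuable (release,
 * cancel_dequeue) and dirty_cond when one becomes acquirable (enqueue,
 * cancel_acquire). */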
117 struct _tbm_surface_queue {
118         int width;
119         int height;
120         int format;
121         int queue_size;
122         int num_attached;
123
124         queue free_queue;
125         queue dirty_queue;
126         struct list_head list;
127
128         struct list_head destory_noti;
129         struct list_head dequeuable_noti;
130         struct list_head dequeue_noti;
131         struct list_head can_dequeue_noti;
132         struct list_head acquirable_noti;
133         struct list_head reset_noti;
134         struct list_head trace_noti;
135
136         pthread_mutex_t lock;
137         pthread_cond_t free_cond;
138         pthread_cond_t dirty_cond;
139
140         const tbm_surface_queue_interface *impl;
141         void *impl_data;
142
143         //For external buffer allocation
144         tbm_surface_alloc_cb alloc_cb;
145         tbm_surface_free_cb free_cb;
146         void *alloc_cb_data;
147
148         struct list_head item_link; /* link of surface queue */
149
150         int modes;
151         unsigned int enqueue_sync_count;
152         unsigned int acquire_sync_count;
153 };
154
155 static bool
156 _tbm_surf_queue_mutex_init(void)
157 {
158         static bool tbm_surf_queue_mutex_init = false;
159
160         if (tbm_surf_queue_mutex_init)
161                 return true;
162
163         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
164                 TBM_ERR("fail: pthread_mutex_init\n");
165                 return false;
166         }
167
168         tbm_surf_queue_mutex_init = true;
169
170         return true;
171 }
172
173 static void
174 _tbm_surf_queue_mutex_lock(void)
175 {
176         if (!_tbm_surf_queue_mutex_init()) {
177                 TBM_ERR("fail: _tbm_surf_queue_mutex_init\n");
178                 return;
179         }
180
181         pthread_mutex_lock(&tbm_surf_queue_lock);
182 }
183
184 static void
185 _tbm_surf_queue_mutex_unlock(void)
186 {
187         pthread_mutex_unlock(&tbm_surf_queue_lock);
188 }
189
190 static void
191 _init_tbm_surf_queue_bufmgr(void)
192 {
193         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
194 }
195
196 static void
197 _deinit_tbm_surf_queue_bufmgr(void)
198 {
199         if (!g_surf_queue_bufmgr)
200                 return;
201
202         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
203         g_surf_queue_bufmgr = NULL;
204 }
205
206 static int
207 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
208 {
209         tbm_surface_queue_h old_data = NULL;
210
211         if (surface_queue == NULL) {
212                 TBM_ERR("error: surface_queue is NULL.\n");
213                 return 0;
214         }
215
216         if (g_surf_queue_bufmgr == NULL) {
217                 TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n");
218                 return 0;
219         }
220
221         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
222                 TBM_ERR("error: surf_queue_list is empty\n");
223                 return 0;
224         }
225
226         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
227                                 item_link) {
228                 if (old_data == surface_queue) {
229                         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
230                         return 1;
231                 }
232         }
233
234         TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
235
236         return 0;
237 }
238
239 static queue_node *
240 _queue_node_create(void)
241 {
242         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
243
244         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
245
246         return node;
247 }
248
249 static void
250 _queue_node_delete(queue_node *node)
251 {
252         LIST_DEL(&node->item_link);
253         LIST_DEL(&node->link);
254         free(node);
255 }
256
257 static int
258 _queue_is_empty(queue *queue)
259 {
260         if (LIST_IS_EMPTY(&queue->head))
261                 return 1;
262
263         return 0;
264 }
265
266 static void
267 _queue_node_push_back(queue *queue, queue_node *node)
268 {
269         LIST_ADDTAIL(&node->item_link, &queue->head);
270         queue->count++;
271 }
272
273 static void
274 _queue_node_push_front(queue *queue, queue_node *node)
275 {
276         LIST_ADD(&node->item_link, &queue->head);
277         queue->count++;
278 }
279
280 static queue_node *
281 _queue_node_pop_front(queue *queue)
282 {
283         queue_node *node;
284
285         if (!queue->head.next) return NULL;
286         if (!queue->count) return NULL;
287
288         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
289
290         LIST_DELINIT(&node->item_link);
291         queue->count--;
292
293         return node;
294 }
295
296 static queue_node *
297 _queue_node_pop(queue *queue, queue_node *node)
298 {
299         LIST_DELINIT(&node->item_link);
300         queue->count--;
301
302         return node;
303 }
304
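/* Looks up the node owning 'surface'. 'type' is a bitmask of
 * FREE_QUEUE/DIRTY_QUEUE/NODE_LIST (0 means all); the place the node was
 * found in is reported through 'out_type'. Returns NULL when the surface
 * does not belong to this surface_queue. */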
305 static queue_node *
306 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
307                 tbm_surface_h surface, int *out_type)
308 {
309         queue_node *node = NULL;
310
311         if (type == 0)
312                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
313         if (out_type)
314                 *out_type = 0;
315
316         if (type & FREE_QUEUE) {
317                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
318                                          item_link) {
319                         if (node->surface == surface) {
320                                 if (out_type)
321                                         *out_type = FREE_QUEUE;
322
323                                 return node;
324                         }
325                 }
326         }
327
328         if (type & DIRTY_QUEUE) {
329                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
330                                          item_link) {
331                         if (node->surface == surface) {
332                                 if (out_type)
333                                         *out_type = DIRTY_QUEUE;
334
335                                 return node;
336                         }
337                 }
338         }
339
340         if (type & NODE_LIST) {
341                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
342                         if (node->surface == surface) {
343                                 if (out_type)
344                                         *out_type = NODE_LIST;
345
346                                 return node;
347                         }
348                 }
349         }
350
351         TBM_ERR("fail to get the queue_node.\n");
352
353         return NULL;
354 }
355
356 static void
357 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
358 {
359         if (node->surface) {
360                 if (surface_queue->free_cb) {
361                         surface_queue->free_cb(surface_queue,
362                                         surface_queue->alloc_cb_data,
363                                         node->surface);
364                 }
365
366                 tbm_surface_destroy(node->surface);
367         }
368
369         _queue_node_delete(node);
370 }
371
372 static void
373 _queue_init(queue *queue)
374 {
375         LIST_INITHEAD(&queue->head);
376
377         queue->count = 0;
378 }
379
380 static void
381 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
382             void *data)
383 {
384         TBM_RETURN_IF_FAIL(cb != NULL);
385
386         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
387
388         TBM_RETURN_IF_FAIL(item != NULL);
389
390         LIST_INITHEAD(&item->link);
391         item->cb = cb;
392         item->data = data;
393
394         LIST_ADDTAIL(&item->link, list);
395 }
396
397 static void
398 _notify_remove(struct list_head *list,
399                tbm_surface_queue_notify_cb cb, void *data)
400 {
401         queue_notify *item = NULL, *tmp;
402
403         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
404                 if (item->cb == cb && item->data == data) {
405                         LIST_DEL(&item->link);
406                         free(item);
407                         return;
408                 }
409         }
410
411 		TBM_ERR("Cannot find notify\n");
412 }
413
414 static void
415 _notify_remove_all(struct list_head *list)
416 {
417         queue_notify *item = NULL, *tmp;
418
419         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
420                 LIST_DEL(&item->link);
421                 free(item);
422         }
423 }
424
425 static void
426 _notify_emit(tbm_surface_queue_h surface_queue,
427              struct list_head *list)
428 {
429 	queue_notify *item = NULL, *tmp;
430
431         /*
432 		item->cb is a callback that lives outside of libtbm.
433 		The tbm user may remove an item from the list inside the callback,
434 		so we have to use LIST_FOR_EACH_ENTRY_SAFE here.
435         */
436         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
437                 item->cb(surface_queue, item->data);
438 }
439
440 static void
441 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
442             void *data)
443 {
444         TBM_RETURN_IF_FAIL(cb != NULL);
445
446         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
447
448         TBM_RETURN_IF_FAIL(item != NULL);
449
450         LIST_INITHEAD(&item->link);
451         item->cb = cb;
452         item->data = data;
453
454         LIST_ADDTAIL(&item->link, list);
455 }
456
457 static void
458 _trace_remove(struct list_head *list,
459                tbm_surface_queue_trace_cb cb, void *data)
460 {
461         queue_trace *item = NULL, *tmp;
462
463         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
464                 if (item->cb == cb && item->data == data) {
465                         LIST_DEL(&item->link);
466                         free(item);
467                         return;
468                 }
469         }
470
471 		TBM_ERR("Cannot find notify\n");
472 }
473
474 static void
475 _trace_remove_all(struct list_head *list)
476 {
477         queue_trace *item = NULL, *tmp;
478
479         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
480                 LIST_DEL(&item->link);
481                 free(item);
482         }
483 }
484
485 static void
486 _trace_emit(tbm_surface_queue_h surface_queue,
487              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
488 {
489 	queue_trace *item = NULL, *tmp;
490
491         /*
492 		item->cb is a callback that lives outside of libtbm.
493 		The tbm user may remove an item from the list inside the callback,
494 		so we have to use LIST_FOR_EACH_ENTRY_SAFE here.
495         */
496         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
497                 item->cb(surface_queue, surface, trace, item->data);
498 }
499
500 static int
501 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
502 {
503         queue_node *node = NULL;
504         int count = 0;
505
506         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
507                 if (node->type == type)
508                         count++;
509         }
510
511         return count;
512 }
513
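/* Takes a reference on the surface, registers it in the per-queue node
 * list and pushes the new node onto the free_queue, so it can be handed
 * out by the next dequeue. */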
514 static void
515 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
516                           tbm_surface_h surface)
517 {
518         queue_node *node;
519
520         node = _queue_node_create();
521         TBM_RETURN_IF_FAIL(node != NULL);
522
523         tbm_surface_internal_ref(surface);
524         node->surface = surface;
525
526         LIST_ADDTAIL(&node->link, &surface_queue->list);
527         surface_queue->num_attached++;
528         _queue_node_push_back(&surface_queue->free_queue, node);
529 }
530
531 static void
532 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
533                           tbm_surface_h surface)
534 {
535         queue_node *node;
536         int queue_type;
537
538         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
539         if (node) {
540                 _queue_delete_node(surface_queue, node);
541                 surface_queue->num_attached--;
542         }
543 }
544
545 static void
546 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
547                            queue_node *node, int push_back)
548 {
549         if (push_back)
550                 _queue_node_push_back(&surface_queue->dirty_queue, node);
551         else
552                 _queue_node_push_front(&surface_queue->dirty_queue, node);
553 }
554
555 static queue_node *
556 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
557 {
558         queue_node *node;
559
560         node = _queue_node_pop_front(&surface_queue->free_queue);
561
562         return node;
563 }
564
565 static queue_node *
566 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
567 {
568         queue_node *node;
569
570         if (_queue_is_empty(&surface_queue->dirty_queue))
571                 return NULL;
572
573         node = _queue_node_pop_front(&surface_queue->dirty_queue);
574
575         return node;
576 }
577
578 static void
579 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
580                            queue_node *node, int push_back)
581 {
582         if (push_back)
583                 _queue_node_push_back(&surface_queue->free_queue, node);
584         else
585                 _queue_node_push_front(&surface_queue->free_queue, node);
586 }
587
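/* Common constructor used by the queue implementations: sets up the lock,
 * condition variables, queues and notification lists, calls the backend's
 * init() hook and registers the queue in the global surf_queue_list that
 * _tbm_surface_queue_is_valid() checks against. */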
588 static void
589 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
590                         int queue_size,
591                         int width, int height, int format,
592                         const tbm_surface_queue_interface *impl, void *data)
593 {
594         TBM_RETURN_IF_FAIL(surface_queue != NULL);
595         TBM_RETURN_IF_FAIL(impl != NULL);
596
597         if (!g_surf_queue_bufmgr)
598                 _init_tbm_surf_queue_bufmgr();
599
600         pthread_mutex_init(&surface_queue->lock, NULL);
601         pthread_cond_init(&surface_queue->free_cond, NULL);
602         pthread_cond_init(&surface_queue->dirty_cond, NULL);
603
604         surface_queue->queue_size = queue_size;
605         surface_queue->width = width;
606         surface_queue->height = height;
607         surface_queue->format = format;
608         surface_queue->impl = impl;
609         surface_queue->impl_data = data;
610         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
611
612         _queue_init(&surface_queue->free_queue);
613         _queue_init(&surface_queue->dirty_queue);
614         LIST_INITHEAD(&surface_queue->list);
615
616         LIST_INITHEAD(&surface_queue->destory_noti);
617         LIST_INITHEAD(&surface_queue->dequeuable_noti);
618         LIST_INITHEAD(&surface_queue->dequeue_noti);
619         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
620         LIST_INITHEAD(&surface_queue->acquirable_noti);
621         LIST_INITHEAD(&surface_queue->reset_noti);
622         LIST_INITHEAD(&surface_queue->trace_noti);
623
624         if (surface_queue->impl && surface_queue->impl->init)
625                 surface_queue->impl->init(surface_queue);
626
627         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
628 }
629
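/* The tbm_surface_queue_add_*_cb()/tbm_surface_queue_remove_*_cb() pairs
 * below register and unregister user callbacks for queue events (destroy,
 * dequeuable, dequeue, can_dequeue, acquirable, reset, trace). A minimal
 * sketch of how a consumer might get notified when a buffer becomes
 * acquirable ('on_acquirable' and 'ctx' are hypothetical names, not part
 * of libtbm):
 *
 *   static void
 *   on_acquirable(tbm_surface_queue_h queue, void *data)
 *   {
 *           struct consumer_ctx *ctx = data;
 *           // schedule tbm_surface_queue_acquire() from the consumer side
 *   }
 *
 *   tbm_surface_queue_add_acquirable_cb(queue, on_acquirable, ctx);
 *   ...
 *   tbm_surface_queue_remove_acquirable_cb(queue, on_acquirable, ctx);
 */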
630 tbm_surface_queue_error_e
631 tbm_surface_queue_add_destroy_cb(
632         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
633         void *data)
634 {
635         _tbm_surf_queue_mutex_lock();
636         _tbm_set_last_result(TBM_ERROR_NONE);
637
638         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
639                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
640         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
641 			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
642
643         pthread_mutex_lock(&surface_queue->lock);
644
645         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
646
647         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
648
649         pthread_mutex_unlock(&surface_queue->lock);
650
651         _tbm_surf_queue_mutex_unlock();
652
653         return TBM_SURFACE_QUEUE_ERROR_NONE;
654 }
655
656 tbm_surface_queue_error_e
657 tbm_surface_queue_remove_destroy_cb(
658         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
659         void *data)
660 {
661         _tbm_surf_queue_mutex_lock();
662         _tbm_set_last_result(TBM_ERROR_NONE);
663
664         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
665                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
666
667         pthread_mutex_lock(&surface_queue->lock);
668
669         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
670
671         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
672
673         pthread_mutex_unlock(&surface_queue->lock);
674
675         _tbm_surf_queue_mutex_unlock();
676
677         return TBM_SURFACE_QUEUE_ERROR_NONE;
678 }
679
680 tbm_surface_queue_error_e
681 tbm_surface_queue_add_dequeuable_cb(
682         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
683         void *data)
684 {
685         _tbm_surf_queue_mutex_lock();
686         _tbm_set_last_result(TBM_ERROR_NONE);
687
688         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
689                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
690         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
691 			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
692
693         pthread_mutex_lock(&surface_queue->lock);
694
695         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
696
697         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
698
699         pthread_mutex_unlock(&surface_queue->lock);
700
701         _tbm_surf_queue_mutex_unlock();
702
703         return TBM_SURFACE_QUEUE_ERROR_NONE;
704 }
705
706 tbm_surface_queue_error_e
707 tbm_surface_queue_remove_dequeuable_cb(
708         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
709         void *data)
710 {
711         _tbm_surf_queue_mutex_lock();
712         _tbm_set_last_result(TBM_ERROR_NONE);
713
714         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
715                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
716
717         pthread_mutex_lock(&surface_queue->lock);
718
719         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
720
721         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
722
723         pthread_mutex_unlock(&surface_queue->lock);
724
725         _tbm_surf_queue_mutex_unlock();
726
727         return TBM_SURFACE_QUEUE_ERROR_NONE;
728 }
729
730 tbm_surface_queue_error_e
731 tbm_surface_queue_add_dequeue_cb(
732         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
733         void *data)
734 {
735         _tbm_surf_queue_mutex_lock();
736         _tbm_set_last_result(TBM_ERROR_NONE);
737
738         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
739                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
740         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
741 			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
742
743         pthread_mutex_lock(&surface_queue->lock);
744
745         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
746
747         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
748
749         pthread_mutex_unlock(&surface_queue->lock);
750
751         _tbm_surf_queue_mutex_unlock();
752
753         return TBM_SURFACE_QUEUE_ERROR_NONE;
754 }
755
756 tbm_surface_queue_error_e
757 tbm_surface_queue_remove_dequeue_cb(
758         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
759         void *data)
760 {
761         _tbm_surf_queue_mutex_lock();
762         _tbm_set_last_result(TBM_ERROR_NONE);
763
764         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
765                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
766
767         pthread_mutex_lock(&surface_queue->lock);
768
769         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
770
771         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
772
773         pthread_mutex_unlock(&surface_queue->lock);
774
775         _tbm_surf_queue_mutex_unlock();
776
777         return TBM_SURFACE_QUEUE_ERROR_NONE;
778 }
779
780 tbm_surface_queue_error_e
781 tbm_surface_queue_add_can_dequeue_cb(
782         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
783         void *data)
784 {
785         _tbm_surf_queue_mutex_lock();
786         _tbm_set_last_result(TBM_ERROR_NONE);
787
788         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
789                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
790         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
791 			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
792
793         pthread_mutex_lock(&surface_queue->lock);
794
795         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
796
797         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
798
799         pthread_mutex_unlock(&surface_queue->lock);
800
801         _tbm_surf_queue_mutex_unlock();
802
803         return TBM_SURFACE_QUEUE_ERROR_NONE;
804 }
805
806 tbm_surface_queue_error_e
807 tbm_surface_queue_remove_can_dequeue_cb(
808         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
809         void *data)
810 {
811         _tbm_surf_queue_mutex_lock();
812         _tbm_set_last_result(TBM_ERROR_NONE);
813
814         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
815                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
816
817         pthread_mutex_lock(&surface_queue->lock);
818
819         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
820
821         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
822
823         pthread_mutex_unlock(&surface_queue->lock);
824
825         _tbm_surf_queue_mutex_unlock();
826
827         return TBM_SURFACE_QUEUE_ERROR_NONE;
828 }
829
830 tbm_surface_queue_error_e
831 tbm_surface_queue_add_acquirable_cb(
832         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
833         void *data)
834 {
835         _tbm_surf_queue_mutex_lock();
836         _tbm_set_last_result(TBM_ERROR_NONE);
837
838         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
839                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
840         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
841 			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
842
843         pthread_mutex_lock(&surface_queue->lock);
844
845         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
846
847         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
848
849         pthread_mutex_unlock(&surface_queue->lock);
850
851         _tbm_surf_queue_mutex_unlock();
852
853         return TBM_SURFACE_QUEUE_ERROR_NONE;
854 }
855
856 tbm_surface_queue_error_e
857 tbm_surface_queue_remove_acquirable_cb(
858         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
859         void *data)
860 {
861         _tbm_surf_queue_mutex_lock();
862         _tbm_set_last_result(TBM_ERROR_NONE);
863
864         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
865                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
866
867         pthread_mutex_lock(&surface_queue->lock);
868
869         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
870
871         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
872
873         pthread_mutex_unlock(&surface_queue->lock);
874
875         _tbm_surf_queue_mutex_unlock();
876
877         return TBM_SURFACE_QUEUE_ERROR_NONE;
878 }
879
880 tbm_surface_queue_error_e
881 tbm_surface_queue_add_trace_cb(
882         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
883         void *data)
884 {
885         _tbm_surf_queue_mutex_lock();
886         _tbm_set_last_result(TBM_ERROR_NONE);
887
888         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
889                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
890         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
891 			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
892
893         pthread_mutex_lock(&surface_queue->lock);
894
895         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
896
897         _trace_add(&surface_queue->trace_noti, trace_cb, data);
898
899         pthread_mutex_unlock(&surface_queue->lock);
900
901         _tbm_surf_queue_mutex_unlock();
902
903         return TBM_SURFACE_QUEUE_ERROR_NONE;
904 }
905
906 tbm_surface_queue_error_e
907 tbm_surface_queue_remove_trace_cb(
908         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
909         void *data)
910 {
911         _tbm_surf_queue_mutex_lock();
912         _tbm_set_last_result(TBM_ERROR_NONE);
913
914         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
915                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
916
917         pthread_mutex_lock(&surface_queue->lock);
918
919         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
920
921         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
922
923         pthread_mutex_unlock(&surface_queue->lock);
924
925         _tbm_surf_queue_mutex_unlock();
926
927         return TBM_SURFACE_QUEUE_ERROR_NONE;
928 }
929
930 tbm_surface_queue_error_e
931 tbm_surface_queue_set_alloc_cb(
932         tbm_surface_queue_h surface_queue,
933         tbm_surface_alloc_cb alloc_cb,
934         tbm_surface_free_cb free_cb,
935         void *data)
936 {
937         _tbm_surf_queue_mutex_lock();
938         _tbm_set_last_result(TBM_ERROR_NONE);
939
940         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
941                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
942
943         pthread_mutex_lock(&surface_queue->lock);
944
945         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
946
947         surface_queue->alloc_cb = alloc_cb;
948         surface_queue->free_cb = free_cb;
949         surface_queue->alloc_cb_data = data;
950
951         pthread_mutex_unlock(&surface_queue->lock);
952
953         _tbm_surf_queue_mutex_unlock();
954
955         return TBM_SURFACE_QUEUE_ERROR_NONE;
956 }
957
958 int
959 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
960 {
961         int width;
962
963         _tbm_surf_queue_mutex_lock();
964         _tbm_set_last_result(TBM_ERROR_NONE);
965
966         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
967
968         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
969
970         width = surface_queue->width;
971
972         _tbm_surf_queue_mutex_unlock();
973
974         return width;
975 }
976
977 int
978 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
979 {
980         int height;
981
982         _tbm_surf_queue_mutex_lock();
983         _tbm_set_last_result(TBM_ERROR_NONE);
984
985         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
986
987         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
988
989         height = surface_queue->height;
990
991         _tbm_surf_queue_mutex_unlock();
992
993         return height;
994 }
995
996 int
997 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
998 {
999         int format;
1000
1001         _tbm_surf_queue_mutex_lock();
1002         _tbm_set_last_result(TBM_ERROR_NONE);
1003
1004         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1005
1006         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1007
1008         format = surface_queue->format;
1009
1010         _tbm_surf_queue_mutex_unlock();
1011
1012         return format;
1013 }
1014
1015 int
1016 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1017 {
1018         int queue_size;
1019
1020         _tbm_surf_queue_mutex_lock();
1021         _tbm_set_last_result(TBM_ERROR_NONE);
1022
1023         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1024
1025         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1026
1027         queue_size = surface_queue->queue_size;
1028
1029         _tbm_surf_queue_mutex_unlock();
1030
1031         return queue_size;
1032 }
1033
1034 tbm_surface_queue_error_e
1035 tbm_surface_queue_add_reset_cb(
1036         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1037         void *data)
1038 {
1039         _tbm_surf_queue_mutex_lock();
1040         _tbm_set_last_result(TBM_ERROR_NONE);
1041
1042         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1043                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1044         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
1045 			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1046
1047         pthread_mutex_lock(&surface_queue->lock);
1048
1049         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1050
1051         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1052
1053         pthread_mutex_unlock(&surface_queue->lock);
1054
1055         _tbm_surf_queue_mutex_unlock();
1056
1057         return TBM_SURFACE_QUEUE_ERROR_NONE;
1058 }
1059
1060 tbm_surface_queue_error_e
1061 tbm_surface_queue_remove_reset_cb(
1062         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1063         void *data)
1064 {
1065         _tbm_surf_queue_mutex_lock();
1066         _tbm_set_last_result(TBM_ERROR_NONE);
1067
1068         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1069                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1070
1071         pthread_mutex_lock(&surface_queue->lock);
1072
1073         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1074
1075         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1076
1077         pthread_mutex_unlock(&surface_queue->lock);
1078
1079         _tbm_surf_queue_mutex_unlock();
1080
1081         return TBM_SURFACE_QUEUE_ERROR_NONE;
1082 }
1083
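/* Hands a dequeued surface back to the queue as new content: the surface
 * must not currently be sitting in the free_queue or dirty_queue, it is
 * moved to the dirty_queue via the backend enqueue() hook or the default
 * helper, dirty_cond is signalled and, after all locks are dropped, the
 * trace and acquirable notifications are emitted. */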
1084 tbm_surface_queue_error_e
1085 tbm_surface_queue_enqueue(tbm_surface_queue_h
1086                           surface_queue, tbm_surface_h surface)
1087 {
1088         queue_node *node;
1089         int queue_type;
1090
1091         _tbm_surf_queue_mutex_lock();
1092         _tbm_set_last_result(TBM_ERROR_NONE);
1093
1094         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1095                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1096         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1097                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1098
1099         if (b_dump_queue)
1100                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1101
1102         pthread_mutex_lock(&surface_queue->lock);
1103
1104         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1105
1106         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1107         if (node == NULL || queue_type != NODE_LIST) {
1108 		TBM_ERR("tbm_surface_queue_enqueue::surface is unknown or still in the free_queue/dirty_queue. node:%p, type:%d\n",
1109                         node, queue_type);
1110                 pthread_mutex_unlock(&surface_queue->lock);
1111
1112                 _tbm_surf_queue_mutex_unlock();
1113
1114                 if (!node) {
1115                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1116                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1117                 } else {
1118                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1119                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1120                 }
1121         }
1122
1123         if (surface_queue->impl && surface_queue->impl->enqueue)
1124                 surface_queue->impl->enqueue(surface_queue, node);
1125         else
1126                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1127
1128         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1129                 TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1130                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
1131                 pthread_mutex_unlock(&surface_queue->lock);
1132
1133                 _tbm_surf_queue_mutex_unlock();
1134                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
1135         }
1136
1137         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1138
1139         if (surface_queue->enqueue_sync_count == 1) {
1140                 tbm_surface_info_s info;
1141                 int ret;
1142
1143                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1144                 if (ret == TBM_SURFACE_ERROR_NONE)
1145                         tbm_surface_unmap(surface);
1146         }
1147
1148         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1149
1150         pthread_mutex_unlock(&surface_queue->lock);
1151         pthread_cond_signal(&surface_queue->dirty_cond);
1152
1153         _tbm_surf_queue_mutex_unlock();
1154
1155         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1156
1157         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1158
1159         return TBM_SURFACE_QUEUE_ERROR_NONE;
1160 }
1161
1162 tbm_surface_queue_error_e
1163 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1164                           surface_queue, tbm_surface_h surface)
1165 {
1166         queue_node *node;
1167         int queue_type;
1168
1169         _tbm_surf_queue_mutex_lock();
1170         _tbm_set_last_result(TBM_ERROR_NONE);
1171
1172         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1173                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1174         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1175                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1176
1177         pthread_mutex_lock(&surface_queue->lock);
1178
1179         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1180
1181         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1182         if (node == NULL || queue_type != NODE_LIST) {
1183 		TBM_ERR("tbm_surface_queue_cancel_dequeue::surface is unknown or still in the free_queue/dirty_queue. node:%p, type:%d\n",
1184                         node, queue_type);
1185                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1186                 pthread_mutex_unlock(&surface_queue->lock);
1187
1188                 _tbm_surf_queue_mutex_unlock();
1189                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1190         }
1191
1192         if (node->delete_pending) {
1193                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1194
1195                 _queue_delete_node(surface_queue, node);
1196
1197                 pthread_mutex_unlock(&surface_queue->lock);
1198
1199                 _tbm_surf_queue_mutex_unlock();
1200
1201                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1202
1203                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1204         }
1205
1206         if (surface_queue->queue_size < surface_queue->num_attached) {
1207 		TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1208
1209                 if (surface_queue->impl && surface_queue->impl->need_detach)
1210                         surface_queue->impl->need_detach(surface_queue, node);
1211                 else
1212                         _tbm_surface_queue_detach(surface_queue, surface);
1213
1214                 pthread_mutex_unlock(&surface_queue->lock);
1215
1216                 _tbm_surf_queue_mutex_unlock();
1217
1218                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1219
1220                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1221         }
1222
1223         if (surface_queue->impl && surface_queue->impl->release)
1224                 surface_queue->impl->release(surface_queue, node);
1225         else
1226                 _tbm_surface_queue_release(surface_queue, node, 1);
1227
1228         if (_queue_is_empty(&surface_queue->free_queue)) {
1229                 TBM_ERR("surface_queue->free_queue is empty.\n");
1230                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1231                 pthread_mutex_unlock(&surface_queue->lock);
1232
1233                 _tbm_surf_queue_mutex_unlock();
1234                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1235         }
1236
1237         node->type = QUEUE_NODE_TYPE_RELEASE;
1238
1239         pthread_mutex_unlock(&surface_queue->lock);
1240         pthread_cond_signal(&surface_queue->free_cond);
1241
1242         _tbm_surf_queue_mutex_unlock();
1243
1244         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1245
1246         return TBM_SURFACE_QUEUE_ERROR_NONE;
1247 }
1248
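/* Pops a free buffer for the producer, asking the backend to attach a new
 * surface via need_attach() when the free_queue is empty. A minimal
 * producer-side sketch, assuming 'queue' was created elsewhere and with
 * error handling omitted ('fill_frame' is a hypothetical helper):
 *
 *   tbm_surface_h surface;
 *
 *   if (tbm_surface_queue_can_dequeue(queue, 1) &&
 *       tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           fill_frame(surface);
 *           tbm_surface_queue_enqueue(queue, surface);
 *   }
 */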
1249 tbm_surface_queue_error_e
1250 tbm_surface_queue_dequeue(tbm_surface_queue_h
1251                           surface_queue, tbm_surface_h *surface)
1252 {
1253         queue_node *node;
1254
1255         _tbm_surf_queue_mutex_lock();
1256         _tbm_set_last_result(TBM_ERROR_NONE);
1257
1258         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1259                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1260         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1261                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1262
1263         *surface = NULL;
1264
1265         pthread_mutex_lock(&surface_queue->lock);
1266
1267         if (_queue_is_empty(&surface_queue->free_queue)) {
1268                 if (surface_queue->impl && surface_queue->impl->need_attach)
1269                         surface_queue->impl->need_attach(surface_queue);
1270
1271                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1272                         TBM_ERR("surface_queue:%p is invalid", surface_queue);
1273                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1274                         pthread_mutex_unlock(&surface_queue->lock);
1275                         _tbm_surf_queue_mutex_unlock();
1276                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1277                 }
1278         }
1279
1280         if (surface_queue->impl && surface_queue->impl->dequeue)
1281                 node = surface_queue->impl->dequeue(surface_queue);
1282         else
1283                 node = _tbm_surface_queue_dequeue(surface_queue);
1284
1285         if (node == NULL || node->surface == NULL) {
1286                 TBM_ERR("_queue_node_pop_front failed\n");
1287                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1288                 pthread_mutex_unlock(&surface_queue->lock);
1289
1290                 _tbm_surf_queue_mutex_unlock();
1291                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1292         }
1293
1294         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1295         *surface = node->surface;
1296
1297         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1298
1299         pthread_mutex_unlock(&surface_queue->lock);
1300
1301         _tbm_surf_queue_mutex_unlock();
1302
1303         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1304
1305         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1306
1307         return TBM_SURFACE_QUEUE_ERROR_NONE;
1308 }
1309
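/* Reports whether a dequeue would succeed. The can_dequeue notifications
 * are emitted without holding any lock so that listeners may release or
 * attach buffers first; with 'wait' set, the call blocks on free_cond until
 * a buffer is put back on the free_queue. */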
1310 int
1311 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1312 {
1313         _tbm_surf_queue_mutex_lock();
1314         _tbm_set_last_result(TBM_ERROR_NONE);
1315
1316         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1317
1318         _tbm_surf_queue_mutex_unlock();
1319
1320         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1321
1322         _tbm_surf_queue_mutex_lock();
1323
1324         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1325
1326         pthread_mutex_lock(&surface_queue->lock);
1327
1328         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1329
1330         if (_queue_is_empty(&surface_queue->free_queue)) {
1331                 if (surface_queue->impl && surface_queue->impl->need_attach)
1332                         surface_queue->impl->need_attach(surface_queue);
1333
1334                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1335                         TBM_ERR("surface_queue:%p is invalid", surface_queue);
1336                         pthread_mutex_unlock(&surface_queue->lock);
1337                         _tbm_surf_queue_mutex_unlock();
1338                         return 0;
1339                 }
1340         }
1341
1342         if (!_queue_is_empty(&surface_queue->free_queue)) {
1343                 pthread_mutex_unlock(&surface_queue->lock);
1344                 _tbm_surf_queue_mutex_unlock();
1345                 return 1;
1346         }
1347
1348         if (wait) {
1349                 _tbm_surf_queue_mutex_unlock();
1350                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1351                 pthread_mutex_unlock(&surface_queue->lock);
1352                 return 1;
1353         }
1354
1355         pthread_mutex_unlock(&surface_queue->lock);
1356         _tbm_surf_queue_mutex_unlock();
1357         return 0;
1358 }
1359
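/* Returns an acquired surface to the free_queue (or detaches it when more
 * surfaces are attached than queue_size allows), signals free_cond so that
 * a thread waiting in tbm_surface_queue_can_dequeue() wakes up, and emits
 * the trace and dequeuable notifications after the locks are dropped. */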
1360 tbm_surface_queue_error_e
1361 tbm_surface_queue_release(tbm_surface_queue_h
1362                           surface_queue, tbm_surface_h surface)
1363 {
1364         queue_node *node;
1365         int queue_type;
1366
1367         _tbm_surf_queue_mutex_lock();
1368         _tbm_set_last_result(TBM_ERROR_NONE);
1369
1370         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1371                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1372         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1373                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1374
1375         pthread_mutex_lock(&surface_queue->lock);
1376
1377         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1378
1379         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1380         if (node == NULL || queue_type != NODE_LIST) {
1381 		TBM_ERR("tbm_surface_queue_release::surface is unknown or still in the free_queue/dirty_queue. node:%p, type:%d\n",
1382                         node, queue_type);
1383                 pthread_mutex_unlock(&surface_queue->lock);
1384
1385                 _tbm_surf_queue_mutex_unlock();
1386
1387                 if (!node) {
1388                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1389                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1390                 } else {
1391                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1392                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1393                 }
1394         }
1395
1396         if (node->delete_pending) {
1397                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1398
1399                 _queue_delete_node(surface_queue, node);
1400
1401                 pthread_mutex_unlock(&surface_queue->lock);
1402
1403                 _tbm_surf_queue_mutex_unlock();
1404
1405                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1406
1407                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1408         }
1409
1410         if (surface_queue->queue_size < surface_queue->num_attached) {
1411 		TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1412
1413                 if (surface_queue->impl && surface_queue->impl->need_detach)
1414                         surface_queue->impl->need_detach(surface_queue, node);
1415                 else
1416                         _tbm_surface_queue_detach(surface_queue, surface);
1417
1418                 pthread_mutex_unlock(&surface_queue->lock);
1419
1420                 _tbm_surf_queue_mutex_unlock();
1421
1422                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1423
1424                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1425         }
1426
1427         if (surface_queue->impl && surface_queue->impl->release)
1428                 surface_queue->impl->release(surface_queue, node);
1429         else
1430                 _tbm_surface_queue_release(surface_queue, node, 1);
1431
1432         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1433                 TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
1434 		_tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
1435                 pthread_mutex_unlock(&surface_queue->lock);
1436
1437                 _tbm_surf_queue_mutex_unlock();
1438 		return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
1439         }
1440
1441         node->type = QUEUE_NODE_TYPE_RELEASE;
1442
1443         pthread_mutex_unlock(&surface_queue->lock);
1444         pthread_cond_signal(&surface_queue->free_cond);
1445
1446         _tbm_surf_queue_mutex_unlock();
1447
1448         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1449
1450         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1451
1452         return TBM_SURFACE_QUEUE_ERROR_NONE;
1453 }
1454
1455 tbm_surface_queue_error_e
1456 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1457                         surface_queue, tbm_surface_h surface)
1458 {
1459         queue_node *node;
1460         int queue_type;
1461
1462         _tbm_surf_queue_mutex_lock();
1463         _tbm_set_last_result(TBM_ERROR_NONE);
1464
1465         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1466                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1467         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1468                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1469
1470         pthread_mutex_lock(&surface_queue->lock);
1471
1472         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1473
1474         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1475         if (node == NULL || queue_type != NODE_LIST) {
1476 		TBM_ERR("tbm_surface_queue_cancel_acquire::surface is unknown or still in the free_queue/dirty_queue. node:%p, type:%d\n",
1477                         node, queue_type);
1478                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1479                 pthread_mutex_unlock(&surface_queue->lock);
1480
1481                 _tbm_surf_queue_mutex_unlock();
1482                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1483         }
1484
1485         if (surface_queue->impl && surface_queue->impl->enqueue)
1486                 surface_queue->impl->enqueue(surface_queue, node);
1487         else
1488                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1489
1490         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1491                 TBM_ERR("enqueue surface but queue is empty node:%p\n", node);
1492                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1493                 pthread_mutex_unlock(&surface_queue->lock);
1494
1495                 _tbm_surf_queue_mutex_unlock();
1496                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1497         }
1498
1499         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1500
1501         pthread_mutex_unlock(&surface_queue->lock);
1502         pthread_cond_signal(&surface_queue->dirty_cond);
1503
1504         _tbm_surf_queue_mutex_unlock();
1505
1506         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1507
1508         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1509
1510         return TBM_SURFACE_QUEUE_ERROR_NONE;
1511 }
1512
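/* Pops the oldest enqueued buffer for the consumer. A minimal consumer-side
 * sketch, with error handling omitted ('present_frame' is a hypothetical
 * helper that displays or reads the surface):
 *
 *   tbm_surface_h surface;
 *
 *   if (tbm_surface_queue_can_acquire(queue, 0) &&
 *       tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           present_frame(surface);
 *           tbm_surface_queue_release(queue, surface);
 *   }
 */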
1513 tbm_surface_queue_error_e
1514 tbm_surface_queue_acquire(tbm_surface_queue_h
1515                           surface_queue, tbm_surface_h *surface)
1516 {
1517         queue_node *node;
1518
1519         _tbm_surf_queue_mutex_lock();
1520         _tbm_set_last_result(TBM_ERROR_NONE);
1521
1522 	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1523 			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1524 	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1525 			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1526
1527 	*surface = NULL;
1528
1529         pthread_mutex_lock(&surface_queue->lock);
1530
1531         if (surface_queue->impl && surface_queue->impl->acquire)
1532                 node = surface_queue->impl->acquire(surface_queue);
1533         else
1534                 node = _tbm_surface_queue_acquire(surface_queue);
1535
1536         if (node == NULL || node->surface == NULL) {
1537                 TBM_ERR("_queue_node_pop_front failed\n");
1538                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1539                 pthread_mutex_unlock(&surface_queue->lock);
1540
1541                 _tbm_surf_queue_mutex_unlock();
1542                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1543         }
1544
1545         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1546
1547         *surface = node->surface;
1548
1549         if (surface_queue->acquire_sync_count == 1) {
1550                 tbm_surface_info_s info;
1551                 int ret;
1552
1553                 TBM_ERR("start map surface:%p", *surface);
1554                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1555                 TBM_ERR("end map surface:%p", *surface);
1556                 if (ret == TBM_SURFACE_ERROR_NONE)
1557                         tbm_surface_unmap(*surface);
1558         }
1559
1560         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1561
1562         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1563
1564         pthread_mutex_unlock(&surface_queue->lock);
1565
1566         _tbm_surf_queue_mutex_unlock();
1567
1568         if (b_dump_queue)
1569                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1570
1571         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1572
1573         return TBM_SURFACE_QUEUE_ERROR_NONE;
1574 }
1575
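/*
 * tbm_surface_queue_can_acquire
 * Returns 1 immediately when the dirty_queue already holds a buffer.
 * When 'wait' is non-zero and at least one buffer is currently dequeued
 * by the producer, the call blocks on dirty_cond until an enqueue (or a
 * cancel_acquire) signals it, then returns 1. Otherwise it returns 0.
 */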
1576 int
1577 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1578 {
1579         _tbm_surf_queue_mutex_lock();
1580         _tbm_set_last_result(TBM_ERROR_NONE);
1581
1582         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1583
1584         pthread_mutex_lock(&surface_queue->lock);
1585
1586         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1587
1588         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1589                 pthread_mutex_unlock(&surface_queue->lock);
1590                 _tbm_surf_queue_mutex_unlock();
1591                 return 1;
1592         }
1593
1594         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1595                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1596                 _tbm_surf_queue_mutex_unlock();
1597                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1598                 pthread_mutex_unlock(&surface_queue->lock);
1599                 return 1;
1600         }
1601
1602         pthread_mutex_unlock(&surface_queue->lock);
1603         _tbm_surf_queue_mutex_unlock();
1604         return 0;
1605 }
1606
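/*
 * tbm_surface_queue_destroy
 * Removes the queue from the global surf_queue_list, deletes every
 * remaining node, runs the backend destroy hook, emits the destroy
 * notification, drops all registered notify/trace callbacks, and frees
 * the queue. The shared bufmgr is de-initialized once the last queue is
 * gone.
 */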
1607 void
1608 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1609 {
1610         queue_node *node = NULL, *tmp;
1611
1612         _tbm_surf_queue_mutex_lock();
1613         _tbm_set_last_result(TBM_ERROR_NONE);
1614
1615         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1616
1617         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1618
1619         LIST_DEL(&surface_queue->item_link);
1620
1621         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1622                 _queue_delete_node(surface_queue, node);
1623
1624         if (surface_queue->impl && surface_queue->impl->destroy)
1625                 surface_queue->impl->destroy(surface_queue);
1626
1627         _notify_emit(surface_queue, &surface_queue->destory_noti);
1628
1629         _notify_remove_all(&surface_queue->destory_noti);
1630         _notify_remove_all(&surface_queue->dequeuable_noti);
1631         _notify_remove_all(&surface_queue->dequeue_noti);
1632         _notify_remove_all(&surface_queue->can_dequeue_noti);
1633         _notify_remove_all(&surface_queue->acquirable_noti);
1634         _notify_remove_all(&surface_queue->reset_noti);
1635         _trace_remove_all(&surface_queue->trace_noti);
1636
1637         pthread_mutex_destroy(&surface_queue->lock);
1638
1639         free(surface_queue);
1640
1641         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1642                 _deinit_tbm_surf_queue_bufmgr();
1643
1644         _tbm_surf_queue_mutex_unlock();
1645 }
1646
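/*
 * tbm_surface_queue_reset
 * Applies a new width/height/format and flushes the attached buffers.
 * If nothing changed the call is a no-op. Under
 * TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE only the buffers sitting in the
 * free_queue are destroyed right away; buffers still held outside the
 * queue are marked delete_pending instead. free_cond is signalled and
 * the reset notification is emitted afterwards.
 */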
1647 tbm_surface_queue_error_e
1648 tbm_surface_queue_reset(tbm_surface_queue_h
1649                         surface_queue, int width, int height, int format)
1650 {
1651         queue_node *node = NULL, *tmp;
1652
1653         _tbm_surf_queue_mutex_lock();
1654         _tbm_set_last_result(TBM_ERROR_NONE);
1655
1656         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1657                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1658
1659         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1660
1661         if (width == surface_queue->width && height == surface_queue->height &&
1662                 format == surface_queue->format) {
1663                 _tbm_surf_queue_mutex_unlock();
1664                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1665         }
1666
1667         pthread_mutex_lock(&surface_queue->lock);
1668
1669         surface_queue->width = width;
1670         surface_queue->height = height;
1671         surface_queue->format = format;
1672
1673         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1674                 /* Destroy surfaces and push them to free_queue */
1675                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1676                         _queue_delete_node(surface_queue, node);
1677
1678                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1679                         node->delete_pending = 1;
1680         } else {
1681                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1682                         _queue_delete_node(surface_queue, node);
1683
1684                 _queue_init(&surface_queue->dirty_queue);
1685                 LIST_INITHEAD(&surface_queue->list);
1686         }
1687
1688         /* Reset queue */
1689         _queue_init(&surface_queue->free_queue);
1690
1691         surface_queue->num_attached = 0;
1692
1693         if (surface_queue->impl && surface_queue->impl->reset)
1694                 surface_queue->impl->reset(surface_queue);
1695
1696         pthread_mutex_unlock(&surface_queue->lock);
1697         pthread_cond_signal(&surface_queue->free_cond);
1698
1699         _tbm_surf_queue_mutex_unlock();
1700
1701         _notify_emit(surface_queue, &surface_queue->reset_noti);
1702
1703         return TBM_SURFACE_QUEUE_ERROR_NONE;
1704 }
1705
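/*
 * tbm_surface_queue_notify_reset
 * Emits the reset notification callbacks without modifying the queue.
 */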
1706 tbm_surface_queue_error_e
1707 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1708 {
1709         _tbm_surf_queue_mutex_lock();
1710         _tbm_set_last_result(TBM_ERROR_NONE);
1711
1712         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1713                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1714
1715         _tbm_surf_queue_mutex_unlock();
1716
1717         _notify_emit(surface_queue, &surface_queue->reset_noti);
1718
1719         return TBM_SURFACE_QUEUE_ERROR_NONE;
1720 }
1721
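/*
 * tbm_surface_queue_notify_dequeuable
 * Signals free_cond (briefly taking the queue lock so the signal is
 * ordered against waiters, e.g. a blocking can_dequeue) and then emits
 * the dequeuable notification callbacks.
 */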
1722 tbm_surface_queue_error_e
1723 tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
1724 {
1725         _tbm_surf_queue_mutex_lock();
1726         _tbm_set_last_result(TBM_ERROR_NONE);
1727
1728         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1729                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1730
1731         pthread_mutex_lock(&surface_queue->lock);
1732         pthread_mutex_unlock(&surface_queue->lock);
1733         pthread_cond_signal(&surface_queue->free_cond);
1734
1735         _tbm_surf_queue_mutex_unlock();
1736
1737         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1738
1739         return TBM_SURFACE_QUEUE_ERROR_NONE;
1740 }
1741
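/*
 * tbm_surface_queue_set_size
 * Without 'flush', shrinking the queue only detaches surplus buffers
 * from the free_queue; growing just records the new size. With 'flush',
 * every attached buffer is dropped (or marked delete_pending under
 * GUARANTEE_CYCLE), the backend reset hook runs, free_cond is signalled
 * and the reset notification is emitted.
 */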
1742 tbm_surface_queue_error_e
1743 tbm_surface_queue_set_size(tbm_surface_queue_h
1744                         surface_queue, int queue_size, int flush)
1745 {
1746         queue_node *node = NULL, *tmp;
1747
1748         _tbm_surf_queue_mutex_lock();
1749         _tbm_set_last_result(TBM_ERROR_NONE);
1750
1751         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1752                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1753         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1754                                         TBM_ERROR_INVALID_PARAMETER);
1755
1756         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1757
1758         if ((surface_queue->queue_size == queue_size) && !flush) {
1759                 _tbm_surf_queue_mutex_unlock();
1760                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1761         }
1762
1763         pthread_mutex_lock(&surface_queue->lock);
1764
1765         if (flush) {
1766                 surface_queue->queue_size = queue_size;
1767
1768                 if (surface_queue->num_attached == 0) {
1769                         pthread_mutex_unlock(&surface_queue->lock);
1770                         _tbm_surf_queue_mutex_unlock();
1771                         return TBM_SURFACE_QUEUE_ERROR_NONE;
1772                 }
1773
1774                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1775                         /* Destroy surfaces and push them to free_queue */
1776                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1777                                 _queue_delete_node(surface_queue, node);
1778
1779                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1780                                 node->delete_pending = 1;
1781                 } else {
1782                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1783                                 _queue_delete_node(surface_queue, node);
1784
1785                         _queue_init(&surface_queue->dirty_queue);
1786                         LIST_INITHEAD(&surface_queue->list);
1787                 }
1788
1789                 /* Reset queue */
1790                 _queue_init(&surface_queue->free_queue);
1791
1792                 surface_queue->num_attached = 0;
1793
1794                 if (surface_queue->impl && surface_queue->impl->reset)
1795                         surface_queue->impl->reset(surface_queue);
1796
1797                 pthread_mutex_unlock(&surface_queue->lock);
1798                 pthread_cond_signal(&surface_queue->free_cond);
1799
1800                 _tbm_surf_queue_mutex_unlock();
1801
1802                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1803
1804                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1805         } else {
1806                 if (surface_queue->queue_size > queue_size) {
1807                         int need_del = surface_queue->queue_size - queue_size;
1808
1809                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1810                                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1811
1812                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1813                                         surface_queue->impl->need_detach(surface_queue, node);
1814                                 else
1815                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1816
1817                                 need_del--;
1818                                 if (need_del == 0)
1819                                         break;
1820                         }
1821                 }
1822
1823                 surface_queue->queue_size = queue_size;
1824
1825                 pthread_mutex_unlock(&surface_queue->lock);
1826
1827                 _tbm_surf_queue_mutex_unlock();
1828
1829                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1830         }
1831 }
1832
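/*
 * tbm_surface_queue_free_flush
 * Detaches every buffer currently held in the free_queue, leaving
 * dequeued, enqueued and acquired buffers untouched, then re-initializes
 * the free list.
 */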
1833 tbm_surface_queue_error_e
1834 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1835 {
1836         queue_node *node = NULL;
1837
1838         _tbm_surf_queue_mutex_lock();
1839         _tbm_set_last_result(TBM_ERROR_NONE);
1840
1841         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1842                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1843
1844         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1845
1846         if (surface_queue->num_attached == 0) {
1847                 _tbm_surf_queue_mutex_unlock();
1848                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1849         }
1850
1851         pthread_mutex_lock(&surface_queue->lock);
1852
1853         /* Destroy surfaces in free_queue */
1854         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1855                 if (surface_queue->impl && surface_queue->impl->need_detach)
1856                         surface_queue->impl->need_detach(surface_queue, node);
1857                 else
1858                         _tbm_surface_queue_detach(surface_queue, node->surface);
1859         }
1860
1861         /* Reset queue */
1862         _queue_init(&surface_queue->free_queue);
1863
1864         pthread_mutex_unlock(&surface_queue->lock);
1865         _tbm_surf_queue_mutex_unlock();
1866
1867         return TBM_SURFACE_QUEUE_ERROR_NONE;
1868 }
1869
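/*
 * tbm_surface_queue_flush
 * Drops every attached buffer and resets the queue to an empty state,
 * deferring deletion (via delete_pending) for buffers that are still
 * outside the queue when GUARANTEE_CYCLE is set. Behaves like
 * tbm_surface_queue_reset except that the geometry and format are kept.
 */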
1870 tbm_surface_queue_error_e
1871 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1872 {
1873         queue_node *node = NULL, *tmp;
1874
1875         _tbm_surf_queue_mutex_lock();
1876         _tbm_set_last_result(TBM_ERROR_NONE);
1877
1878         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1879                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1880
1881         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1882
1883         if (surface_queue->num_attached == 0) {
1884                 _tbm_surf_queue_mutex_unlock();
1885                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1886         }
1887
1888         pthread_mutex_lock(&surface_queue->lock);
1889
1890         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1891                 /* Destroy surfaces and push them to free_queue */
1892                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1893                         _queue_delete_node(surface_queue, node);
1894
1895                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1896                         node->delete_pending = 1;
1897         } else {
1898                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1899                         _queue_delete_node(surface_queue, node);
1900
1901                 _queue_init(&surface_queue->dirty_queue);
1902                 LIST_INITHEAD(&surface_queue->list);
1903         }
1904
1905         /* Reset queue */
1906         _queue_init(&surface_queue->free_queue);
1907
1908         surface_queue->num_attached = 0;
1909
1910         if (surface_queue->impl && surface_queue->impl->reset)
1911                 surface_queue->impl->reset(surface_queue);
1912
1913         pthread_mutex_unlock(&surface_queue->lock);
1914         pthread_cond_signal(&surface_queue->free_cond);
1915
1916         _tbm_surf_queue_mutex_unlock();
1917
1918         _notify_emit(surface_queue, &surface_queue->reset_noti);
1919
1920         return TBM_SURFACE_QUEUE_ERROR_NONE;
1921 }
1922
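/*
 * tbm_surface_queue_get_surfaces
 * Reports the attached surfaces, skipping nodes that are delete_pending.
 * The count is always written to 'num'; the surfaces themselves are
 * copied only when 'surfaces' is non-NULL, so a first call with NULL can
 * be used to size the array.
 */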
1923 tbm_surface_queue_error_e
1924 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1925                         tbm_surface_h *surfaces, int *num)
1926 {
1927         queue_node *node = NULL;
1928
1929         _tbm_surf_queue_mutex_lock();
1930         _tbm_set_last_result(TBM_ERROR_NONE);
1931
1932         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1933                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1934         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1935                                TBM_ERROR_INVALID_PARAMETER);
1936
1937         *num = 0;
1938
1939         pthread_mutex_lock(&surface_queue->lock);
1940
1941         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1942                 if (node->delete_pending) continue;
1943
1944                 if (surfaces)
1945                         surfaces[*num] = node->surface;
1946
1947                 *num = *num + 1;
1948         }
1949
1950         pthread_mutex_unlock(&surface_queue->lock);
1951
1952         _tbm_surf_queue_mutex_unlock();
1953
1954         return TBM_SURFACE_QUEUE_ERROR_NONE;
1955 }
1956
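/*
 * tbm_surface_queue_get_trace_surface_num
 * Counts the nodes currently in the dequeued, enqueued, acquired or
 * released state, as selected by 'trace'.
 */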
1957 tbm_surface_queue_error_e
1958 tbm_surface_queue_get_trace_surface_num(
1959                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1960 {
1961         _tbm_surf_queue_mutex_lock();
1962         _tbm_set_last_result(TBM_ERROR_NONE);
1963
1964         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1965                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1966         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1967                                TBM_ERROR_INVALID_PARAMETER);
1968
1969         *num = 0;
1970
1971         pthread_mutex_lock(&surface_queue->lock);
1972
1973         switch (trace) {
1974         case TBM_SURFACE_QUEUE_TRACE_NONE:
1975                 *num = 0;
1976                 break;
1977         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1978                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1979                 break;
1980         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1981                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1982                 break;
1983         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1984                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1985                 break;
1986         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1987                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1988                 break;
1989         default:
1990                 break;
1991         }
1992
1993         pthread_mutex_unlock(&surface_queue->lock);
1994
1995         _tbm_surf_queue_mutex_unlock();
1996
1997         return TBM_SURFACE_QUEUE_ERROR_NONE;
1998 }
1999
2000 typedef struct {
2001         int flags;
2002 } tbm_queue_default;
2003
2004 static void
2005 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
2006 {
2007         free(surface_queue->impl_data);
2008 }
2009
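/*
 * Allocates one buffer on demand for the default backend: through the
 * user-installed alloc_cb when present (both locks are dropped around
 * the callback, presumably so it may call back into libtbm), otherwise
 * via tbm_surface_internal_create_with_flags, and attaches the result.
 */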
2010 static void
2011 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
2012 {
2013         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
2014         tbm_surface_h surface;
2015
2016         if (surface_queue->queue_size == surface_queue->num_attached)
2017                 return;
2018
2019         if (surface_queue->alloc_cb) {
2020                 pthread_mutex_unlock(&surface_queue->lock);
2021                 _tbm_surf_queue_mutex_unlock();
2022                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2023                 _tbm_surf_queue_mutex_lock();
2024                 pthread_mutex_lock(&surface_queue->lock);
2025
2026                 /* silent return */
2027                 if (!surface)
2028                         return;
2029
2030                 tbm_surface_internal_ref(surface);
2031         } else {
2032                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2033                                 surface_queue->height,
2034                                 surface_queue->format,
2035                                 data->flags);
2036                 TBM_RETURN_IF_FAIL(surface != NULL);
2037         }
2038
2039         _tbm_surface_queue_attach(surface_queue, surface);
2040         tbm_surface_internal_unref(surface);
2041 }
2042
2043 static const tbm_surface_queue_interface tbm_queue_default_impl = {
2044         NULL,                           /*__tbm_queue_default_init*/
2045         NULL,                           /*__tbm_queue_default_reset*/
2046         __tbm_queue_default_destroy,
2047         __tbm_queue_default_need_attach,
2048         NULL,                           /*__tbm_queue_default_enqueue*/
2049         NULL,                           /*__tbm_queue_default_release*/
2050         NULL,                           /*__tbm_queue_default_dequeue*/
2051         NULL,                           /*__tbm_queue_default_acquire*/
2052         NULL,                           /*__tbm_queue_default_need_detach*/
2053 };
2054
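/*
 * tbm_surface_queue_create
 * Creates a queue backed by the default implementation above: buffers
 * are allocated lazily in need_attach, either through a user alloc_cb or
 * via tbm_surface_internal_create_with_flags with the flags given here.
 *
 * A minimal producer-side sketch (error handling trimmed; 'q' and 'surf'
 * are illustrative names):
 *
 *   tbm_surface_queue_h q = tbm_surface_queue_create(3, 1280, 720,
 *                                                    TBM_FORMAT_ARGB8888, 0);
 *   tbm_surface_h surf;
 *   if (tbm_surface_queue_dequeue(q, &surf) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           // ... render into surf ...
 *           tbm_surface_queue_enqueue(q, surf);
 *   }
 *   tbm_surface_queue_destroy(q);
 */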
2055 tbm_surface_queue_h
2056 tbm_surface_queue_create(int queue_size, int width,
2057                          int height, int format, int flags)
2058 {
2059         _tbm_surf_queue_mutex_lock();
2060         _tbm_set_last_result(TBM_ERROR_NONE);
2061
2062         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2063         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2064         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2065         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2066
2067         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2068                                             sizeof(struct _tbm_surface_queue));
2069         if (!surface_queue) {
2070                 TBM_ERR("cannot allocate the surface_queue.\n");
2071                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2072                 _tbm_surf_queue_mutex_unlock();
2073                 return NULL;
2074         }
2075
2076         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2077
2078         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
2079                                   sizeof(tbm_queue_default));
2080         if (data == NULL) {
2081                 TBM_ERR("cannot allocate the tbm_queue_default.\n");
2082                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2083                 free(surface_queue);
2084                 _tbm_surf_queue_mutex_unlock();
2085                 return NULL;
2086         }
2087
2088         data->flags = flags;
2089         _tbm_surface_queue_init(surface_queue,
2090                                 queue_size,
2091                                 width, height, format,
2092                                 &tbm_queue_default_impl, data);
2093
2094         _tbm_surf_queue_mutex_unlock();
2095
2096         return surface_queue;
2097 }
2098
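/*
 * The "sequence" backend keeps every dequeued buffer in dequeue_list so
 * that enqueues can be constrained to dequeue order (see the hooks
 * below).
 */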
2099 typedef struct {
2100         int flags;
2101         queue dequeue_list;
2102 } tbm_queue_sequence;
2103
2104 static void
2105 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2106 {
2107         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2108
2109         _queue_init(&data->dequeue_list);
2110 }
2111
2112 static void
2113 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2114 {
2115         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2116
2117         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2118                 return;
2119
2120         _queue_init(&data->dequeue_list);
2121 }
2122
2123 static void
2124 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2125 {
2126         free(surface_queue->impl_data);
2127 }
2128
2129 static void
2130 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
2131 {
2132         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2133         tbm_surface_h surface;
2134
2135         if (surface_queue->queue_size == surface_queue->num_attached)
2136                 return;
2137
2138         if (surface_queue->alloc_cb) {
2139                 pthread_mutex_unlock(&surface_queue->lock);
2140                 _tbm_surf_queue_mutex_unlock();
2141                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2142                 _tbm_surf_queue_mutex_lock();
2143                 pthread_mutex_lock(&surface_queue->lock);
2144
2145                 /* silent return */
2146                 if (!surface)
2147                         return;
2148
2149                 tbm_surface_internal_ref(surface);
2150         } else {
2151                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2152                                 surface_queue->height,
2153                                 surface_queue->format,
2154                                 data->flags);
2155                 TBM_RETURN_IF_FAIL(surface != NULL);
2156         }
2157
2158         _tbm_surface_queue_attach(surface_queue, surface);
2159         tbm_surface_internal_unref(surface);
2160 }
2161
2162 static void
2163 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2164                              queue_node *node)
2165 {
2166         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2167         queue_node *first = NULL;
2168
2169         first = container_of(data->dequeue_list.head.next, first, item_link);
2170         if (first != node) {
2171                 return;
2172         }
2173
2174         node->priv_flags = 0;
2175
2176         _queue_node_pop(&data->dequeue_list, node);
2177         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2178 }
2179
2180 static void
2181 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2182                                 queue_node *node)
2183 {
2184         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2185
2186         if (node->priv_flags) {
2187                 node->priv_flags = 0;
2188                 _queue_node_pop(&data->dequeue_list, node);
2189         }
2190
2191         _tbm_surface_queue_release(surface_queue, node, 1);
2192 }
2193
2194 static queue_node *
2195 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2196                              surface_queue)
2197 {
2198         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2199         queue_node *node;
2200
2201         node = _tbm_surface_queue_dequeue(surface_queue);
2202         if (node) {
2203                 _queue_node_push_back(&data->dequeue_list, node);
2204                 node->priv_flags = 1;
2205         }
2206
2207         return node;
2208 }
2209
2210 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2211         __tbm_queue_sequence_init,
2212         __tbm_queue_sequence_reset,
2213         __tbm_queue_sequence_destroy,
2214         __tbm_queue_sequence_need_attach,
2215         __tbm_queue_sequence_enqueue,
2216         __tbm_queue_sequence_release,
2217         __tbm_queue_sequence_dequeue,
2218         NULL,                                   /*__tbm_queue_sequence_acquire*/
2219         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2220 };
2221
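/*
 * tbm_surface_queue_sequence_create
 * Creates a queue backed by the sequence implementation above: an
 * enqueue is honoured only for the oldest outstanding dequeued buffer,
 * so buffers reach the consumer in the order they were dequeued.
 */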
2222 tbm_surface_queue_h
2223 tbm_surface_queue_sequence_create(int queue_size, int width,
2224                                   int height, int format, int flags)
2225 {
2226         _tbm_surf_queue_mutex_lock();
2227         _tbm_set_last_result(TBM_ERROR_NONE);
2228
2229         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2230         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2231         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2232         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2233
2234         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2235                                             sizeof(struct _tbm_surface_queue));
2236         if (surface_queue == NULL) {
2237                 TBM_ERR("cannot allocate the surface_queue.\n");
2238                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2239                 _tbm_surf_queue_mutex_unlock();
2240                 return NULL;
2241         }
2242
2243         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2244
2245         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2246                                    sizeof(tbm_queue_sequence));
2247         if (data == NULL) {
2248                 TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
2249                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2250                 free(surface_queue);
2251                 _tbm_surf_queue_mutex_unlock();
2252                 return NULL;
2253         }
2254
2255         data->flags = flags;
2256         _tbm_surface_queue_init(surface_queue,
2257                                 queue_size,
2258                                 width, height, format,
2259                                 &tbm_queue_sequence_impl, data);
2260
2261         _tbm_surf_queue_mutex_unlock();
2262
2263         return surface_queue;
2264 }
2265
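/*
 * tbm_surface_queue_set_modes
 * ORs the given mode bits into the queue's modes. Passing
 * TBM_SURFACE_QUEUE_MODE_NONE leaves the current modes unchanged.
 */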
2266 tbm_surface_queue_error_e
2267 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2268                                   int modes)
2269 {
2270         _tbm_surf_queue_mutex_lock();
2271         _tbm_set_last_result(TBM_ERROR_NONE);
2272
2273         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2274                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2275
2276         pthread_mutex_lock(&surface_queue->lock);
2277
2278         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2279                 modes = TBM_SURFACE_QUEUE_MODE_NONE;
2280         else
2281                 surface_queue->modes |= modes;
2282
2283         pthread_mutex_unlock(&surface_queue->lock);
2284
2285         _tbm_surf_queue_mutex_unlock();
2286
2287         return TBM_SURFACE_QUEUE_ERROR_NONE;
2288 }
2289
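/*
 * tbm_surface_queue_set_sync_count
 * Derives the synchronization counters from the buffers currently in
 * flight: when dequeued + sync_count is zero, acquire_sync_count is set
 * to the number of enqueued buffers; otherwise enqueue_sync_count is set
 * to dequeued + sync_count.
 */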
2290 tbm_surface_queue_error_e
2291 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2292                                   unsigned int sync_count)
2293 {
2294         int dequeue_num, enqueue_num;
2295
2296         _tbm_surf_queue_mutex_lock();
2297         _tbm_set_last_result(TBM_ERROR_NONE);
2298
2299         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2300                                    TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2301
2302         pthread_mutex_lock(&surface_queue->lock);
2303
2304         dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2305         enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2306
2307         if (dequeue_num + sync_count == 0)
2308                 surface_queue->acquire_sync_count = enqueue_num;
2309         else
2310                 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2311
2312         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2313                                 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2314
2315         pthread_mutex_unlock(&surface_queue->lock);
2316
2317         _tbm_surf_queue_mutex_unlock();
2318
2319         return TBM_SURFACE_QUEUE_ERROR_NONE;
2320 }