1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 static tbm_bufmgr g_surf_queue_bufmgr;
42 static pthread_mutex_t tbm_surf_queue_lock;
43 void _tbm_surface_queue_mutex_unlock(void);
44
45 /* check condition */
46 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
47         if (!(cond)) {\
48                 TBM_ERR("'%s' failed.\n", #cond);\
49                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
50                 _tbm_surf_queue_mutex_unlock();\
51                 return;\
52         } \
53 }
54
55 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
56         if (!(cond)) {\
57                 TBM_ERR("'%s' failed.\n", #cond);\
58                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
59                 _tbm_surf_queue_mutex_unlock();\
60                 return val;\
61         } \
62 }
63
64 typedef enum _queue_node_type {
65         QUEUE_NODE_TYPE_NONE,
66         QUEUE_NODE_TYPE_DEQUEUE,
67         QUEUE_NODE_TYPE_ENQUEUE,
68         QUEUE_NODE_TYPE_ACQUIRE,
69         QUEUE_NODE_TYPE_RELEASE
70 } Queue_Node_Type;
71
72 typedef struct {
73         struct list_head head;
74         int count;
75 } queue;
76
77 typedef struct {
78         tbm_surface_h surface;
79
80         struct list_head item_link;
81         struct list_head link;
82
83         Queue_Node_Type type;
84
85         unsigned int priv_flags;        /*for each queue*/
86
87         int delete_pending;
88 } queue_node;
89
90 typedef struct {
91         struct list_head link;
92
93         tbm_surface_queue_notify_cb cb;
94         void *data;
95 } queue_notify;
96
97 typedef struct {
98         struct list_head link;
99
100         tbm_surface_queue_trace_cb cb;
101         void *data;
102 } queue_trace;
103
104 typedef struct _tbm_surface_queue_interface {
105         void (*init)(tbm_surface_queue_h queue);
106         void (*reset)(tbm_surface_queue_h queue);
107         void (*destroy)(tbm_surface_queue_h queue);
108         void (*need_attach)(tbm_surface_queue_h queue);
109
110         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
111         void (*release)(tbm_surface_queue_h queue, queue_node *node);
112         queue_node *(*dequeue)(tbm_surface_queue_h queue);
113         queue_node *(*acquire)(tbm_surface_queue_h queue);
114         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
115 } tbm_surface_queue_interface;
116
117 struct _tbm_surface_queue {
118         int width;
119         int height;
120         int format;
121         int queue_size;
122         int num_attached;
123
124         queue free_queue;
125         queue dirty_queue;
126         struct list_head list;
127
128         struct list_head destory_noti;
129         struct list_head dequeuable_noti;
130         struct list_head dequeue_noti;
131         struct list_head can_dequeue_noti;
132         struct list_head acquirable_noti;
133         struct list_head reset_noti;
134         struct list_head trace_noti;
135
136         pthread_mutex_t lock;
137         pthread_cond_t free_cond;
138         pthread_cond_t dirty_cond;
139
140         const tbm_surface_queue_interface *impl;
141         void *impl_data;
142
143         //For external buffer allocation
144         tbm_surface_alloc_cb alloc_cb;
145         tbm_surface_free_cb free_cb;
146         void *alloc_cb_data;
147
148         struct list_head item_link; /* link of surface queue */
149
150         int modes;
151         unsigned int enqueue_sync_count;
152         unsigned int acquire_sync_count;
153 };
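
/*
 * Buffer lifecycle (summary of the helpers below): every attached tbm_surface
 * is wrapped in a queue_node that stays on surface_queue->list (and counts
 * toward num_attached) until it is detached, while its item_link moves
 * between the two sub-queues:
 *
 *   free_queue  --dequeue-->  producer owns it  --enqueue-->  dirty_queue
 *   dirty_queue --acquire-->  consumer owns it  --release-->  free_queue
 *
 * node->type records the most recent transition (QUEUE_NODE_TYPE_*).
 */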
154
155 static bool
156 _tbm_surf_queue_mutex_init(void)
157 {
158         static bool tbm_surf_queue_mutex_init = false;
159
160         if (tbm_surf_queue_mutex_init)
161                 return true;
162
163         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
164                 TBM_ERR("fail: pthread_mutex_init\n");
165                 return false;
166         }
167
168         tbm_surf_queue_mutex_init = true;
169
170         return true;
171 }
172
173 static void
174 _tbm_surf_queue_mutex_lock(void)
175 {
176         if (!_tbm_surf_queue_mutex_init()) {
177                 TBM_ERR("fail: _tbm_surf_queue_mutex_init\n");
178                 return;
179         }
180
181         pthread_mutex_lock(&tbm_surf_queue_lock);
182 }
183
184 static void
185 _tbm_surf_queue_mutex_unlock(void)
186 {
187         pthread_mutex_unlock(&tbm_surf_queue_lock);
188 }
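
/*
 * Locking note: the public tbm_surface_queue_* entry points first take the
 * global tbm_surf_queue_lock (via _tbm_surf_queue_mutex_lock) to validate the
 * handle against g_surf_queue_bufmgr->surf_queue_list, and then take the
 * per-queue surface_queue->lock while manipulating the free/dirty queues.
 */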
189
190 static void
191 _init_tbm_surf_queue_bufmgr(void)
192 {
193         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
194 }
195
196 static void
197 _deinit_tbm_surf_queue_bufmgr(void)
198 {
199         if (!g_surf_queue_bufmgr)
200                 return;
201
202         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
203         g_surf_queue_bufmgr = NULL;
204 }
205
206 static int
207 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
208 {
209         tbm_surface_queue_h old_data = NULL;
210
211         if (surface_queue == NULL) {
212                 TBM_ERR("error: surface_queue is NULL.\n");
213                 return 0;
214         }
215
216         if (g_surf_queue_bufmgr == NULL) {
217                 TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n");
218                 return 0;
219         }
220
221         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
222                 TBM_ERR("error: surf_queue_list is empty\n");
223                 return 0;
224         }
225
226         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
227                                 item_link) {
228                 if (old_data == surface_queue) {
229                         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
230                         return 1;
231                 }
232         }
233
234         TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
235
236         return 0;
237 }
238
239 static queue_node *
240 _queue_node_create(void)
241 {
242         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
243
244         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
245
246         return node;
247 }
248
249 static void
250 _queue_node_delete(queue_node *node)
251 {
252         LIST_DEL(&node->item_link);
253         LIST_DEL(&node->link);
254         free(node);
255 }
256
257 static int
258 _queue_is_empty(queue *queue)
259 {
260         if (LIST_IS_EMPTY(&queue->head))
261                 return 1;
262
263         return 0;
264 }
265
266 static void
267 _queue_node_push_back(queue *queue, queue_node *node)
268 {
269         LIST_ADDTAIL(&node->item_link, &queue->head);
270         queue->count++;
271 }
272
273 static void
274 _queue_node_push_front(queue *queue, queue_node *node)
275 {
276         LIST_ADD(&node->item_link, &queue->head);
277         queue->count++;
278 }
279
280 static queue_node *
281 _queue_node_pop_front(queue *queue)
282 {
283         queue_node *node;
284
285         if (!queue->head.next) return NULL;
286         if (!queue->count) return NULL;
287
288         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
289
290         LIST_DELINIT(&node->item_link);
291         queue->count--;
292
293         return node;
294 }
295
296 static queue_node *
297 _queue_node_pop(queue *queue, queue_node *node)
298 {
299         LIST_DELINIT(&node->item_link);
300         queue->count--;
301
302         return node;
303 }
304
305 static queue_node *
306 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
307                 tbm_surface_h surface, int *out_type)
308 {
309         queue_node *node = NULL;
310
311         if (type == 0)
312                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
313         if (out_type)
314                 *out_type = 0;
315
316         if (type & FREE_QUEUE) {
317                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
318                                          item_link) {
319                         if (node->surface == surface) {
320                                 if (out_type)
321                                         *out_type = FREE_QUEUE;
322
323                                 return node;
324                         }
325                 }
326         }
327
328         if (type & DIRTY_QUEUE) {
329                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
330                                          item_link) {
331                         if (node->surface == surface) {
332                                 if (out_type)
333                                         *out_type = DIRTY_QUEUE;
334
335                                 return node;
336                         }
337                 }
338         }
339
340         if (type & NODE_LIST) {
341                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
342                         if (node->surface == surface) {
343                                 if (out_type)
344                                         *out_type = NODE_LIST;
345
346                                 return node;
347                         }
348                 }
349         }
350
351         TBM_ERR("failed to get the queue_node.\n");
352
353         return NULL;
354 }
355
356 static void
357 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
358 {
359         if (node->surface) {
360                 if (surface_queue->free_cb) {
361                         surface_queue->free_cb(surface_queue,
362                                         surface_queue->alloc_cb_data,
363                                         node->surface);
364                 }
365
366                 tbm_surface_destroy(node->surface);
367         }
368
369         _queue_node_delete(node);
370 }
371
372 static void
373 _queue_init(queue *queue)
374 {
375         LIST_INITHEAD(&queue->head);
376
377         queue->count = 0;
378 }
379
380 static void
381 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
382             void *data)
383 {
384         TBM_RETURN_IF_FAIL(cb != NULL);
385
386         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
387
388         TBM_RETURN_IF_FAIL(item != NULL);
389
390         LIST_INITHEAD(&item->link);
391         item->cb = cb;
392         item->data = data;
393
394         LIST_ADDTAIL(&item->link, list);
395 }
396
397 static void
398 _notify_remove(struct list_head *list,
399                tbm_surface_queue_notify_cb cb, void *data)
400 {
401         queue_notify *item = NULL, *tmp;
402
403         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
404                 if (item->cb == cb && item->data == data) {
405                         LIST_DEL(&item->link);
406                         free(item);
407                         return;
408                 }
409         }
410
411         TBM_ERR("Cannot find notify\n");
412 }
413
414 static void
415 _notify_remove_all(struct list_head *list)
416 {
417         queue_notify *item = NULL, *tmp;
418
419         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
420                 LIST_DEL(&item->link);
421                 free(item);
422         }
423 }
424
425 static void
426 _notify_emit(tbm_surface_queue_h surface_queue,
427              struct list_head *list)
428 {
429         queue_notify *item = NULL, *tmp;
430
431         /*
432                 item->cb is a callback provided from outside libtbm.
433                 The tbm user may remove items from this list inside the
434                 callback, so we have to use LIST_FOR_EACH_ENTRY_SAFE.
435         */
436         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
437                 item->cb(surface_queue, item->data);
438 }
439
440 static void
441 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
442             void *data)
443 {
444         TBM_RETURN_IF_FAIL(cb != NULL);
445
446         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
447
448         TBM_RETURN_IF_FAIL(item != NULL);
449
450         LIST_INITHEAD(&item->link);
451         item->cb = cb;
452         item->data = data;
453
454         LIST_ADDTAIL(&item->link, list);
455 }
456
457 static void
458 _trace_remove(struct list_head *list,
459                tbm_surface_queue_trace_cb cb, void *data)
460 {
461         queue_trace *item = NULL, *tmp;
462
463         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
464                 if (item->cb == cb && item->data == data) {
465                         LIST_DEL(&item->link);
466                         free(item);
467                         return;
468                 }
469         }
470
471         TBM_ERR("Cannot find trace callback\n");
472 }
473
474 static void
475 _trace_remove_all(struct list_head *list)
476 {
477         queue_trace *item = NULL, *tmp;
478
479         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
480                 LIST_DEL(&item->link);
481                 free(item);
482         }
483 }
484
485 static void
486 _trace_emit(tbm_surface_queue_h surface_queue,
487              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
488 {
489         queue_trace *item = NULL, *tmp;
490
491         /*
492                 item->cb is a callback provided from outside libtbm.
493                 The tbm user may remove items from this list inside the
494                 callback, so we have to use LIST_FOR_EACH_ENTRY_SAFE.
495         */
496         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
497                 item->cb(surface_queue, surface, trace, item->data);
498 }
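
/*
 * Usage sketch for the notify lists above (illustrative only; the callback
 * name on_acquirable is a placeholder, the signature follows
 * tbm_surface_queue_notify_cb):
 *
 *   static void
 *   on_acquirable(tbm_surface_queue_h queue, void *data)
 *   {
 *       tbm_surface_h surface;
 *
 *       if (tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           // ...present/consume the surface...
 *           tbm_surface_queue_release(queue, surface);
 *       }
 *   }
 *
 *   tbm_surface_queue_add_acquirable_cb(queue, on_acquirable, NULL);
 */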
499
500 static int
501 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
502 {
503         queue_node *node = NULL;
504         int count = 0;
505
506         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
507                 if (node->type == type)
508                         count++;
509         }
510
511         return count;
512 }
513
514 static void
515 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
516                           tbm_surface_h surface)
517 {
518         queue_node *node;
519
520         node = _queue_node_create();
521         TBM_RETURN_IF_FAIL(node != NULL);
522
523         tbm_surface_internal_ref(surface);
524         node->surface = surface;
525
526         LIST_ADDTAIL(&node->link, &surface_queue->list);
527         surface_queue->num_attached++;
528         _queue_node_push_back(&surface_queue->free_queue, node);
529 }
530
531 static void
532 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
533                           tbm_surface_h surface)
534 {
535         queue_node *node;
536         int queue_type;
537
538         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
539         if (node) {
540                 _queue_delete_node(surface_queue, node);
541                 surface_queue->num_attached--;
542         }
543 }
544
545 static void
546 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
547                            queue_node *node, int push_back)
548 {
549         if (push_back)
550                 _queue_node_push_back(&surface_queue->dirty_queue, node);
551         else
552                 _queue_node_push_front(&surface_queue->dirty_queue, node);
553 }
554
555 static queue_node *
556 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
557 {
558         queue_node *node;
559
560         node = _queue_node_pop_front(&surface_queue->free_queue);
561
562         return node;
563 }
564
565 static queue_node *
566 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
567 {
568         queue_node *node;
569
570         if (_queue_is_empty(&surface_queue->dirty_queue))
571                 return NULL;
572
573         node = _queue_node_pop_front(&surface_queue->dirty_queue);
574
575         return node;
576 }
577
578 static void
579 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
580                            queue_node *node, int push_back)
581 {
582         if (push_back)
583                 _queue_node_push_back(&surface_queue->free_queue, node);
584         else
585                 _queue_node_push_front(&surface_queue->free_queue, node);
586 }
587
588 static void
589 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
590                         int queue_size,
591                         int width, int height, int format,
592                         const tbm_surface_queue_interface *impl, void *data)
593 {
594         TBM_RETURN_IF_FAIL(surface_queue != NULL);
595         TBM_RETURN_IF_FAIL(impl != NULL);
596
597         if (!g_surf_queue_bufmgr)
598                 _init_tbm_surf_queue_bufmgr();
599
600         pthread_mutex_init(&surface_queue->lock, NULL);
601         pthread_cond_init(&surface_queue->free_cond, NULL);
602         pthread_cond_init(&surface_queue->dirty_cond, NULL);
603
604         surface_queue->queue_size = queue_size;
605         surface_queue->width = width;
606         surface_queue->height = height;
607         surface_queue->format = format;
608         surface_queue->impl = impl;
609         surface_queue->impl_data = data;
610         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
611
612         _queue_init(&surface_queue->free_queue);
613         _queue_init(&surface_queue->dirty_queue);
614         LIST_INITHEAD(&surface_queue->list);
615
616         LIST_INITHEAD(&surface_queue->destory_noti);
617         LIST_INITHEAD(&surface_queue->dequeuable_noti);
618         LIST_INITHEAD(&surface_queue->dequeue_noti);
619         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
620         LIST_INITHEAD(&surface_queue->acquirable_noti);
621         LIST_INITHEAD(&surface_queue->reset_noti);
622         LIST_INITHEAD(&surface_queue->trace_noti);
623
624         if (surface_queue->impl && surface_queue->impl->init)
625                 surface_queue->impl->init(surface_queue);
626
627         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
628 }
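
/*
 * Note: a queue flavour supplies its behaviour through
 * tbm_surface_queue_interface. When impl or an individual hook is NULL, the
 * enqueue/dequeue/acquire/release paths fall back to the default
 * _tbm_surface_queue_* helpers above. Illustrative sketch (hook names are
 * placeholders):
 *
 *   static const tbm_surface_queue_interface example_impl = {
 *       .init        = __example_init,        // one-time setup
 *       .need_attach = __example_need_attach, // attach a surface when free_queue is empty
 *       .enqueue     = NULL,                  // use _tbm_surface_queue_enqueue()
 *       .dequeue     = NULL,                  // use _tbm_surface_queue_dequeue()
 *   };
 */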
629
630 tbm_surface_queue_error_e
631 tbm_surface_queue_add_destroy_cb(
632         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
633         void *data)
634 {
635         _tbm_surf_queue_mutex_lock();
636         _tbm_set_last_result(TBM_ERROR_NONE);
637
638         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
639                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
640         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
641                                TBM_ERROR_INVALID_PARAMETER);
642
643         pthread_mutex_lock(&surface_queue->lock);
644
645         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
646
647         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
648
649         pthread_mutex_unlock(&surface_queue->lock);
650
651         _tbm_surf_queue_mutex_unlock();
652
653         return TBM_SURFACE_QUEUE_ERROR_NONE;
654 }
655
656 tbm_surface_queue_error_e
657 tbm_surface_queue_remove_destroy_cb(
658         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
659         void *data)
660 {
661         _tbm_surf_queue_mutex_lock();
662         _tbm_set_last_result(TBM_ERROR_NONE);
663
664         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
665                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
666
667         pthread_mutex_lock(&surface_queue->lock);
668
669         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
670
671         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
672
673         pthread_mutex_unlock(&surface_queue->lock);
674
675         _tbm_surf_queue_mutex_unlock();
676
677         return TBM_SURFACE_QUEUE_ERROR_NONE;
678 }
679
680 tbm_surface_queue_error_e
681 tbm_surface_queue_add_dequeuable_cb(
682         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
683         void *data)
684 {
685         _tbm_surf_queue_mutex_lock();
686         _tbm_set_last_result(TBM_ERROR_NONE);
687
688         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
689                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
690         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
691                                TBM_ERROR_INVALID_PARAMETER);
692
693         pthread_mutex_lock(&surface_queue->lock);
694
695         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
696
697         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
698
699         pthread_mutex_unlock(&surface_queue->lock);
700
701         _tbm_surf_queue_mutex_unlock();
702
703         return TBM_SURFACE_QUEUE_ERROR_NONE;
704 }
705
706 tbm_surface_queue_error_e
707 tbm_surface_queue_remove_dequeuable_cb(
708         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
709         void *data)
710 {
711         _tbm_surf_queue_mutex_lock();
712         _tbm_set_last_result(TBM_ERROR_NONE);
713
714         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
715                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
716
717         pthread_mutex_lock(&surface_queue->lock);
718
719         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
720
721         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
722
723         pthread_mutex_unlock(&surface_queue->lock);
724
725         _tbm_surf_queue_mutex_unlock();
726
727         return TBM_SURFACE_QUEUE_ERROR_NONE;
728 }
729
730 tbm_surface_queue_error_e
731 tbm_surface_queue_add_dequeue_cb(
732         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
733         void *data)
734 {
735         _tbm_surf_queue_mutex_lock();
736         _tbm_set_last_result(TBM_ERROR_NONE);
737
738         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
739                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
740         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
741                                TBM_ERROR_INVALID_PARAMETER);
742
743         pthread_mutex_lock(&surface_queue->lock);
744
745         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
746
747         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
748
749         pthread_mutex_unlock(&surface_queue->lock);
750
751         _tbm_surf_queue_mutex_unlock();
752
753         return TBM_SURFACE_QUEUE_ERROR_NONE;
754 }
755
756 tbm_surface_queue_error_e
757 tbm_surface_queue_remove_dequeue_cb(
758         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
759         void *data)
760 {
761         _tbm_surf_queue_mutex_lock();
762         _tbm_set_last_result(TBM_ERROR_NONE);
763
764         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
765                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
766
767         pthread_mutex_lock(&surface_queue->lock);
768
769         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
770
771         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
772
773         pthread_mutex_unlock(&surface_queue->lock);
774
775         _tbm_surf_queue_mutex_unlock();
776
777         return TBM_SURFACE_QUEUE_ERROR_NONE;
778 }
779
780 tbm_surface_queue_error_e
781 tbm_surface_queue_add_can_dequeue_cb(
782         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
783         void *data)
784 {
785         _tbm_surf_queue_mutex_lock();
786         _tbm_set_last_result(TBM_ERROR_NONE);
787
788         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
789                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
790         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
791                                TBM_ERROR_INVALID_PARAMETER);
792
793         pthread_mutex_lock(&surface_queue->lock);
794
795         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
796
797         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
798
799         pthread_mutex_unlock(&surface_queue->lock);
800
801         _tbm_surf_queue_mutex_unlock();
802
803         return TBM_SURFACE_QUEUE_ERROR_NONE;
804 }
805
806 tbm_surface_queue_error_e
807 tbm_surface_queue_remove_can_dequeue_cb(
808         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
809         void *data)
810 {
811         _tbm_surf_queue_mutex_lock();
812         _tbm_set_last_result(TBM_ERROR_NONE);
813
814         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
815                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
816
817         pthread_mutex_lock(&surface_queue->lock);
818
819         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
820
821         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
822
823         pthread_mutex_unlock(&surface_queue->lock);
824
825         _tbm_surf_queue_mutex_unlock();
826
827         return TBM_SURFACE_QUEUE_ERROR_NONE;
828 }
829
830 tbm_surface_queue_error_e
831 tbm_surface_queue_add_acquirable_cb(
832         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
833         void *data)
834 {
835         _tbm_surf_queue_mutex_lock();
836         _tbm_set_last_result(TBM_ERROR_NONE);
837
838         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
839                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
840         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
841                                TBM_ERROR_INVALID_PARAMETER);
842
843         pthread_mutex_lock(&surface_queue->lock);
844
845         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
846
847         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
848
849         pthread_mutex_unlock(&surface_queue->lock);
850
851         _tbm_surf_queue_mutex_unlock();
852
853         return TBM_SURFACE_QUEUE_ERROR_NONE;
854 }
855
856 tbm_surface_queue_error_e
857 tbm_surface_queue_remove_acquirable_cb(
858         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
859         void *data)
860 {
861         _tbm_surf_queue_mutex_lock();
862         _tbm_set_last_result(TBM_ERROR_NONE);
863
864         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
865                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
866
867         pthread_mutex_lock(&surface_queue->lock);
868
869         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
870
871         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
872
873         pthread_mutex_unlock(&surface_queue->lock);
874
875         _tbm_surf_queue_mutex_unlock();
876
877         return TBM_SURFACE_QUEUE_ERROR_NONE;
878 }
879
880 tbm_surface_queue_error_e
881 tbm_surface_queue_add_trace_cb(
882         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
883         void *data)
884 {
885         _tbm_surf_queue_mutex_lock();
886         _tbm_set_last_result(TBM_ERROR_NONE);
887
888         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
889                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
890         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
891                                TBM_ERROR_INVALID_PARAMETER);
892
893         pthread_mutex_lock(&surface_queue->lock);
894
895         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
896
897         _trace_add(&surface_queue->trace_noti, trace_cb, data);
898
899         pthread_mutex_unlock(&surface_queue->lock);
900
901         _tbm_surf_queue_mutex_unlock();
902
903         return TBM_SURFACE_QUEUE_ERROR_NONE;
904 }
905
906 tbm_surface_queue_error_e
907 tbm_surface_queue_remove_trace_cb(
908         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
909         void *data)
910 {
911         _tbm_surf_queue_mutex_lock();
912         _tbm_set_last_result(TBM_ERROR_NONE);
913
914         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
915                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
916
917         pthread_mutex_lock(&surface_queue->lock);
918
919         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
920
921         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
922
923         pthread_mutex_unlock(&surface_queue->lock);
924
925         _tbm_surf_queue_mutex_unlock();
926
927         return TBM_SURFACE_QUEUE_ERROR_NONE;
928 }
929
930 tbm_surface_queue_error_e
931 tbm_surface_queue_set_alloc_cb(
932         tbm_surface_queue_h surface_queue,
933         tbm_surface_alloc_cb alloc_cb,
934         tbm_surface_free_cb free_cb,
935         void *data)
936 {
937         _tbm_surf_queue_mutex_lock();
938         _tbm_set_last_result(TBM_ERROR_NONE);
939
940         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
941                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
942
943         pthread_mutex_lock(&surface_queue->lock);
944
945         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
946
947         surface_queue->alloc_cb = alloc_cb;
948         surface_queue->free_cb = free_cb;
949         surface_queue->alloc_cb_data = data;
950
951         pthread_mutex_unlock(&surface_queue->lock);
952
953         _tbm_surf_queue_mutex_unlock();
954
955         return TBM_SURFACE_QUEUE_ERROR_NONE;
956 }
957
958 int
959 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
960 {
961         int width;
962
963         _tbm_surf_queue_mutex_lock();
964         _tbm_set_last_result(TBM_ERROR_NONE);
965
966         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
967
968         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
969
970         width = surface_queue->width;
971
972         _tbm_surf_queue_mutex_unlock();
973
974         return width;
975 }
976
977 int
978 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
979 {
980         int height;
981
982         _tbm_surf_queue_mutex_lock();
983         _tbm_set_last_result(TBM_ERROR_NONE);
984
985         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
986
987         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
988
989         height = surface_queue->height;
990
991         _tbm_surf_queue_mutex_unlock();
992
993         return height;
994 }
995
996 int
997 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
998 {
999         int format;
1000
1001         _tbm_surf_queue_mutex_lock();
1002         _tbm_set_last_result(TBM_ERROR_NONE);
1003
1004         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1005
1006         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1007
1008         format = surface_queue->format;
1009
1010         _tbm_surf_queue_mutex_unlock();
1011
1012         return format;
1013 }
1014
1015 int
1016 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1017 {
1018         int queue_size;
1019
1020         _tbm_surf_queue_mutex_lock();
1021         _tbm_set_last_result(TBM_ERROR_NONE);
1022
1023         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1024
1025         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1026
1027         queue_size = surface_queue->queue_size;
1028
1029         _tbm_surf_queue_mutex_unlock();
1030
1031         return queue_size;
1032 }
1033
1034 tbm_surface_queue_error_e
1035 tbm_surface_queue_add_reset_cb(
1036         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1037         void *data)
1038 {
1039         _tbm_surf_queue_mutex_lock();
1040         _tbm_set_last_result(TBM_ERROR_NONE);
1041
1042         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1043                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1044         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
1045                                TBM_ERROR_INVALID_PARAMETER);
1046
1047         pthread_mutex_lock(&surface_queue->lock);
1048
1049         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1050
1051         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1052
1053         pthread_mutex_unlock(&surface_queue->lock);
1054
1055         _tbm_surf_queue_mutex_unlock();
1056
1057         return TBM_SURFACE_QUEUE_ERROR_NONE;
1058 }
1059
1060 tbm_surface_queue_error_e
1061 tbm_surface_queue_remove_reset_cb(
1062         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1063         void *data)
1064 {
1065         _tbm_surf_queue_mutex_lock();
1066         _tbm_set_last_result(TBM_ERROR_NONE);
1067
1068         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1069                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1070
1071         pthread_mutex_lock(&surface_queue->lock);
1072
1073         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1074
1075         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1076
1077         pthread_mutex_unlock(&surface_queue->lock);
1078
1079         _tbm_surf_queue_mutex_unlock();
1080
1081         return TBM_SURFACE_QUEUE_ERROR_NONE;
1082 }
1083
1084 tbm_surface_queue_error_e
1085 tbm_surface_queue_enqueue(tbm_surface_queue_h
1086                           surface_queue, tbm_surface_h surface)
1087 {
1088         queue_node *node;
1089         int queue_type;
1090
1091         _tbm_surf_queue_mutex_lock();
1092         _tbm_set_last_result(TBM_ERROR_NONE);
1093
1094         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1095                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1096         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1097                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1098
1099         if (b_dump_queue)
1100                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1101
1102         pthread_mutex_lock(&surface_queue->lock);
1103
1104         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1105
1106         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1107         if (node == NULL || queue_type != NODE_LIST) {
1108                 TBM_ERR("tbm_surface_queue_enqueue::surface is unknown or still in the free_queue/dirty_queue. node:%p, type:%d\n",
1109                         node, queue_type);
1110                 pthread_mutex_unlock(&surface_queue->lock);
1111
1112                 _tbm_surf_queue_mutex_unlock();
1113
1114                 if (!node) {
1115                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1116                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1117                 } else {
1118                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1119                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1120                 }
1121         }
1122
1123         if (surface_queue->impl && surface_queue->impl->enqueue)
1124                 surface_queue->impl->enqueue(surface_queue, node);
1125         else
1126                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1127
1128         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1129                 TBM_ERR("enqueued surface(%p) but the surface isn't present in the dirty_queue\n", surface);
1130                 _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
1131                 pthread_mutex_unlock(&surface_queue->lock);
1132
1133                 _tbm_surf_queue_mutex_unlock();
1134                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1135         }
1136
1137         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1138
1139         if (surface_queue->enqueue_sync_count == 1) {
1140                 tbm_surface_info_s info;
1141                 int ret;
1142
1143                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1144                 if (ret == TBM_SURFACE_ERROR_NONE)
1145                         tbm_surface_unmap(surface);
1146         }
1147
1148         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1149
1150         pthread_mutex_unlock(&surface_queue->lock);
1151         pthread_cond_signal(&surface_queue->dirty_cond);
1152
1153         _tbm_surf_queue_mutex_unlock();
1154
1155         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1156
1157         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1158
1159         return TBM_SURFACE_QUEUE_ERROR_NONE;
1160 }
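
/*
 * Producer-side usage sketch (illustrative only, error handling trimmed):
 * a producer pairs dequeue with enqueue on the same queue handle.
 *
 *   tbm_surface_h surface;
 *
 *   if (tbm_surface_queue_can_dequeue(queue, 1) &&
 *       tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       // ...render into the surface...
 *       tbm_surface_queue_enqueue(queue, surface);
 *   }
 */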
1161
1162 tbm_surface_queue_error_e
1163 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1164                           surface_queue, tbm_surface_h surface)
1165 {
1166         queue_node *node;
1167         int queue_type;
1168
1169         _tbm_surf_queue_mutex_lock();
1170         _tbm_set_last_result(TBM_ERROR_NONE);
1171
1172         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1173                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1174         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1175                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1176
1177         pthread_mutex_lock(&surface_queue->lock);
1178
1179         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1180
1181         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1182         if (node == NULL || queue_type != NODE_LIST) {
1183                 TBM_ERR("tbm_surface_queue_cancel_dequeue::surface is unknown or still in the free_queue/dirty_queue. node:%p, type:%d\n",
1184                         node, queue_type);
1185                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1186                 pthread_mutex_unlock(&surface_queue->lock);
1187
1188                 _tbm_surf_queue_mutex_unlock();
1189                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1190         }
1191
1192         if (node->delete_pending) {
1193                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1194
1195                 _queue_delete_node(surface_queue, node);
1196
1197                 pthread_mutex_unlock(&surface_queue->lock);
1198
1199                 _tbm_surf_queue_mutex_unlock();
1200
1201                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1202
1203                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1204         }
1205
1206         if (surface_queue->queue_size < surface_queue->num_attached) {
1207                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1208
1209                 if (surface_queue->impl && surface_queue->impl->need_detach)
1210                         surface_queue->impl->need_detach(surface_queue, node);
1211                 else
1212                         _tbm_surface_queue_detach(surface_queue, surface);
1213
1214                 pthread_mutex_unlock(&surface_queue->lock);
1215
1216                 _tbm_surf_queue_mutex_unlock();
1217
1218                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1219
1220                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1221         }
1222
1223         if (surface_queue->impl && surface_queue->impl->release)
1224                 surface_queue->impl->release(surface_queue, node);
1225         else
1226                 _tbm_surface_queue_release(surface_queue, node, 1);
1227
1228         if (_queue_is_empty(&surface_queue->free_queue)) {
1229                 TBM_ERR("surface_queue->free_queue is empty.\n");
1230                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1231                 pthread_mutex_unlock(&surface_queue->lock);
1232
1233                 _tbm_surf_queue_mutex_unlock();
1234                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1235         }
1236
1237         node->type = QUEUE_NODE_TYPE_RELEASE;
1238
1239         pthread_mutex_unlock(&surface_queue->lock);
1240         pthread_cond_signal(&surface_queue->free_cond);
1241
1242         _tbm_surf_queue_mutex_unlock();
1243
1244         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1245
1246         return TBM_SURFACE_QUEUE_ERROR_NONE;
1247 }
1248
1249 tbm_surface_queue_error_e
1250 tbm_surface_queue_dequeue(tbm_surface_queue_h
1251                           surface_queue, tbm_surface_h *surface)
1252 {
1253         queue_node *node;
1254
1255         _tbm_surf_queue_mutex_lock();
1256         _tbm_set_last_result(TBM_ERROR_NONE);
1257
1258         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1259                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1260         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1261                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1262
1263         *surface = NULL;
1264
1265         pthread_mutex_lock(&surface_queue->lock);
1266
1267         if (_queue_is_empty(&surface_queue->free_queue)) {
1268                 if (surface_queue->impl && surface_queue->impl->need_attach)
1269                         surface_queue->impl->need_attach(surface_queue);
1270
1271                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1272                         TBM_ERR("surface_queue:%p is invalid", surface_queue);
1273                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1274                         pthread_mutex_unlock(&surface_queue->lock);
1275                         _tbm_surf_queue_mutex_unlock();
1276                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1277                 }
1278         }
1279
1280         if (surface_queue->impl && surface_queue->impl->dequeue)
1281                 node = surface_queue->impl->dequeue(surface_queue);
1282         else
1283                 node = _tbm_surface_queue_dequeue(surface_queue);
1284
1285         if (node == NULL || node->surface == NULL) {
1286                 TBM_ERR("_queue_node_pop_front failed\n");
1287                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1288                 pthread_mutex_unlock(&surface_queue->lock);
1289
1290                 _tbm_surf_queue_mutex_unlock();
1291                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1292         }
1293
1294         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1295         *surface = node->surface;
1296
1297         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1298
1299         pthread_mutex_unlock(&surface_queue->lock);
1300
1301         _tbm_surf_queue_mutex_unlock();
1302
1303         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1304
1305         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1306
1307         return TBM_SURFACE_QUEUE_ERROR_NONE;
1308 }
1309
1310 int
1311 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1312 {
1313         _tbm_surf_queue_mutex_lock();
1314         _tbm_set_last_result(TBM_ERROR_NONE);
1315
1316         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1317
1318         _tbm_surf_queue_mutex_unlock();
1319
1320         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1321
1322         _tbm_surf_queue_mutex_lock();
1323
1324         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1325
1326         pthread_mutex_lock(&surface_queue->lock);
1327
1328         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1329
1330         if (_queue_is_empty(&surface_queue->free_queue)) {
1331                 if (surface_queue->impl && surface_queue->impl->need_attach)
1332                         surface_queue->impl->need_attach(surface_queue);
1333
1334                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1335                         TBM_ERR("surface_queue:%p is invalid", surface_queue);
1336                         pthread_mutex_unlock(&surface_queue->lock);
1337                         _tbm_surf_queue_mutex_unlock();
1338                         return 0;
1339                 }
1340         }
1341
1342         if (!_queue_is_empty(&surface_queue->free_queue)) {
1343                 pthread_mutex_unlock(&surface_queue->lock);
1344                 _tbm_surf_queue_mutex_unlock();
1345                 return 1;
1346         }
1347
1348         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1349                                                 QUEUE_NODE_TYPE_ACQUIRE)) {
1350                 _tbm_surf_queue_mutex_unlock();
1351                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1352                 pthread_mutex_unlock(&surface_queue->lock);
1353                 return 1;
1354         }
1355
1356         pthread_mutex_unlock(&surface_queue->lock);
1357         _tbm_surf_queue_mutex_unlock();
1358         return 0;
1359 }
1360
1361 tbm_surface_queue_error_e
1362 tbm_surface_queue_release(tbm_surface_queue_h
1363                           surface_queue, tbm_surface_h surface)
1364 {
1365         queue_node *node;
1366         int queue_type;
1367
1368         _tbm_surf_queue_mutex_lock();
1369         _tbm_set_last_result(TBM_ERROR_NONE);
1370
1371         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1372                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1373         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1374                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1375
1376         pthread_mutex_lock(&surface_queue->lock);
1377
1378         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1379
1380         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1381         if (node == NULL || queue_type != NODE_LIST) {
1382                 TBM_ERR("tbm_surface_queue_release::surface is unknown or still in the free_queue/dirty_queue. node:%p, type:%d\n",
1383                         node, queue_type);
1384                 pthread_mutex_unlock(&surface_queue->lock);
1385
1386                 _tbm_surf_queue_mutex_unlock();
1387
1388                 if (!node) {
1389                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1390                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1391                 } else {
1392                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1393                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1394                 }
1395         }
1396
1397         if (node->delete_pending) {
1398                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1399
1400                 _queue_delete_node(surface_queue, node);
1401
1402                 pthread_mutex_unlock(&surface_queue->lock);
1403
1404                 _tbm_surf_queue_mutex_unlock();
1405
1406                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1407
1408                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1409         }
1410
1411         if (surface_queue->queue_size < surface_queue->num_attached) {
1412                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1413
1414                 if (surface_queue->impl && surface_queue->impl->need_detach)
1415                         surface_queue->impl->need_detach(surface_queue, node);
1416                 else
1417                         _tbm_surface_queue_detach(surface_queue, surface);
1418
1419                 pthread_mutex_unlock(&surface_queue->lock);
1420
1421                 _tbm_surf_queue_mutex_unlock();
1422
1423                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1424
1425                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1426         }
1427
1428         if (surface_queue->impl && surface_queue->impl->release)
1429                 surface_queue->impl->release(surface_queue, node);
1430         else
1431                 _tbm_surface_queue_release(surface_queue, node, 1);
1432
1433         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1434                 TBM_ERR("released surface(%p) but the surface isn't present in the free_queue\n", surface);
1435                 _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
1436                 pthread_mutex_unlock(&surface_queue->lock);
1437
1438                 _tbm_surf_queue_mutex_unlock();
1439                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1440         }
1441
1442         node->type = QUEUE_NODE_TYPE_RELEASE;
1443
1444         pthread_mutex_unlock(&surface_queue->lock);
1445         pthread_cond_signal(&surface_queue->free_cond);
1446
1447         _tbm_surf_queue_mutex_unlock();
1448
1449         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1450
1451         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1452
1453         return TBM_SURFACE_QUEUE_ERROR_NONE;
1454 }
1455
1456 tbm_surface_queue_error_e
1457 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1458                         surface_queue, tbm_surface_h surface)
1459 {
1460         queue_node *node;
1461         int queue_type;
1462
1463         _tbm_surf_queue_mutex_lock();
1464         _tbm_set_last_result(TBM_ERROR_NONE);
1465
1466         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1467                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1468         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1469                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1470
1471         pthread_mutex_lock(&surface_queue->lock);
1472
1473         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1474
1475         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1476         if (node == NULL || queue_type != NODE_LIST) {
1477                 TBM_ERR("tbm_surface_queue_cancel_acquire::surface is unknown or still in the free_queue/dirty_queue. node:%p, type:%d\n",
1478                         node, queue_type);
1479                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1480                 pthread_mutex_unlock(&surface_queue->lock);
1481
1482                 _tbm_surf_queue_mutex_unlock();
1483                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1484         }
1485
1486         if (surface_queue->impl && surface_queue->impl->enqueue)
1487                 surface_queue->impl->enqueue(surface_queue, node);
1488         else
1489                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1490
1491         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1492                 TBM_ERR("enqueued the surface but the dirty_queue is empty. node:%p\n", node);
1493                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1494                 pthread_mutex_unlock(&surface_queue->lock);
1495
1496                 _tbm_surf_queue_mutex_unlock();
1497                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1498         }
1499
1500         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1501
1502         pthread_mutex_unlock(&surface_queue->lock);
1503         pthread_cond_signal(&surface_queue->dirty_cond);
1504
1505         _tbm_surf_queue_mutex_unlock();
1506
1507         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1508
1509         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1510
1511         return TBM_SURFACE_QUEUE_ERROR_NONE;
1512 }
1513
1514 tbm_surface_queue_error_e
1515 tbm_surface_queue_acquire(tbm_surface_queue_h
1516                           surface_queue, tbm_surface_h *surface)
1517 {
1518         queue_node *node;
1519
1520         _tbm_surf_queue_mutex_lock();
1521         _tbm_set_last_result(TBM_ERROR_NONE);
1522
1523         *surface = NULL;
1524
1525         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1526                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1527         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1528                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1529
1530         pthread_mutex_lock(&surface_queue->lock);
1531
1532         if (surface_queue->impl && surface_queue->impl->acquire)
1533                 node = surface_queue->impl->acquire(surface_queue);
1534         else
1535                 node = _tbm_surface_queue_acquire(surface_queue);
1536
1537         if (node == NULL || node->surface == NULL) {
1538                 TBM_ERR("_queue_node_pop_front failed\n");
1539                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1540                 pthread_mutex_unlock(&surface_queue->lock);
1541
1542                 _tbm_surf_queue_mutex_unlock();
1543                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1544         }
1545
1546         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1547
1548         *surface = node->surface;
1549
1550         if (surface_queue->acquire_sync_count == 1) {
1551                 tbm_surface_info_s info;
1552                 int ret;
1553
1554                 TBM_ERR("start map surface:%p", *surface);
1555                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1556                 TBM_ERR("end map surface:%p", *surface);
1557                 if (ret == TBM_SURFACE_ERROR_NONE)
1558                         tbm_surface_unmap(*surface);
1559         }
1560
1561         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1562
1563         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1564
1565         pthread_mutex_unlock(&surface_queue->lock);
1566
1567         _tbm_surf_queue_mutex_unlock();
1568
1569         if (b_dump_queue)
1570                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1571
1572         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1573
1574         return TBM_SURFACE_QUEUE_ERROR_NONE;
1575 }
1576
1577 int
1578 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1579 {
1580         _tbm_surf_queue_mutex_lock();
1581         _tbm_set_last_result(TBM_ERROR_NONE);
1582
1583         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1584
1585         pthread_mutex_lock(&surface_queue->lock);
1586
1587         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1588
1589         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1590                 pthread_mutex_unlock(&surface_queue->lock);
1591                 _tbm_surf_queue_mutex_unlock();
1592                 return 1;
1593         }
1594
1595         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1596                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1597                 _tbm_surf_queue_mutex_unlock();
1598                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1599                 pthread_mutex_unlock(&surface_queue->lock);
1600                 return 1;
1601         }
1602
1603         pthread_mutex_unlock(&surface_queue->lock);
1604         _tbm_surf_queue_mutex_unlock();
1605         return 0;
1606 }
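
/*
 * Consumer-side usage sketch (kept in a comment so it is not compiled into
 * the library). It assumes a queue created elsewhere with
 * tbm_surface_queue_create(); display_frame() is a hypothetical helper.
 *
 *   static void consume_one_frame(tbm_surface_queue_h queue)
 *   {
 *           tbm_surface_h surface = NULL;
 *
 *           // Wait until a producer has enqueued a buffer.
 *           if (!tbm_surface_queue_can_acquire(queue, 1))
 *                   return;
 *
 *           if (tbm_surface_queue_acquire(queue, &surface) !=
 *               TBM_SURFACE_QUEUE_ERROR_NONE)
 *                   return;
 *
 *           display_frame(surface);        // hypothetical consumer work
 *
 *           // Hand the buffer back so it can be dequeued again.
 *           tbm_surface_queue_release(queue, surface);
 *   }
 */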
1607
1608 void
1609 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1610 {
1611         queue_node *node = NULL, *tmp;
1612
1613         _tbm_surf_queue_mutex_lock();
1614         _tbm_set_last_result(TBM_ERROR_NONE);
1615
1616         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1617
1618         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1619
1620         LIST_DEL(&surface_queue->item_link);
1621
1622         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1623                 _queue_delete_node(surface_queue, node);
1624
1625         if (surface_queue->impl && surface_queue->impl->destroy)
1626                 surface_queue->impl->destroy(surface_queue);
1627
1628         _notify_emit(surface_queue, &surface_queue->destory_noti);
1629
1630         _notify_remove_all(&surface_queue->destory_noti);
1631         _notify_remove_all(&surface_queue->dequeuable_noti);
1632         _notify_remove_all(&surface_queue->dequeue_noti);
1633         _notify_remove_all(&surface_queue->can_dequeue_noti);
1634         _notify_remove_all(&surface_queue->acquirable_noti);
1635         _notify_remove_all(&surface_queue->reset_noti);
1636         _trace_remove_all(&surface_queue->trace_noti);
1637
1638         pthread_mutex_destroy(&surface_queue->lock);
1639
1640         free(surface_queue);
1641
1642         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1643                 _deinit_tbm_surf_queue_bufmgr();
1644
1645         _tbm_surf_queue_mutex_unlock();
1646 }
1647
1648 tbm_surface_queue_error_e
1649 tbm_surface_queue_reset(tbm_surface_queue_h
1650                         surface_queue, int width, int height, int format)
1651 {
1652         queue_node *node = NULL, *tmp;
1653
1654         _tbm_surf_queue_mutex_lock();
1655         _tbm_set_last_result(TBM_ERROR_NONE);
1656
1657         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1658                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1659
1660         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1661
1662         if (width == surface_queue->width && height == surface_queue->height &&
1663                 format == surface_queue->format) {
1664                 _tbm_surf_queue_mutex_unlock();
1665                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1666         }
1667
1668         pthread_mutex_lock(&surface_queue->lock);
1669
1670         surface_queue->width = width;
1671         surface_queue->height = height;
1672         surface_queue->format = format;
1673
1674         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1675                 /* Destroy the surfaces in free_queue; mark in-flight nodes delete_pending */
1676                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1677                         _queue_delete_node(surface_queue, node);
1678
1679                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1680                         node->delete_pending = 1;
1681         } else {
1682                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1683                         _queue_delete_node(surface_queue, node);
1684
1685                 _queue_init(&surface_queue->dirty_queue);
1686                 LIST_INITHEAD(&surface_queue->list);
1687         }
1688
1689         /* Reset queue */
1690         _queue_init(&surface_queue->free_queue);
1691
1692         surface_queue->num_attached = 0;
1693
1694         if (surface_queue->impl && surface_queue->impl->reset)
1695                 surface_queue->impl->reset(surface_queue);
1696
1697         pthread_mutex_unlock(&surface_queue->lock);
1698         pthread_cond_signal(&surface_queue->free_cond);
1699
1700         _tbm_surf_queue_mutex_unlock();
1701
1702         _notify_emit(surface_queue, &surface_queue->reset_noti);
1703
1704         return TBM_SURFACE_QUEUE_ERROR_NONE;
1705 }
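
/*
 * Usage sketch for tbm_surface_queue_reset() (comment only, not compiled).
 * A compositor might call it when the output geometry changes: attached
 * buffers are dropped (or marked delete_pending in
 * TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE mode) and the reset notification is
 * emitted so the producer re-dequeues at the new size. new_w and new_h are
 * assumptions for the example.
 *
 *   if (tbm_surface_queue_reset(queue, new_w, new_h, TBM_FORMAT_ARGB8888) !=
 *       TBM_SURFACE_QUEUE_ERROR_NONE)
 *           TBM_ERR("failed to reset the surface queue\n");
 */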
1706
1707 tbm_surface_queue_error_e
1708 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1709 {
1710         _tbm_surf_queue_mutex_lock();
1711         _tbm_set_last_result(TBM_ERROR_NONE);
1712
1713         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1714                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1715
1716         _tbm_surf_queue_mutex_unlock();
1717
1718         _notify_emit(surface_queue, &surface_queue->reset_noti);
1719
1720         return TBM_SURFACE_QUEUE_ERROR_NONE;
1721 }
1722
1723 tbm_surface_queue_error_e
1724 tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
1725 {
1726         _tbm_surf_queue_mutex_lock();
1727         _tbm_set_last_result(TBM_ERROR_NONE);
1728
1729         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1730                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1731
1732         _tbm_surf_queue_mutex_unlock();
1733
1734         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1735
1736         return TBM_SURFACE_QUEUE_ERROR_NONE;
1737 }
1738
1739 tbm_surface_queue_error_e
1740 tbm_surface_queue_set_size(tbm_surface_queue_h
1741                         surface_queue, int queue_size, int flush)
1742 {
1743         queue_node *node = NULL, *tmp;
1744
1745         _tbm_surf_queue_mutex_lock();
1746         _tbm_set_last_result(TBM_ERROR_NONE);
1747
1748         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1749                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1750         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1751                                         TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1752
1753         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1754
1755         if ((surface_queue->queue_size == queue_size) && !flush) {
1756                 _tbm_surf_queue_mutex_unlock();
1757                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1758         }
1759
1760         pthread_mutex_lock(&surface_queue->lock);
1761
1762         if (flush) {
1763                 surface_queue->queue_size = queue_size;
1764
1765                 if (surface_queue->num_attached == 0) {
1766                         pthread_mutex_unlock(&surface_queue->lock);
1767                         _tbm_surf_queue_mutex_unlock();
1768                         return TBM_SURFACE_QUEUE_ERROR_NONE;
1769                 }
1770
1771                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1772                         /* Destroy the surfaces in free_queue; mark in-flight nodes delete_pending */
1773                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1774                                 _queue_delete_node(surface_queue, node);
1775
1776                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1777                                 node->delete_pending = 1;
1778                 } else {
1779                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1780                                 _queue_delete_node(surface_queue, node);
1781
1782                         _queue_init(&surface_queue->dirty_queue);
1783                         LIST_INITHEAD(&surface_queue->list);
1784                 }
1785
1786                 /* Reset queue */
1787                 _queue_init(&surface_queue->free_queue);
1788
1789                 surface_queue->num_attached = 0;
1790
1791                 if (surface_queue->impl && surface_queue->impl->reset)
1792                         surface_queue->impl->reset(surface_queue);
1793
1794                 pthread_mutex_unlock(&surface_queue->lock);
1795                 pthread_cond_signal(&surface_queue->free_cond);
1796
1797                 _tbm_surf_queue_mutex_unlock();
1798
1799                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1800
1801                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1802         } else {
1803                 if (surface_queue->queue_size > queue_size) {
1804                         int need_del = surface_queue->queue_size - queue_size;
1805
1806                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1807                                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1808
1809                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1810                                         surface_queue->impl->need_detach(surface_queue, node);
1811                                 else
1812                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1813
1814                                 need_del--;
1815                                 if (need_del == 0)
1816                                         break;
1817                         }
1818                 }
1819
1820                 surface_queue->queue_size = queue_size;
1821
1822                 pthread_mutex_unlock(&surface_queue->lock);
1823
1824                 _tbm_surf_queue_mutex_unlock();
1825
1826                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1827         }
1828 }
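
/*
 * Usage sketch for tbm_surface_queue_set_size() (comment only, not compiled).
 * With flush == 0 the queue shrinks lazily by detaching free buffers only;
 * with flush == 1 every attached buffer is dropped and the reset notification
 * is emitted, much like tbm_surface_queue_flush().
 *
 *   // Grow to triple buffering without touching in-flight surfaces.
 *   tbm_surface_queue_set_size(queue, 3, 0);
 *
 *   // Shrink to double buffering and start over from an empty queue.
 *   tbm_surface_queue_set_size(queue, 2, 1);
 */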
1829
1830 tbm_surface_queue_error_e
1831 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1832 {
1833         queue_node *node = NULL;
1834
1835         _tbm_surf_queue_mutex_lock();
1836         _tbm_set_last_result(TBM_ERROR_NONE);
1837
1838         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1839                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1840
1841         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1842
1843         if (surface_queue->num_attached == 0) {
1844                 _tbm_surf_queue_mutex_unlock();
1845                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1846         }
1847
1848         pthread_mutex_lock(&surface_queue->lock);
1849
1850         /* Destroy the surfaces in free_queue */
1851         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1852                 if (surface_queue->impl && surface_queue->impl->need_detach)
1853                         surface_queue->impl->need_detach(surface_queue, node);
1854                 else
1855                         _tbm_surface_queue_detach(surface_queue, node->surface);
1856         }
1857
1858         /* Reset queue */
1859         _queue_init(&surface_queue->free_queue);
1860
1861         pthread_mutex_unlock(&surface_queue->lock);
1862         _tbm_surf_queue_mutex_unlock();
1863
1864         return TBM_SURFACE_QUEUE_ERROR_NONE;
1865 }
1866
1867 tbm_surface_queue_error_e
1868 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1869 {
1870         queue_node *node = NULL, *tmp;
1871
1872         _tbm_surf_queue_mutex_lock();
1873         _tbm_set_last_result(TBM_ERROR_NONE);
1874
1875         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1876                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1877
1878         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1879
1880         if (surface_queue->num_attached == 0) {
1881                 _tbm_surf_queue_mutex_unlock();
1882                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1883         }
1884
1885         pthread_mutex_lock(&surface_queue->lock);
1886
1887         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1888                 /* Destroy the surfaces in free_queue; mark in-flight nodes delete_pending */
1889                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1890                         _queue_delete_node(surface_queue, node);
1891
1892                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1893                         node->delete_pending = 1;
1894         } else {
1895                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1896                         _queue_delete_node(surface_queue, node);
1897
1898                 _queue_init(&surface_queue->dirty_queue);
1899                 LIST_INITHEAD(&surface_queue->list);
1900         }
1901
1902         /* Reset queue */
1903         _queue_init(&surface_queue->free_queue);
1904
1905         surface_queue->num_attached = 0;
1906
1907         if (surface_queue->impl && surface_queue->impl->reset)
1908                 surface_queue->impl->reset(surface_queue);
1909
1910         pthread_mutex_unlock(&surface_queue->lock);
1911         pthread_cond_signal(&surface_queue->free_cond);
1912
1913         _tbm_surf_queue_mutex_unlock();
1914
1915         _notify_emit(surface_queue, &surface_queue->reset_noti);
1916
1917         return TBM_SURFACE_QUEUE_ERROR_NONE;
1918 }
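
/*
 * Comparison sketch (comment only, not compiled): tbm_surface_queue_free_flush()
 * detaches only the idle buffers sitting in free_queue, while
 * tbm_surface_queue_flush() discards every attached buffer and emits the
 * reset notification.
 *
 *   // Trim idle buffers, e.g. when the client goes to the background.
 *   tbm_surface_queue_free_flush(queue);
 *
 *   // Full restart, e.g. after the producer loses its rendering context.
 *   tbm_surface_queue_flush(queue);
 */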
1919
1920 tbm_surface_queue_error_e
1921 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1922                         tbm_surface_h *surfaces, int *num)
1923 {
1924         queue_node *node = NULL;
1925
1926         _tbm_surf_queue_mutex_lock();
1927         _tbm_set_last_result(TBM_ERROR_NONE);
1928
1929         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1930                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1931         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1932                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1933
1934         *num = 0;
1935
1936         pthread_mutex_lock(&surface_queue->lock);
1937
1938         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1939                 if (node->delete_pending) continue;
1940
1941                 if (surfaces)
1942                         surfaces[*num] = node->surface;
1943
1944                 *num = *num + 1;
1945         }
1946
1947         pthread_mutex_unlock(&surface_queue->lock);
1948
1949         _tbm_surf_queue_mutex_unlock();
1950
1951         return TBM_SURFACE_QUEUE_ERROR_NONE;
1952 }
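
/*
 * Usage sketch for tbm_surface_queue_get_surfaces() (comment only, not
 * compiled). Passing NULL for the array first is a convenient way to size
 * the allocation; a second call fills it.
 *
 *   int num = 0;
 *   tbm_surface_h *surfaces;
 *
 *   tbm_surface_queue_get_surfaces(queue, NULL, &num);
 *   surfaces = calloc(num, sizeof(tbm_surface_h));
 *   if (surfaces) {
 *           tbm_surface_queue_get_surfaces(queue, surfaces, &num);
 *           // ... inspect the attached surfaces ...
 *           free(surfaces);
 *   }
 */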
1953
1954 tbm_surface_queue_error_e
1955 tbm_surface_queue_get_trace_surface_num(
1956                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1957 {
1958         _tbm_surf_queue_mutex_lock();
1959         _tbm_set_last_result(TBM_ERROR_NONE);
1960
1961         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1962                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1963         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1964                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1965
1966         *num = 0;
1967
1968         pthread_mutex_lock(&surface_queue->lock);
1969
1970         switch (trace) {
1971         case TBM_SURFACE_QUEUE_TRACE_NONE:
1972                 *num = 0;
1973                 break;
1974         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1975                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1976                 break;
1977         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1978                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1979                 break;
1980         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1981                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1982                 break;
1983         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1984                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1985                 break;
1986         default:
1987                 break;
1988         }
1989
1990         pthread_mutex_unlock(&surface_queue->lock);
1991
1992         _tbm_surf_queue_mutex_unlock();
1993
1994         return TBM_SURFACE_QUEUE_ERROR_NONE;
1995 }
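
/*
 * Usage sketch for tbm_surface_queue_get_trace_surface_num() (comment only).
 * It reports how many attached buffers are currently in a given state, which
 * is mainly useful when debugging buffer leaks on the producer or consumer
 * side.
 *
 *   int dequeued = 0;
 *
 *   tbm_surface_queue_get_trace_surface_num(queue,
 *                           TBM_SURFACE_QUEUE_TRACE_DEQUEUE, &dequeued);
 *   TBM_DBG("buffers currently held by the producer: %d\n", dequeued);
 */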
1996
1997 typedef struct {
1998         int flags;
1999 } tbm_queue_default;
2000
2001 static void
2002 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
2003 {
2004         free(surface_queue->impl_data);
2005 }
2006
2007 static void
2008 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
2009 {
2010         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
2011         tbm_surface_h surface;
2012
2013         if (surface_queue->queue_size == surface_queue->num_attached)
2014                 return;
2015
2016         if (surface_queue->alloc_cb) {
2017                 pthread_mutex_unlock(&surface_queue->lock);
2018                 _tbm_surf_queue_mutex_unlock();
2019                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2020                 _tbm_surf_queue_mutex_lock();
2021                 pthread_mutex_lock(&surface_queue->lock);
2022
2023                 /* silent return */
2024                 if (!surface)
2025                         return;
2026
2027                 tbm_surface_internal_ref(surface);
2028         } else {
2029                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2030                                 surface_queue->height,
2031                                 surface_queue->format,
2032                                 data->flags);
2033                 TBM_RETURN_IF_FAIL(surface != NULL);
2034         }
2035
2036         _tbm_surface_queue_attach(surface_queue, surface);
2037         tbm_surface_internal_unref(surface);
2038 }
2039
2040 static const tbm_surface_queue_interface tbm_queue_default_impl = {
2041         NULL,                           /*__tbm_queue_default_init*/
2042         NULL,                           /*__tbm_queue_default_reset*/
2043         __tbm_queue_default_destroy,
2044         __tbm_queue_default_need_attach,
2045         NULL,                           /*__tbm_queue_default_enqueue*/
2046         NULL,                           /*__tbm_queue_default_release*/
2047         NULL,                           /*__tbm_queue_default_dequeue*/
2048         NULL,                           /*__tbm_queue_default_acquire*/
2049         NULL,                           /*__tbm_queue_default_need_detach*/
2050 };
2051
2052 tbm_surface_queue_h
2053 tbm_surface_queue_create(int queue_size, int width,
2054                          int height, int format, int flags)
2055 {
2056         _tbm_surf_queue_mutex_lock();
2057         _tbm_set_last_result(TBM_ERROR_NONE);
2058
2059         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2060         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2061         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2062         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2063
2064         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2065                                             sizeof(struct _tbm_surface_queue));
2066         if (!surface_queue) {
2067                 TBM_ERR("cannot allocate the surface_queue.\n");
2068                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2069                 _tbm_surf_queue_mutex_unlock();
2070                 return NULL;
2071         }
2072
2073         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2074
2075         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
2076                                   sizeof(tbm_queue_default));
2077         if (data == NULL) {
2078                 TBM_ERR("cannot allocate the tbm_queue_default.\n");
2079                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2080                 free(surface_queue);
2081                 _tbm_surf_queue_mutex_unlock();
2082                 return NULL;
2083         }
2084
2085         data->flags = flags;
2086         _tbm_surface_queue_init(surface_queue,
2087                                 queue_size,
2088                                 width, height, format,
2089                                 &tbm_queue_default_impl, data);
2090
2091         _tbm_surf_queue_mutex_unlock();
2092
2093         return surface_queue;
2094 }
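
/*
 * Producer-side usage sketch for the default queue (comment only, not
 * compiled). render_frame() is a hypothetical drawing routine; the size,
 * format and flags are example values.
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_create(3, 720, 1280, TBM_FORMAT_ARGB8888,
 *                                    TBM_BO_DEFAULT);
 *   tbm_surface_h surface = NULL;
 *
 *   if (tbm_surface_queue_dequeue(queue, &surface) ==
 *       TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           render_frame(surface);         // hypothetical producer work
 *           tbm_surface_queue_enqueue(queue, surface);
 *   }
 *
 *   tbm_surface_queue_destroy(queue);
 */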
2095
2096 typedef struct {
2097         int flags;
2098         queue dequeue_list;
2099 } tbm_queue_sequence;
2100
2101 static void
2102 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2103 {
2104         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2105
2106         _queue_init(&data->dequeue_list);
2107 }
2108
2109 static void
2110 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2111 {
2112         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2113
2114         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2115                 return;
2116
2117         _queue_init(&data->dequeue_list);
2118 }
2119
2120 static void
2121 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2122 {
2123         free(surface_queue->impl_data);
2124 }
2125
2126 static void
2127 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
2128 {
2129         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2130         tbm_surface_h surface;
2131
2132         if (surface_queue->queue_size == surface_queue->num_attached)
2133                 return;
2134
2135         if (surface_queue->alloc_cb) {
2136                 pthread_mutex_unlock(&surface_queue->lock);
2137                 _tbm_surf_queue_mutex_unlock();
2138                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2139                 _tbm_surf_queue_mutex_lock();
2140                 pthread_mutex_lock(&surface_queue->lock);
2141
2142                 /* silent return */
2143                 if (!surface)
2144                         return;
2145
2146                 tbm_surface_internal_ref(surface);
2147         } else {
2148                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2149                                 surface_queue->height,
2150                                 surface_queue->format,
2151                                 data->flags);
2152                 TBM_RETURN_IF_FAIL(surface != NULL);
2153         }
2154
2155         _tbm_surface_queue_attach(surface_queue, surface);
2156         tbm_surface_internal_unref(surface);
2157 }
2158
2159 static void
2160 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2161                              queue_node *node)
2162 {
2163         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2164         queue_node *first = NULL;
2165
2166         first = container_of(data->dequeue_list.head.next, first, item_link);
2167         if (first != node) {
2168                 return;
2169         }
2170
2171         node->priv_flags = 0;
2172
2173         _queue_node_pop(&data->dequeue_list, node);
2174         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2175 }
2176
2177 static void
2178 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2179                                 queue_node *node)
2180 {
2181         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2182
2183         if (node->priv_flags) {
2184                 node->priv_flags = 0;
2185                 _queue_node_pop(&data->dequeue_list, node);
2186         }
2187
2188         _tbm_surface_queue_release(surface_queue, node, 1);
2189 }
2190
2191 static queue_node *
2192 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2193                              surface_queue)
2194 {
2195         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2196         queue_node *node;
2197
2198         node = _tbm_surface_queue_dequeue(surface_queue);
2199         if (node) {
2200                 _queue_node_push_back(&data->dequeue_list, node);
2201                 node->priv_flags = 1;
2202         }
2203
2204         return node;
2205 }
2206
2207 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2208         __tbm_queue_sequence_init,
2209         __tbm_queue_sequence_reset,
2210         __tbm_queue_sequence_destroy,
2211         __tbm_queue_sequence_need_attach,
2212         __tbm_queue_sequence_enqueue,
2213         __tbm_queue_sequence_release,
2214         __tbm_queue_sequence_dequeue,
2215         NULL,                                   /*__tbm_queue_sequence_acquire*/
2216         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2217 };
2218
2219 tbm_surface_queue_h
2220 tbm_surface_queue_sequence_create(int queue_size, int width,
2221                                   int height, int format, int flags)
2222 {
2223         _tbm_surf_queue_mutex_lock();
2224         _tbm_set_last_result(TBM_ERROR_NONE);
2225
2226         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2227         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2228         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2229         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2230
2231         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2232                                             sizeof(struct _tbm_surface_queue));
2233         if (surface_queue == NULL) {
2234                 TBM_ERR("cannot allocate the surface_queue.\n");
2235                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2236                 _tbm_surf_queue_mutex_unlock();
2237                 return NULL;
2238         }
2239
2240         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2241
2242         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2243                                    sizeof(tbm_queue_sequence));
2244         if (data == NULL) {
2245                 TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
2246                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2247                 free(surface_queue);
2248                 _tbm_surf_queue_mutex_unlock();
2249                 return NULL;
2250         }
2251
2252         data->flags = flags;
2253         _tbm_surface_queue_init(surface_queue,
2254                                 queue_size,
2255                                 width, height, format,
2256                                 &tbm_queue_sequence_impl, data);
2257
2258         _tbm_surf_queue_mutex_unlock();
2259
2260         return surface_queue;
2261 }
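
/*
 * Note on the sequence queue (comment only): unlike the default queue, the
 * sequence variant records the dequeue order in dequeue_list and only moves
 * a buffer to the dirty queue when it is the oldest outstanding dequeued
 * buffer, so the consumer sees frames in the order they were handed out.
 * Creation mirrors the default queue:
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_sequence_create(3, 720, 1280,
 *                                             TBM_FORMAT_ARGB8888,
 *                                             TBM_BO_DEFAULT);
 */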
2262
2263 tbm_surface_queue_error_e
2264 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2265                                   int modes)
2266 {
2267         _tbm_surf_queue_mutex_lock();
2268         _tbm_set_last_result(TBM_ERROR_NONE);
2269
2270         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2271                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2272
2273         pthread_mutex_lock(&surface_queue->lock);
2274
2275         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2276                 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
2277         else
2278                 surface_queue->modes |= modes;
2279
2280         pthread_mutex_unlock(&surface_queue->lock);
2281
2282         _tbm_surf_queue_mutex_unlock();
2283
2284         return TBM_SURFACE_QUEUE_ERROR_NONE;
2285 }
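
/*
 * Usage sketch for tbm_surface_queue_set_modes() (comment only). Modes are
 * OR-ed into the queue's current mode set; passing
 * TBM_SURFACE_QUEUE_MODE_NONE clears it.
 *
 *   // Do not destroy in-flight buffers on reset/flush; delete them once the
 *   // consumer releases them.
 *   tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE);
 */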
2286
2287 tbm_surface_queue_error_e
2288 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2289                                   unsigned int sync_count)
2290 {
2291         int dequeue_num, enqueue_num;
2292
2293         _tbm_surf_queue_mutex_lock();
2294         _tbm_set_last_result(TBM_ERROR_NONE);
2295
2296         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2297                                    TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2298
2299         pthread_mutex_lock(&surface_queue->lock);
2300
2301         dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2302         enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2303
2304         if (dequeue_num + sync_count == 0)
2305                 surface_queue->acquire_sync_count = enqueue_num;
2306         else
2307                 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2308
2309         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2310                                 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2311
2312         pthread_mutex_unlock(&surface_queue->lock);
2313
2314         _tbm_surf_queue_mutex_unlock();
2315
2316         return TBM_SURFACE_QUEUE_ERROR_NONE;
2317 }
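
/*
 * Usage sketch for tbm_surface_queue_set_sync_count() (comment only). Roughly
 * speaking, the call arms the enqueue/acquire sync counters relative to the
 * buffers currently dequeued and enqueued; once acquire_sync_count reaches 1,
 * tbm_surface_queue_acquire() maps and unmaps the buffer for reading before
 * returning, forcing a wait until rendering on that buffer has finished.
 *
 *   // Request synchronization on an upcoming frame.
 *   tbm_surface_queue_set_sync_count(queue, 1);
 */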