introduce tbm_surface_queue_can_dequeue_wait_timeout
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
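/*
	g_surf_queue_bufmgr keeps the list of every live tbm_surface_queue
	(surf_queue_list), and tbm_surf_queue_lock serializes the public
	entry points of this file around that list.
*/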
41 static tbm_bufmgr g_surf_queue_bufmgr;
42 static pthread_mutex_t tbm_surf_queue_lock;
43 void _tbm_surface_queue_mutex_unlock(void);
44
45 /* check condition */
46 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
47         if (!(cond)) {\
48                 TBM_ERR("'%s' failed.\n", #cond);\
49                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
50                 _tbm_surf_queue_mutex_unlock();\
51                 return;\
52         } \
53 }
54
55 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
56         if (!(cond)) {\
57                 TBM_ERR("'%s' failed.\n", #cond);\
58                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
59                 _tbm_surf_queue_mutex_unlock();\
60                 return val;\
61         } \
62 }
63
64 typedef enum _queue_node_type {
65         QUEUE_NODE_TYPE_NONE,
66         QUEUE_NODE_TYPE_DEQUEUE,
67         QUEUE_NODE_TYPE_ENQUEUE,
68         QUEUE_NODE_TYPE_ACQUIRE,
69         QUEUE_NODE_TYPE_RELEASE
70 } Queue_Node_Type;
71
72 typedef struct {
73         struct list_head head;
74         int count;
75 } queue;
76
77 typedef struct {
78         tbm_surface_h surface;
79
80         struct list_head item_link;
81         struct list_head link;
82
83         Queue_Node_Type type;
84
85         unsigned int priv_flags;        /*for each queue*/
86
87         int delete_pending;
88 } queue_node;
89
90 typedef struct {
91         struct list_head link;
92
93         tbm_surface_queue_notify_cb cb;
94         void *data;
95 } queue_notify;
96
97 typedef struct {
98         struct list_head link;
99
100         tbm_surface_queue_trace_cb cb;
101         void *data;
102 } queue_trace;
103
104 typedef struct _tbm_surface_queue_interface {
105         void (*init)(tbm_surface_queue_h queue);
106         void (*reset)(tbm_surface_queue_h queue);
107         void (*destroy)(tbm_surface_queue_h queue);
108         void (*need_attach)(tbm_surface_queue_h queue);
109
110         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
111         void (*release)(tbm_surface_queue_h queue, queue_node *node);
112         queue_node *(*dequeue)(tbm_surface_queue_h queue);
113         queue_node *(*acquire)(tbm_surface_queue_h queue);
114         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
115 } tbm_surface_queue_interface;
116
117 struct _tbm_surface_queue {
118         int width;
119         int height;
120         int format;
121         int queue_size;
122         int num_attached;
123
124         queue free_queue;
125         queue dirty_queue;
126         struct list_head list;
127
128         struct list_head destory_noti;
129         struct list_head dequeuable_noti;
130         struct list_head dequeue_noti;
131         struct list_head can_dequeue_noti;
132         struct list_head acquirable_noti;
133         struct list_head reset_noti;
134         struct list_head trace_noti;
135
136         pthread_mutex_t lock;
137         pthread_cond_t free_cond;
138         pthread_cond_t dirty_cond;
139
140         const tbm_surface_queue_interface *impl;
141         void *impl_data;
142
143         //For external buffer allocation
144         tbm_surface_alloc_cb alloc_cb;
145         tbm_surface_free_cb free_cb;
146         void *alloc_cb_data;
147
148         struct list_head item_link; /* link of surface queue */
149
150         int modes;
151         unsigned int enqueue_sync_count;
152         unsigned int acquire_sync_count;
153 };
154
155 static bool
156 _tbm_surf_queue_mutex_init(void)
157 {
158         static bool tbm_surf_queue_mutex_init = false;
159
160         if (tbm_surf_queue_mutex_init)
161                 return true;
162
163         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
164                 TBM_ERR("fail: pthread_mutex_init\n");
165                 return false;
166         }
167
168         tbm_surf_queue_mutex_init = true;
169
170         return true;
171 }
172
173 static void
174 _tbm_surf_queue_mutex_lock(void)
175 {
176         if (!_tbm_surf_queue_mutex_init()) {
177                 TBM_ERR("fail: _tbm_surf_queue_mutex_init\n");
178                 return;
179         }
180
181         pthread_mutex_lock(&tbm_surf_queue_lock);
182 }
183
184 static void
185 _tbm_surf_queue_mutex_unlock(void)
186 {
187         pthread_mutex_unlock(&tbm_surf_queue_lock);
188 }
189
190 static void
191 _init_tbm_surf_queue_bufmgr(void)
192 {
193         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
194 }
195
196 static void
197 _deinit_tbm_surf_queue_bufmgr(void)
198 {
199         if (!g_surf_queue_bufmgr)
200                 return;
201
202         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
203         g_surf_queue_bufmgr = NULL;
204 }
205
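/*
	Check that surface_queue is still registered in
	g_surf_queue_bufmgr->surf_queue_list; returns 1 when valid,
	0 otherwise. Called with tbm_surf_queue_lock held.
*/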
206 static int
207 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
208 {
209         tbm_surface_queue_h old_data = NULL;
210
211         if (surface_queue == NULL) {
212                 TBM_ERR("error: surface_queue is NULL.\n");
213                 return 0;
214         }
215
216         if (g_surf_queue_bufmgr == NULL) {
217                 TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n");
218                 return 0;
219         }
220
221         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
222                 TBM_ERR("error: surf_queue_list is empty\n");
223                 return 0;
224         }
225
226         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
227                                 item_link) {
228                 if (old_data == surface_queue) {
229                         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
230                         return 1;
231                 }
232         }
233
234         TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
235
236         return 0;
237 }
238
239 static queue_node *
240 _queue_node_create(void)
241 {
242         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
243
244         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
245
246         return node;
247 }
248
249 static void
250 _queue_node_delete(queue_node *node)
251 {
252         LIST_DEL(&node->item_link);
253         LIST_DEL(&node->link);
254         free(node);
255 }
256
257 static int
258 _queue_is_empty(queue *queue)
259 {
260         if (LIST_IS_EMPTY(&queue->head))
261                 return 1;
262
263         return 0;
264 }
265
266 static void
267 _queue_node_push_back(queue *queue, queue_node *node)
268 {
269         LIST_ADDTAIL(&node->item_link, &queue->head);
270         queue->count++;
271 }
272
273 static void
274 _queue_node_push_front(queue *queue, queue_node *node)
275 {
276         LIST_ADD(&node->item_link, &queue->head);
277         queue->count++;
278 }
279
280 static queue_node *
281 _queue_node_pop_front(queue *queue)
282 {
283         queue_node *node;
284
285         if (!queue->head.next) return NULL;
286         if (!queue->count) return NULL;
287
288         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
289
290         LIST_DELINIT(&node->item_link);
291         queue->count--;
292
293         return node;
294 }
295
296 static queue_node *
297 _queue_node_pop(queue *queue, queue_node *node)
298 {
299         LIST_DELINIT(&node->item_link);
300         queue->count--;
301
302         return node;
303 }
304
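/*
	Find the queue_node owning 'surface'. 'type' is a bitmask of
	FREE_QUEUE, DIRTY_QUEUE and NODE_LIST (0 searches all three);
	the queue in which the node was found is reported via out_type.
*/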
305 static queue_node *
306 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
307                 tbm_surface_h surface, int *out_type)
308 {
309         queue_node *node = NULL;
310
311         if (type == 0)
312                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
313         if (out_type)
314                 *out_type = 0;
315
316         if (type & FREE_QUEUE) {
317                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
318                                          item_link) {
319                         if (node->surface == surface) {
320                                 if (out_type)
321                                         *out_type = FREE_QUEUE;
322
323                                 return node;
324                         }
325                 }
326         }
327
328         if (type & DIRTY_QUEUE) {
329                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
330                                          item_link) {
331                         if (node->surface == surface) {
332                                 if (out_type)
333                                         *out_type = DIRTY_QUEUE;
334
335                                 return node;
336                         }
337                 }
338         }
339
340         if (type & NODE_LIST) {
341                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
342                         if (node->surface == surface) {
343                                 if (out_type)
344                                         *out_type = NODE_LIST;
345
346                                 return node;
347                         }
348                 }
349         }
350
351         TBM_ERR("fail to get the queue_node.\n");
352
353         return NULL;
354 }
355
356 static void
357 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
358 {
359         if (node->surface) {
360                 if (surface_queue->free_cb) {
361                         surface_queue->free_cb(surface_queue,
362                                         surface_queue->alloc_cb_data,
363                                         node->surface);
364                 }
365
366                 tbm_surface_destroy(node->surface);
367         }
368
369         _queue_node_delete(node);
370 }
371
372 static void
373 _queue_init(queue *queue)
374 {
375         LIST_INITHEAD(&queue->head);
376
377         queue->count = 0;
378 }
379
380 static void
381 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
382             void *data)
383 {
384         TBM_RETURN_IF_FAIL(cb != NULL);
385
386         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
387
388         TBM_RETURN_IF_FAIL(item != NULL);
389
390         LIST_INITHEAD(&item->link);
391         item->cb = cb;
392         item->data = data;
393
394         LIST_ADDTAIL(&item->link, list);
395 }
396
397 static void
398 _notify_remove(struct list_head *list,
399                tbm_surface_queue_notify_cb cb, void *data)
400 {
401         queue_notify *item = NULL, *tmp;
402
403         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
404                 if (item->cb == cb && item->data == data) {
405                         LIST_DEL(&item->link);
406                         free(item);
407                         return;
408                 }
409         }
410
411         TBM_ERR("Cannot find notify\n");
412 }
413
414 static void
415 _notify_remove_all(struct list_head *list)
416 {
417         queue_notify *item = NULL, *tmp;
418
419         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
420                 LIST_DEL(&item->link);
421                 free(item);
422         }
423 }
424
425 static void
426 _notify_emit(tbm_surface_queue_h surface_queue,
427              struct list_head *list)
428 {
429         queue_notify *item = NULL, *tmp;
430
431         /*
432                 item->cb is a callback outside of libtbm.
433                 The tbm user may remove the item from the list inside the
434                 callback, so we have to use LIST_FOR_EACH_ENTRY_SAFE.
435         */
436         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
437                 item->cb(surface_queue, item->data);
438 }
439
440 static void
441 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
442             void *data)
443 {
444         TBM_RETURN_IF_FAIL(cb != NULL);
445
446         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
447
448         TBM_RETURN_IF_FAIL(item != NULL);
449
450         LIST_INITHEAD(&item->link);
451         item->cb = cb;
452         item->data = data;
453
454         LIST_ADDTAIL(&item->link, list);
455 }
456
457 static void
458 _trace_remove(struct list_head *list,
459                tbm_surface_queue_trace_cb cb, void *data)
460 {
461         queue_trace *item = NULL, *tmp;
462
463         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
464                 if (item->cb == cb && item->data == data) {
465                         LIST_DEL(&item->link);
466                         free(item);
467                         return;
468                 }
469         }
470
471         TBM_ERR("Cannot find notify\n");
472 }
473
474 static void
475 _trace_remove_all(struct list_head *list)
476 {
477         queue_trace *item = NULL, *tmp;
478
479         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
480                 LIST_DEL(&item->link);
481                 free(item);
482         }
483 }
484
485 static void
486 _trace_emit(tbm_surface_queue_h surface_queue,
487              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
488 {
489         queue_trace *item = NULL, *tmp;
490
491         /*
492                 item->cb is a callback outside of libtbm.
493                 The tbm user may remove the item from the list inside the
494                 callback, so we have to use LIST_FOR_EACH_ENTRY_SAFE.
495         */
496         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
497                 item->cb(surface_queue, surface, trace, item->data);
498 }
499
500 static int
501 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
502 {
503         queue_node *node = NULL;
504         int count = 0;
505
506         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
507                 if (node->type == type)
508                         count++;
509         }
510
511         return count;
512 }
513
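/*
	Create a node for 'surface', take a reference on the surface and
	put the node on the node list and the free_queue so that it can
	be dequeued.
*/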
514 static void
515 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
516                           tbm_surface_h surface)
517 {
518         queue_node *node;
519
520         node = _queue_node_create();
521         TBM_RETURN_IF_FAIL(node != NULL);
522
523         tbm_surface_internal_ref(surface);
524         node->surface = surface;
525
526         LIST_ADDTAIL(&node->link, &surface_queue->list);
527         surface_queue->num_attached++;
528         _queue_node_push_back(&surface_queue->free_queue, node);
529 }
530
531 static void
532 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
533                           tbm_surface_h surface)
534 {
535         queue_node *node;
536         int queue_type;
537
538         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
539         if (node) {
540                 _queue_delete_node(surface_queue, node);
541                 surface_queue->num_attached--;
542         }
543 }
544
545 static void
546 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
547                            queue_node *node, int push_back)
548 {
549         if (push_back)
550                 _queue_node_push_back(&surface_queue->dirty_queue, node);
551         else
552                 _queue_node_push_front(&surface_queue->dirty_queue, node);
553 }
554
555 static queue_node *
556 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
557 {
558         queue_node *node;
559
560         node = _queue_node_pop_front(&surface_queue->free_queue);
561
562         return node;
563 }
564
565 static queue_node *
566 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
567 {
568         queue_node *node;
569
570         if (_queue_is_empty(&surface_queue->dirty_queue))
571                 return NULL;
572
573         node = _queue_node_pop_front(&surface_queue->dirty_queue);
574
575         return node;
576 }
577
578 static void
579 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
580                            queue_node *node, int push_back)
581 {
582         if (push_back)
583                 _queue_node_push_back(&surface_queue->free_queue, node);
584         else
585                 _queue_node_push_front(&surface_queue->free_queue, node);
586 }
587
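/*
	Common constructor used by the queue implementations: initializes
	the locks, condition variables, node and notification lists, calls
	impl->init() and registers the queue in the global surf_queue_list.
*/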
588 static void
589 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
590                         int queue_size,
591                         int width, int height, int format,
592                         const tbm_surface_queue_interface *impl, void *data)
593 {
594         TBM_RETURN_IF_FAIL(surface_queue != NULL);
595         TBM_RETURN_IF_FAIL(impl != NULL);
596
597         if (!g_surf_queue_bufmgr)
598                 _init_tbm_surf_queue_bufmgr();
599
600         pthread_mutex_init(&surface_queue->lock, NULL);
601         pthread_cond_init(&surface_queue->free_cond, NULL);
602         pthread_cond_init(&surface_queue->dirty_cond, NULL);
603
604         surface_queue->queue_size = queue_size;
605         surface_queue->width = width;
606         surface_queue->height = height;
607         surface_queue->format = format;
608         surface_queue->impl = impl;
609         surface_queue->impl_data = data;
610         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
611
612         _queue_init(&surface_queue->free_queue);
613         _queue_init(&surface_queue->dirty_queue);
614         LIST_INITHEAD(&surface_queue->list);
615
616         LIST_INITHEAD(&surface_queue->destory_noti);
617         LIST_INITHEAD(&surface_queue->dequeuable_noti);
618         LIST_INITHEAD(&surface_queue->dequeue_noti);
619         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
620         LIST_INITHEAD(&surface_queue->acquirable_noti);
621         LIST_INITHEAD(&surface_queue->reset_noti);
622         LIST_INITHEAD(&surface_queue->trace_noti);
623
624         if (surface_queue->impl && surface_queue->impl->init)
625                 surface_queue->impl->init(surface_queue);
626
627         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
628 }
629
630 tbm_surface_queue_error_e
631 tbm_surface_queue_add_destroy_cb(
632         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
633         void *data)
634 {
635         _tbm_surf_queue_mutex_lock();
636         _tbm_set_last_result(TBM_ERROR_NONE);
637
638         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
639                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
640         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
641                                TBM_ERROR_INVALID_PARAMETER);
642
643         pthread_mutex_lock(&surface_queue->lock);
644
645         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
646
647         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
648
649         pthread_mutex_unlock(&surface_queue->lock);
650
651         _tbm_surf_queue_mutex_unlock();
652
653         return TBM_SURFACE_QUEUE_ERROR_NONE;
654 }
655
656 tbm_surface_queue_error_e
657 tbm_surface_queue_remove_destroy_cb(
658         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
659         void *data)
660 {
661         _tbm_surf_queue_mutex_lock();
662         _tbm_set_last_result(TBM_ERROR_NONE);
663
664         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
665                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
666
667         pthread_mutex_lock(&surface_queue->lock);
668
669         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
670
671         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
672
673         pthread_mutex_unlock(&surface_queue->lock);
674
675         _tbm_surf_queue_mutex_unlock();
676
677         return TBM_SURFACE_QUEUE_ERROR_NONE;
678 }
679
680 tbm_surface_queue_error_e
681 tbm_surface_queue_add_dequeuable_cb(
682         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
683         void *data)
684 {
685         _tbm_surf_queue_mutex_lock();
686         _tbm_set_last_result(TBM_ERROR_NONE);
687
688         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
689                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
690         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
691                                TBM_ERROR_INVALID_PARAMETER);
692
693         pthread_mutex_lock(&surface_queue->lock);
694
695         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
696
697         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
698
699         pthread_mutex_unlock(&surface_queue->lock);
700
701         _tbm_surf_queue_mutex_unlock();
702
703         return TBM_SURFACE_QUEUE_ERROR_NONE;
704 }
705
706 tbm_surface_queue_error_e
707 tbm_surface_queue_remove_dequeuable_cb(
708         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
709         void *data)
710 {
711         _tbm_surf_queue_mutex_lock();
712         _tbm_set_last_result(TBM_ERROR_NONE);
713
714         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
715                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
716
717         pthread_mutex_lock(&surface_queue->lock);
718
719         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
720
721         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
722
723         pthread_mutex_unlock(&surface_queue->lock);
724
725         _tbm_surf_queue_mutex_unlock();
726
727         return TBM_SURFACE_QUEUE_ERROR_NONE;
728 }
729
730 tbm_surface_queue_error_e
731 tbm_surface_queue_add_dequeue_cb(
732         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
733         void *data)
734 {
735         _tbm_surf_queue_mutex_lock();
736         _tbm_set_last_result(TBM_ERROR_NONE);
737
738         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
739                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
740         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
741                                TBM_ERROR_INVALID_PARAMETER);
742
743         pthread_mutex_lock(&surface_queue->lock);
744
745         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
746
747         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
748
749         pthread_mutex_unlock(&surface_queue->lock);
750
751         _tbm_surf_queue_mutex_unlock();
752
753         return TBM_SURFACE_QUEUE_ERROR_NONE;
754 }
755
756 tbm_surface_queue_error_e
757 tbm_surface_queue_remove_dequeue_cb(
758         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
759         void *data)
760 {
761         _tbm_surf_queue_mutex_lock();
762         _tbm_set_last_result(TBM_ERROR_NONE);
763
764         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
765                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
766
767         pthread_mutex_lock(&surface_queue->lock);
768
769         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
770
771         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
772
773         pthread_mutex_unlock(&surface_queue->lock);
774
775         _tbm_surf_queue_mutex_unlock();
776
777         return TBM_SURFACE_QUEUE_ERROR_NONE;
778 }
779
780 tbm_surface_queue_error_e
781 tbm_surface_queue_add_can_dequeue_cb(
782         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
783         void *data)
784 {
785         _tbm_surf_queue_mutex_lock();
786         _tbm_set_last_result(TBM_ERROR_NONE);
787
788         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
789                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
790         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
791                                TBM_ERROR_INVALID_PARAMETER);
792
793         pthread_mutex_lock(&surface_queue->lock);
794
795         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
796
797         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
798
799         pthread_mutex_unlock(&surface_queue->lock);
800
801         _tbm_surf_queue_mutex_unlock();
802
803         return TBM_SURFACE_QUEUE_ERROR_NONE;
804 }
805
806 tbm_surface_queue_error_e
807 tbm_surface_queue_remove_can_dequeue_cb(
808         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
809         void *data)
810 {
811         _tbm_surf_queue_mutex_lock();
812         _tbm_set_last_result(TBM_ERROR_NONE);
813
814         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
815                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
816
817         pthread_mutex_lock(&surface_queue->lock);
818
819         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
820
821         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
822
823         pthread_mutex_unlock(&surface_queue->lock);
824
825         _tbm_surf_queue_mutex_unlock();
826
827         return TBM_SURFACE_QUEUE_ERROR_NONE;
828 }
829
830 tbm_surface_queue_error_e
831 tbm_surface_queue_add_acquirable_cb(
832         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
833         void *data)
834 {
835         _tbm_surf_queue_mutex_lock();
836         _tbm_set_last_result(TBM_ERROR_NONE);
837
838         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
839                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
840         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
841                                TBM_ERROR_INVALID_PARAMETER);
842
843         pthread_mutex_lock(&surface_queue->lock);
844
845         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
846
847         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
848
849         pthread_mutex_unlock(&surface_queue->lock);
850
851         _tbm_surf_queue_mutex_unlock();
852
853         return TBM_SURFACE_QUEUE_ERROR_NONE;
854 }
855
856 tbm_surface_queue_error_e
857 tbm_surface_queue_remove_acquirable_cb(
858         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
859         void *data)
860 {
861         _tbm_surf_queue_mutex_lock();
862         _tbm_set_last_result(TBM_ERROR_NONE);
863
864         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
865                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
866
867         pthread_mutex_lock(&surface_queue->lock);
868
869         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
870
871         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
872
873         pthread_mutex_unlock(&surface_queue->lock);
874
875         _tbm_surf_queue_mutex_unlock();
876
877         return TBM_SURFACE_QUEUE_ERROR_NONE;
878 }
879
880 tbm_surface_queue_error_e
881 tbm_surface_queue_add_trace_cb(
882         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
883         void *data)
884 {
885         _tbm_surf_queue_mutex_lock();
886         _tbm_set_last_result(TBM_ERROR_NONE);
887
888         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
889                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
890         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
891                                TBM_ERROR_INVALID_PARAMETER);
892
893         pthread_mutex_lock(&surface_queue->lock);
894
895         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
896
897         _trace_add(&surface_queue->trace_noti, trace_cb, data);
898
899         pthread_mutex_unlock(&surface_queue->lock);
900
901         _tbm_surf_queue_mutex_unlock();
902
903         return TBM_SURFACE_QUEUE_ERROR_NONE;
904 }
905
906 tbm_surface_queue_error_e
907 tbm_surface_queue_remove_trace_cb(
908         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
909         void *data)
910 {
911         _tbm_surf_queue_mutex_lock();
912         _tbm_set_last_result(TBM_ERROR_NONE);
913
914         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
915                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
916
917         pthread_mutex_lock(&surface_queue->lock);
918
919         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
920
921         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
922
923         pthread_mutex_unlock(&surface_queue->lock);
924
925         _tbm_surf_queue_mutex_unlock();
926
927         return TBM_SURFACE_QUEUE_ERROR_NONE;
928 }
929
930 tbm_surface_queue_error_e
931 tbm_surface_queue_set_alloc_cb(
932         tbm_surface_queue_h surface_queue,
933         tbm_surface_alloc_cb alloc_cb,
934         tbm_surface_free_cb free_cb,
935         void *data)
936 {
937         _tbm_surf_queue_mutex_lock();
938         _tbm_set_last_result(TBM_ERROR_NONE);
939
940         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
941                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
942
943         pthread_mutex_lock(&surface_queue->lock);
944
945         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
946
947         surface_queue->alloc_cb = alloc_cb;
948         surface_queue->free_cb = free_cb;
949         surface_queue->alloc_cb_data = data;
950
951         pthread_mutex_unlock(&surface_queue->lock);
952
953         _tbm_surf_queue_mutex_unlock();
954
955         return TBM_SURFACE_QUEUE_ERROR_NONE;
956 }
957
958 int
959 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
960 {
961         int width;
962
963         _tbm_surf_queue_mutex_lock();
964         _tbm_set_last_result(TBM_ERROR_NONE);
965
966         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
967
968         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
969
970         width = surface_queue->width;
971
972         _tbm_surf_queue_mutex_unlock();
973
974         return width;
975 }
976
977 int
978 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
979 {
980         int height;
981
982         _tbm_surf_queue_mutex_lock();
983         _tbm_set_last_result(TBM_ERROR_NONE);
984
985         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
986
987         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
988
989         height = surface_queue->height;
990
991         _tbm_surf_queue_mutex_unlock();
992
993         return height;
994 }
995
996 int
997 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
998 {
999         int format;
1000
1001         _tbm_surf_queue_mutex_lock();
1002         _tbm_set_last_result(TBM_ERROR_NONE);
1003
1004         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1005
1006         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1007
1008         format = surface_queue->format;
1009
1010         _tbm_surf_queue_mutex_unlock();
1011
1012         return format;
1013 }
1014
1015 int
1016 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1017 {
1018         int queue_size;
1019
1020         _tbm_surf_queue_mutex_lock();
1021         _tbm_set_last_result(TBM_ERROR_NONE);
1022
1023         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1024
1025         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1026
1027         queue_size = surface_queue->queue_size;
1028
1029         _tbm_surf_queue_mutex_unlock();
1030
1031         return queue_size;
1032 }
1033
1034 tbm_surface_queue_error_e
1035 tbm_surface_queue_add_reset_cb(
1036         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1037         void *data)
1038 {
1039         _tbm_surf_queue_mutex_lock();
1040         _tbm_set_last_result(TBM_ERROR_NONE);
1041
1042         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1043                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1044         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
1045                                TBM_ERROR_INVALID_PARAMETER);
1046
1047         pthread_mutex_lock(&surface_queue->lock);
1048
1049         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1050
1051         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1052
1053         pthread_mutex_unlock(&surface_queue->lock);
1054
1055         _tbm_surf_queue_mutex_unlock();
1056
1057         return TBM_SURFACE_QUEUE_ERROR_NONE;
1058 }
1059
1060 tbm_surface_queue_error_e
1061 tbm_surface_queue_remove_reset_cb(
1062         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1063         void *data)
1064 {
1065         _tbm_surf_queue_mutex_lock();
1066         _tbm_set_last_result(TBM_ERROR_NONE);
1067
1068         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1069                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1070
1071         pthread_mutex_lock(&surface_queue->lock);
1072
1073         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1074
1075         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1076
1077         pthread_mutex_unlock(&surface_queue->lock);
1078
1079         _tbm_surf_queue_mutex_unlock();
1080
1081         return TBM_SURFACE_QUEUE_ERROR_NONE;
1082 }
1083
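/*
	Hand a dequeued surface back to the queue as new content: the node
	moves to the dirty_queue (impl->enqueue when provided), dirty_cond
	is signalled and the trace/acquirable callbacks are emitted.
*/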
1084 tbm_surface_queue_error_e
1085 tbm_surface_queue_enqueue(tbm_surface_queue_h
1086                           surface_queue, tbm_surface_h surface)
1087 {
1088         queue_node *node;
1089         int queue_type;
1090
1091         _tbm_surf_queue_mutex_lock();
1092         _tbm_set_last_result(TBM_ERROR_NONE);
1093
1094         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1095                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1096         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1097                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1098
1099         if (b_dump_queue)
1100                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1101
1102         pthread_mutex_lock(&surface_queue->lock);
1103
1104         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1105
1106         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1107         if (node == NULL || queue_type != NODE_LIST) {
1108                 TBM_ERR("tbm_surface_queue_enqueue::Surface is already in the free_queue or dirty_queue, or is unknown. node:%p, type:%d\n",
1109                         node, queue_type);
1110                 pthread_mutex_unlock(&surface_queue->lock);
1111
1112                 _tbm_surf_queue_mutex_unlock();
1113
1114                 if (!node) {
1115                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1116                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1117                 } else {
1118                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1119                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1120                 }
1121         }
1122
1123         if (surface_queue->impl && surface_queue->impl->enqueue)
1124                 surface_queue->impl->enqueue(surface_queue, node);
1125         else
1126                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1127
1128         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1129                 TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1130                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
1131                 pthread_mutex_unlock(&surface_queue->lock);
1132
1133                 _tbm_surf_queue_mutex_unlock();
1134                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
1135         }
1136
1137         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1138
1139         if (surface_queue->enqueue_sync_count == 1) {
1140                 tbm_surface_info_s info;
1141                 int ret;
1142
1143                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1144                 if (ret == TBM_SURFACE_ERROR_NONE)
1145                         tbm_surface_unmap(surface);
1146         }
1147
1148         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1149
1150         pthread_mutex_unlock(&surface_queue->lock);
1151         pthread_cond_signal(&surface_queue->dirty_cond);
1152
1153         _tbm_surf_queue_mutex_unlock();
1154
1155         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1156
1157         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1158
1159         return TBM_SURFACE_QUEUE_ERROR_NONE;
1160 }
1161
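/*
	Undo a dequeue without enqueuing new content: the surface returns
	to the free_queue (or is detached/deleted when the queue shrank or
	a delete is pending) and free_cond is signalled.
*/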
1162 tbm_surface_queue_error_e
1163 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1164                           surface_queue, tbm_surface_h surface)
1165 {
1166         queue_node *node;
1167         int queue_type;
1168
1169         _tbm_surf_queue_mutex_lock();
1170         _tbm_set_last_result(TBM_ERROR_NONE);
1171
1172         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1173                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1174         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1175                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1176
1177         pthread_mutex_lock(&surface_queue->lock);
1178
1179         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1180
1181         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1182         if (node == NULL || queue_type != NODE_LIST) {
1183                 TBM_ERR("tbm_surface_queue_cancel_dequeue::Surface is already in the free_queue or dirty_queue, or is unknown. node:%p, type:%d\n",
1184                         node, queue_type);
1185                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1186                 pthread_mutex_unlock(&surface_queue->lock);
1187
1188                 _tbm_surf_queue_mutex_unlock();
1189                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1190         }
1191
1192         if (node->delete_pending) {
1193                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1194
1195                 _queue_delete_node(surface_queue, node);
1196
1197                 pthread_mutex_unlock(&surface_queue->lock);
1198
1199                 _tbm_surf_queue_mutex_unlock();
1200
1201                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1202
1203                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1204         }
1205
1206         if (surface_queue->queue_size < surface_queue->num_attached) {
1207                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1208
1209                 if (surface_queue->impl && surface_queue->impl->need_detach)
1210                         surface_queue->impl->need_detach(surface_queue, node);
1211                 else
1212                         _tbm_surface_queue_detach(surface_queue, surface);
1213
1214                 pthread_mutex_unlock(&surface_queue->lock);
1215
1216                 _tbm_surf_queue_mutex_unlock();
1217
1218                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1219
1220                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1221         }
1222
1223         if (surface_queue->impl && surface_queue->impl->release)
1224                 surface_queue->impl->release(surface_queue, node);
1225         else
1226                 _tbm_surface_queue_release(surface_queue, node, 1);
1227
1228         if (_queue_is_empty(&surface_queue->free_queue)) {
1229                 TBM_ERR("surface_queue->free_queue is empty.\n");
1230                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1231                 pthread_mutex_unlock(&surface_queue->lock);
1232
1233                 _tbm_surf_queue_mutex_unlock();
1234                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1235         }
1236
1237         node->type = QUEUE_NODE_TYPE_RELEASE;
1238
1239         pthread_mutex_unlock(&surface_queue->lock);
1240         pthread_cond_signal(&surface_queue->free_cond);
1241
1242         _tbm_surf_queue_mutex_unlock();
1243
1244         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1245
1246         return TBM_SURFACE_QUEUE_ERROR_NONE;
1247 }
1248
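/*
	Take a writable surface from the free_queue. When the free_queue
	is empty, the implementation first gets a chance to attach a new
	buffer through need_attach().
*/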
1249 tbm_surface_queue_error_e
1250 tbm_surface_queue_dequeue(tbm_surface_queue_h
1251                           surface_queue, tbm_surface_h *surface)
1252 {
1253         queue_node *node;
1254
1255         _tbm_surf_queue_mutex_lock();
1256         _tbm_set_last_result(TBM_ERROR_NONE);
1257
1258         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1259                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1260         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1261                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1262
1263         *surface = NULL;
1264
1265         pthread_mutex_lock(&surface_queue->lock);
1266
1267         if (_queue_is_empty(&surface_queue->free_queue)) {
1268                 if (surface_queue->impl && surface_queue->impl->need_attach)
1269                         surface_queue->impl->need_attach(surface_queue);
1270
1271                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1272                         TBM_ERR("surface_queue:%p is invalid", surface_queue);
1273                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1274                         pthread_mutex_unlock(&surface_queue->lock);
1275                         _tbm_surf_queue_mutex_unlock();
1276                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1277                 }
1278         }
1279
1280         if (surface_queue->impl && surface_queue->impl->dequeue)
1281                 node = surface_queue->impl->dequeue(surface_queue);
1282         else
1283                 node = _tbm_surface_queue_dequeue(surface_queue);
1284
1285         if (node == NULL || node->surface == NULL) {
1286                 TBM_ERR("_queue_node_pop_front failed\n");
1287                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1288                 pthread_mutex_unlock(&surface_queue->lock);
1289
1290                 _tbm_surf_queue_mutex_unlock();
1291                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1292         }
1293
1294         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1295         *surface = node->surface;
1296
1297         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1298
1299         pthread_mutex_unlock(&surface_queue->lock);
1300
1301         _tbm_surf_queue_mutex_unlock();
1302
1303         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1304
1305         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1306
1307         return TBM_SURFACE_QUEUE_ERROR_NONE;
1308 }
1309
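/*
	Newly introduced entry point: wait until a surface can be dequeued
	or until ms_timeout milliseconds have passed. The can_dequeue
	callbacks are emitted first (they may release buffers), then the
	caller waits on free_cond; TBM_SURFACE_QUEUE_ERROR_TIMEOUT is
	returned when the deadline expires.

	Illustrative usage (assumes 'queue' was created elsewhere, e.g.
	with tbm_surface_queue_create()):

		tbm_surface_h surface;

		if (tbm_surface_queue_can_dequeue_wait_timeout(queue, 500) ==
		    TBM_SURFACE_QUEUE_ERROR_NONE)
			tbm_surface_queue_dequeue(queue, &surface);
*/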
1310 tbm_surface_queue_error_e
1311 tbm_surface_queue_can_dequeue_wait_timeout(tbm_surface_queue_h surface_queue, int ms_timeout)
1312 {
1313         int ret;
1314         struct timespec tp;
1315
1316         _tbm_surf_queue_mutex_lock();
1317         _tbm_set_last_result(TBM_ERROR_NONE);
1318
1319         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1320                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1321
1322         _tbm_surf_queue_mutex_unlock();
1323
1324         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1325
1326         _tbm_surf_queue_mutex_lock();
1327
1328         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1329                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1330
1331         pthread_mutex_lock(&surface_queue->lock);
1332
1333         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1334
1335         if (_queue_is_empty(&surface_queue->free_queue)) {
1336                 if (surface_queue->impl && surface_queue->impl->need_attach)
1337                         surface_queue->impl->need_attach(surface_queue);
1338
1339                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1340                         TBM_ERR("surface_queue:%p is invalid", surface_queue);
1341                         pthread_mutex_unlock(&surface_queue->lock);
1342                         _tbm_surf_queue_mutex_unlock();
1343                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1344                 }
1345         }
1346
1347         if (!_queue_is_empty(&surface_queue->free_queue)) {
1348                 pthread_mutex_unlock(&surface_queue->lock);
1349                 _tbm_surf_queue_mutex_unlock();
1350                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1351         }
1352
1353         _tbm_surf_queue_mutex_unlock();
1354
1355         while (1) {
1356                 clock_gettime(CLOCK_REALTIME, &tp);
1357
1358                 tp.tv_sec += ms_timeout / 1000;
1359                 tp.tv_nsec += (ms_timeout % 1000) * 1000000;
1360                 tp.tv_sec += tp.tv_nsec / 1000000000; /* carry so tv_nsec stays below one second */
1361                 tp.tv_nsec %= 1000000000;
1362
1363                 ret = pthread_cond_timedwait(&surface_queue->free_cond, &surface_queue->lock, &tp);
1364                 if (ret) {
1365                         if (ret == ETIMEDOUT) {
1366                                 TBM_ERR("surface_queue:%p can dequeue wait timeout", surface_queue);
1367                                 pthread_mutex_unlock(&surface_queue->lock);
1368                                 return TBM_SURFACE_QUEUE_ERROR_TIMEOUT;
1369                         } else {
1370                                 TBM_INFO("surface_queue:%p timedwait error retry wait", surface_queue);
1371                         }
1372                 }
1373                 else {
1374                         pthread_mutex_unlock(&surface_queue->lock);
1375                         return TBM_SURFACE_QUEUE_ERROR_NONE;
1376                 }
1377         }
1378 }
1379
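/*
	Non-blocking/blocking check without a timeout: returns 1 when a
	surface is available in the free_queue, 0 otherwise. With wait != 0
	the call blocks on free_cond until a buffer is released.
*/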
1380 int
1381 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1382 {
1383         _tbm_surf_queue_mutex_lock();
1384         _tbm_set_last_result(TBM_ERROR_NONE);
1385
1386         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1387
1388         _tbm_surf_queue_mutex_unlock();
1389
1390         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1391
1392         _tbm_surf_queue_mutex_lock();
1393
1394         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1395
1396         pthread_mutex_lock(&surface_queue->lock);
1397
1398         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1399
1400         if (_queue_is_empty(&surface_queue->free_queue)) {
1401                 if (surface_queue->impl && surface_queue->impl->need_attach)
1402                         surface_queue->impl->need_attach(surface_queue);
1403
1404                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1405                         TBM_ERR("surface_queue:%p is invalid", surface_queue);
1406                         pthread_mutex_unlock(&surface_queue->lock);
1407                         _tbm_surf_queue_mutex_unlock();
1408                         return 0;
1409                 }
1410         }
1411
1412         if (!_queue_is_empty(&surface_queue->free_queue)) {
1413                 pthread_mutex_unlock(&surface_queue->lock);
1414                 _tbm_surf_queue_mutex_unlock();
1415                 return 1;
1416         }
1417
1418         if (wait) {
1419                 _tbm_surf_queue_mutex_unlock();
1420                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1421                 pthread_mutex_unlock(&surface_queue->lock);
1422                 return 1;
1423         }
1424
1425         pthread_mutex_unlock(&surface_queue->lock);
1426         _tbm_surf_queue_mutex_unlock();
1427         return 0;
1428 }
1429
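/*
	Give an acquired surface back to the producer side: the node
	returns to the free_queue (or is detached/deleted), free_cond is
	signalled and the dequeuable callbacks are emitted.
*/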
1430 tbm_surface_queue_error_e
1431 tbm_surface_queue_release(tbm_surface_queue_h
1432                           surface_queue, tbm_surface_h surface)
1433 {
1434         queue_node *node;
1435         int queue_type;
1436
1437         _tbm_surf_queue_mutex_lock();
1438         _tbm_set_last_result(TBM_ERROR_NONE);
1439
1440         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1441                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1442         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1443                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1444
1445         pthread_mutex_lock(&surface_queue->lock);
1446
1447         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1448
1449         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1450         if (node == NULL || queue_type != NODE_LIST) {
1451                 TBM_ERR("tbm_surface_queue_release::Surface is already in the free_queue or dirty_queue, or is unknown. node:%p, type:%d\n",
1452                         node, queue_type);
1453                 pthread_mutex_unlock(&surface_queue->lock);
1454
1455                 _tbm_surf_queue_mutex_unlock();
1456
1457                 if (!node) {
1458                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1459                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1460                 } else {
1461                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1462                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1463                 }
1464         }
1465
1466         if (node->delete_pending) {
1467                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1468
1469                 _queue_delete_node(surface_queue, node);
1470
1471                 pthread_mutex_unlock(&surface_queue->lock);
1472
1473                 _tbm_surf_queue_mutex_unlock();
1474
1475                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1476
1477                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1478         }
1479
1480         if (surface_queue->queue_size < surface_queue->num_attached) {
1481                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1482
1483                 if (surface_queue->impl && surface_queue->impl->need_detach)
1484                         surface_queue->impl->need_detach(surface_queue, node);
1485                 else
1486                         _tbm_surface_queue_detach(surface_queue, surface);
1487
1488                 pthread_mutex_unlock(&surface_queue->lock);
1489
1490                 _tbm_surf_queue_mutex_unlock();
1491
1492                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1493
1494                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1495         }
1496
1497         if (surface_queue->impl && surface_queue->impl->release)
1498                 surface_queue->impl->release(surface_queue, node);
1499         else
1500                 _tbm_surface_queue_release(surface_queue, node, 1);
1501
1502         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1503                 TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
1504                 _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
1505                 pthread_mutex_unlock(&surface_queue->lock);
1506
1507                 _tbm_surf_queue_mutex_unlock();
1508                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1509         }
1510
1511         node->type = QUEUE_NODE_TYPE_RELEASE;
1512
1513         pthread_mutex_unlock(&surface_queue->lock);
1514         pthread_cond_signal(&surface_queue->free_cond);
1515
1516         _tbm_surf_queue_mutex_unlock();
1517
1518         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1519
1520         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1521
1522         return TBM_SURFACE_QUEUE_ERROR_NONE;
1523 }
1524
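/*
	Put an acquired surface back on the dirty_queue without releasing
	it, so that it can be acquired again; dirty_cond is signalled and
	the acquirable callbacks are emitted.
*/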
1525 tbm_surface_queue_error_e
1526 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1527                         surface_queue, tbm_surface_h surface)
1528 {
1529         queue_node *node;
1530         int queue_type;
1531
1532         _tbm_surf_queue_mutex_lock();
1533         _tbm_set_last_result(TBM_ERROR_NONE);
1534
1535         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1536                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1537         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1538                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1539
1540         pthread_mutex_lock(&surface_queue->lock);
1541
1542         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1543
1544         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1545         if (node == NULL || queue_type != NODE_LIST) {
1546                 TBM_ERR("tbm_surface_queue_cancel_acquire::Surface is already in the free_queue or dirty_queue, or is unknown. node:%p, type:%d\n",
1547                         node, queue_type);
1548                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1549                 pthread_mutex_unlock(&surface_queue->lock);
1550
1551                 _tbm_surf_queue_mutex_unlock();
1552                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1553         }
1554
1555         if (surface_queue->impl && surface_queue->impl->enqueue)
1556                 surface_queue->impl->enqueue(surface_queue, node);
1557         else
1558                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1559
1560         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1561                 TBM_ERR("enqueue surface but queue is empty node:%p\n", node);
1562                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1563                 pthread_mutex_unlock(&surface_queue->lock);
1564
1565                 _tbm_surf_queue_mutex_unlock();
1566                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1567         }
1568
1569         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1570
1571         pthread_mutex_unlock(&surface_queue->lock);
1572         pthread_cond_signal(&surface_queue->dirty_cond);
1573
1574         _tbm_surf_queue_mutex_unlock();
1575
1576         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1577
1578         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1579
1580         return TBM_SURFACE_QUEUE_ERROR_NONE;
1581 }
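/*
 * Usage sketch (informative, not part of the implementation): cancel_acquire
 * puts an acquired buffer back into the dirty_queue (making it acquirable
 * again) instead of releasing it to the free_queue. "queue" is assumed to be
 * a valid tbm_surface_queue_h; can_present_now() and present() are
 * hypothetical helpers.
 *
 *   tbm_surface_h surface = NULL;
 *
 *   if (tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           if (can_present_now()) {
 *                   present(surface);               // hypothetical display hook
 *                   tbm_surface_queue_release(queue, surface);
 *           } else {
 *                   tbm_surface_queue_cancel_acquire(queue, surface);
 *           }
 *   }
 */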
1582
1583 tbm_surface_queue_error_e
1584 tbm_surface_queue_acquire(tbm_surface_queue_h
1585                           surface_queue, tbm_surface_h *surface)
1586 {
1587         queue_node *node;
1588
1589         _tbm_surf_queue_mutex_lock();
1590         _tbm_set_last_result(TBM_ERROR_NONE);
1591
1592         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1593                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1594         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1595                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1596 
1597         *surface = NULL;
1598
1599         pthread_mutex_lock(&surface_queue->lock);
1600
1601         if (surface_queue->impl && surface_queue->impl->acquire)
1602                 node = surface_queue->impl->acquire(surface_queue);
1603         else
1604                 node = _tbm_surface_queue_acquire(surface_queue);
1605
1606         if (node == NULL || node->surface == NULL) {
1607                 TBM_ERR("acquire failed: no acquirable surface in the dirty_queue\n");
1608                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1609                 pthread_mutex_unlock(&surface_queue->lock);
1610
1611                 _tbm_surf_queue_mutex_unlock();
1612                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1613         }
1614
1615         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1616
1617         *surface = node->surface;
1618
1619         if (surface_queue->acquire_sync_count == 1) {
1620                 tbm_surface_info_s info;
1621                 int ret;
1622
1623                 TBM_ERR("start map surface:%p", *surface);
1624                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1625                 TBM_ERR("end map surface:%p", *surface);
1626                 if (ret == TBM_SURFACE_ERROR_NONE)
1627                         tbm_surface_unmap(*surface);
1628         }
1629
1630         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1631
1632         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1633
1634         pthread_mutex_unlock(&surface_queue->lock);
1635
1636         _tbm_surf_queue_mutex_unlock();
1637
1638         if (b_dump_queue)
1639                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1640
1641         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1642
1643         return TBM_SURFACE_QUEUE_ERROR_NONE;
1644 }
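/*
 * Usage sketch (informative, not part of the implementation): the consumer
 * side of the queue. "queue" is assumed to be a valid tbm_surface_queue_h and
 * present() a hypothetical display hook.
 *
 *   tbm_surface_h surface = NULL;
 *
 *   if (tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           present(surface);                       // scan out / compose the frame
 *           tbm_surface_queue_release(queue, surface);
 *   }
 */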
1645
1646 int
1647 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1648 {
1649         _tbm_surf_queue_mutex_lock();
1650         _tbm_set_last_result(TBM_ERROR_NONE);
1651
1652         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1653
1654         pthread_mutex_lock(&surface_queue->lock);
1655
1656         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1657
1658         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1659                 pthread_mutex_unlock(&surface_queue->lock);
1660                 _tbm_surf_queue_mutex_unlock();
1661                 return 1;
1662         }
1663
1664         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1665                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1666                 _tbm_surf_queue_mutex_unlock();
1667                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1668                 pthread_mutex_unlock(&surface_queue->lock);
1669                 return 1;
1670         }
1671
1672         pthread_mutex_unlock(&surface_queue->lock);
1673         _tbm_surf_queue_mutex_unlock();
1674         return 0;
1675 }
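/*
 * Usage sketch (informative, not part of the implementation): with wait == 0
 * the call only polls the dirty_queue; with wait != 0 it may block on
 * dirty_cond as long as at least one buffer is still dequeued, so it is
 * typically called right before tbm_surface_queue_acquire(). "queue" is
 * assumed to be a valid handle.
 *
 *   if (tbm_surface_queue_can_acquire(queue, 1)) {
 *           tbm_surface_h surface = NULL;
 *
 *           if (tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *                   // ... consume the buffer ...
 *                   tbm_surface_queue_release(queue, surface);
 *           }
 *   }
 */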
1676
1677 void
1678 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1679 {
1680         queue_node *node = NULL, *tmp;
1681
1682         _tbm_surf_queue_mutex_lock();
1683         _tbm_set_last_result(TBM_ERROR_NONE);
1684
1685         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1686
1687         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1688
1689         LIST_DEL(&surface_queue->item_link);
1690
1691         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1692                 _queue_delete_node(surface_queue, node);
1693
1694         if (surface_queue->impl && surface_queue->impl->destroy)
1695                 surface_queue->impl->destroy(surface_queue);
1696
1697         _notify_emit(surface_queue, &surface_queue->destory_noti);
1698
1699         _notify_remove_all(&surface_queue->destory_noti);
1700         _notify_remove_all(&surface_queue->dequeuable_noti);
1701         _notify_remove_all(&surface_queue->dequeue_noti);
1702         _notify_remove_all(&surface_queue->can_dequeue_noti);
1703         _notify_remove_all(&surface_queue->acquirable_noti);
1704         _notify_remove_all(&surface_queue->reset_noti);
1705         _trace_remove_all(&surface_queue->trace_noti);
1706
1707         pthread_mutex_destroy(&surface_queue->lock);
1708
1709         free(surface_queue);
1710
1711         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1712                 _deinit_tbm_surf_queue_bufmgr();
1713
1714         _tbm_surf_queue_mutex_unlock();
1715 }
1716
1717 tbm_surface_queue_error_e
1718 tbm_surface_queue_reset(tbm_surface_queue_h
1719                         surface_queue, int width, int height, int format)
1720 {
1721         queue_node *node = NULL, *tmp;
1722
1723         _tbm_surf_queue_mutex_lock();
1724         _tbm_set_last_result(TBM_ERROR_NONE);
1725
1726         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1727                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1728
1729         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1730
1731         if (width == surface_queue->width && height == surface_queue->height &&
1732                 format == surface_queue->format) {
1733                 _tbm_surf_queue_mutex_unlock();
1734                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1735         }
1736
1737         pthread_mutex_lock(&surface_queue->lock);
1738
1739         surface_queue->width = width;
1740         surface_queue->height = height;
1741         surface_queue->format = format;
1742
1743         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1744                 /* Destroy the surfaces in the free_queue and mark the remaining nodes for deferred deletion */
1745                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1746                         _queue_delete_node(surface_queue, node);
1747
1748                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1749                         node->delete_pending = 1;
1750         } else {
1751                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1752                         _queue_delete_node(surface_queue, node);
1753
1754                 _queue_init(&surface_queue->dirty_queue);
1755                 LIST_INITHEAD(&surface_queue->list);
1756         }
1757
1758         /* Reset queue */
1759         _queue_init(&surface_queue->free_queue);
1760
1761         surface_queue->num_attached = 0;
1762
1763         if (surface_queue->impl && surface_queue->impl->reset)
1764                 surface_queue->impl->reset(surface_queue);
1765
1766         pthread_mutex_unlock(&surface_queue->lock);
1767         pthread_cond_signal(&surface_queue->free_cond);
1768
1769         _tbm_surf_queue_mutex_unlock();
1770
1771         _notify_emit(surface_queue, &surface_queue->reset_noti);
1772
1773         return TBM_SURFACE_QUEUE_ERROR_NONE;
1774 }
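/*
 * Usage sketch (informative, not part of the implementation): reset re-creates
 * the queue with a new geometry/format, dropping (or, in GUARANTEE_CYCLE mode,
 * deferring deletion of) the currently attached surfaces and emitting
 * reset_noti. "queue" is assumed to be a valid handle.
 *
 *   // e.g. on an output rotation from landscape to portrait:
 *   tbm_surface_queue_reset(queue, 720, 1280, TBM_FORMAT_ARGB8888);
 */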
1775
1776 tbm_surface_queue_error_e
1777 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1778 {
1779         _tbm_surf_queue_mutex_lock();
1780         _tbm_set_last_result(TBM_ERROR_NONE);
1781
1782         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1783                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1784
1785         _tbm_surf_queue_mutex_unlock();
1786
1787         _notify_emit(surface_queue, &surface_queue->reset_noti);
1788
1789         return TBM_SURFACE_QUEUE_ERROR_NONE;
1790 }
1791
1792 tbm_surface_queue_error_e
1793 tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
1794 {
1795         _tbm_surf_queue_mutex_lock();
1796         _tbm_set_last_result(TBM_ERROR_NONE);
1797
1798         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1799                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1800
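        /* The empty lock/unlock pair below presumably acts as a barrier: a
         * consumer that has already tested the free_queue under
         * surface_queue->lock reaches pthread_cond_wait() before free_cond
         * is signaled. */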
1801         pthread_mutex_lock(&surface_queue->lock);
1802         pthread_mutex_unlock(&surface_queue->lock);
1803         pthread_cond_signal(&surface_queue->free_cond);
1804
1805         _tbm_surf_queue_mutex_unlock();
1806
1807         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1808
1809         return TBM_SURFACE_QUEUE_ERROR_NONE;
1810 }
1811
1812 tbm_surface_queue_error_e
1813 tbm_surface_queue_set_size(tbm_surface_queue_h
1814                         surface_queue, int queue_size, int flush)
1815 {
1816         queue_node *node = NULL, *tmp;
1817
1818         _tbm_surf_queue_mutex_lock();
1819         _tbm_set_last_result(TBM_ERROR_NONE);
1820
1821         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1822                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1823         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1824                                         TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1825
1826         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1827
1828         if ((surface_queue->queue_size == queue_size) && !flush) {
1829                 _tbm_surf_queue_mutex_unlock();
1830                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1831         }
1832
1833         pthread_mutex_lock(&surface_queue->lock);
1834
1835         if (flush) {
1836                 surface_queue->queue_size = queue_size;
1837
1838                 if (surface_queue->num_attached == 0) {
1839                         pthread_mutex_unlock(&surface_queue->lock);
1840                         _tbm_surf_queue_mutex_unlock();
1841                         return TBM_SURFACE_QUEUE_ERROR_NONE;
1842                 }
1843
1844                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1845                         /* Destroy the surfaces in the free_queue and mark the remaining nodes for deferred deletion */
1846                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1847                                 _queue_delete_node(surface_queue, node);
1848
1849                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1850                                 node->delete_pending = 1;
1851                 } else {
1852                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1853                                 _queue_delete_node(surface_queue, node);
1854
1855                         _queue_init(&surface_queue->dirty_queue);
1856                         LIST_INITHEAD(&surface_queue->list);
1857                 }
1858
1859                 /* Reset queue */
1860                 _queue_init(&surface_queue->free_queue);
1861
1862                 surface_queue->num_attached = 0;
1863
1864                 if (surface_queue->impl && surface_queue->impl->reset)
1865                         surface_queue->impl->reset(surface_queue);
1866
1867                 pthread_mutex_unlock(&surface_queue->lock);
1868                 pthread_cond_signal(&surface_queue->free_cond);
1869
1870                 _tbm_surf_queue_mutex_unlock();
1871
1872                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1873
1874                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1875         } else {
1876                 if (surface_queue->queue_size > queue_size) {
1877                         int need_del = surface_queue->queue_size - queue_size;
1878
1879                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1880                                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1881
1882                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1883                                         surface_queue->impl->need_detach(surface_queue, node);
1884                                 else
1885                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1886
1887                                 need_del--;
1888                                 if (need_del == 0)
1889                                         break;
1890                         }
1891                 }
1892
1893                 surface_queue->queue_size = queue_size;
1894
1895                 pthread_mutex_unlock(&surface_queue->lock);
1896
1897                 _tbm_surf_queue_mutex_unlock();
1898
1899                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1900         }
1901 }
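/*
 * Usage sketch (informative, not part of the implementation): without flush
 * only the queue size changes and, when shrinking, surplus buffers sitting in
 * the free_queue are detached; with flush the queue is fully reset (as in
 * tbm_surface_queue_reset()) and reset_noti is emitted. "queue" is assumed to
 * be a valid handle.
 *
 *   tbm_surface_queue_set_size(queue, 2, 0);   // shrink, keep in-flight buffers
 *   tbm_surface_queue_set_size(queue, 4, 1);   // resize and flush everything
 */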
1902
1903 tbm_surface_queue_error_e
1904 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1905 {
1906         queue_node *node = NULL;
1907
1908         _tbm_surf_queue_mutex_lock();
1909         _tbm_set_last_result(TBM_ERROR_NONE);
1910
1911         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1912                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1913
1914         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1915
1916         if (surface_queue->num_attached == 0) {
1917                 _tbm_surf_queue_mutex_unlock();
1918                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1919         }
1920
1921         pthread_mutex_lock(&surface_queue->lock);
1922
1923         /* Destroy the surfaces in the free_queue */
1924         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1925                 if (surface_queue->impl && surface_queue->impl->need_detach)
1926                         surface_queue->impl->need_detach(surface_queue, node);
1927                 else
1928                         _tbm_surface_queue_detach(surface_queue, node->surface);
1929         }
1930
1931         /* Reset queue */
1932         _queue_init(&surface_queue->free_queue);
1933
1934         pthread_mutex_unlock(&surface_queue->lock);
1935         _tbm_surf_queue_mutex_unlock();
1936
1937         return TBM_SURFACE_QUEUE_ERROR_NONE;
1938 }
1939
1940 tbm_surface_queue_error_e
1941 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1942 {
1943         queue_node *node = NULL, *tmp;
1944
1945         _tbm_surf_queue_mutex_lock();
1946         _tbm_set_last_result(TBM_ERROR_NONE);
1947
1948         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1949                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1950
1951         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1952
1953         if (surface_queue->num_attached == 0) {
1954                 _tbm_surf_queue_mutex_unlock();
1955                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1956         }
1957
1958         pthread_mutex_lock(&surface_queue->lock);
1959
1960         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1961                 /* Destroy the surfaces in the free_queue and mark the remaining nodes for deferred deletion */
1962                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1963                         _queue_delete_node(surface_queue, node);
1964
1965                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1966                         node->delete_pending = 1;
1967         } else {
1968                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1969                         _queue_delete_node(surface_queue, node);
1970
1971                 _queue_init(&surface_queue->dirty_queue);
1972                 LIST_INITHEAD(&surface_queue->list);
1973         }
1974
1975         /* Reset queue */
1976         _queue_init(&surface_queue->free_queue);
1977
1978         surface_queue->num_attached = 0;
1979
1980         if (surface_queue->impl && surface_queue->impl->reset)
1981                 surface_queue->impl->reset(surface_queue);
1982
1983         pthread_mutex_unlock(&surface_queue->lock);
1984         pthread_cond_signal(&surface_queue->free_cond);
1985
1986         _tbm_surf_queue_mutex_unlock();
1987
1988         _notify_emit(surface_queue, &surface_queue->reset_noti);
1989
1990         return TBM_SURFACE_QUEUE_ERROR_NONE;
1991 }
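/*
 * Usage sketch (informative, not part of the implementation): free_flush()
 * only drops the buffers currently sitting in the free_queue, while flush()
 * drops (or, in GUARANTEE_CYCLE mode, defers deletion of) every attached
 * buffer and emits reset_noti. "queue" is assumed to be a valid handle.
 *
 *   tbm_surface_queue_free_flush(queue);   // trim idle buffers, e.g. on memory pressure
 *   tbm_surface_queue_flush(queue);        // full flush, e.g. after losing the render context
 */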
1992
1993 tbm_surface_queue_error_e
1994 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1995                         tbm_surface_h *surfaces, int *num)
1996 {
1997         queue_node *node = NULL;
1998
1999         _tbm_surf_queue_mutex_lock();
2000         _tbm_set_last_result(TBM_ERROR_NONE);
2001
2002         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2003                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2004         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2005                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
2006
2007         *num = 0;
2008
2009         pthread_mutex_lock(&surface_queue->lock);
2010
2011         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
2012                 if (node->delete_pending) continue;
2013
2014                 if (surfaces)
2015                         surfaces[*num] = node->surface;
2016
2017                 *num = *num + 1;
2018         }
2019
2020         pthread_mutex_unlock(&surface_queue->lock);
2021
2022         _tbm_surf_queue_mutex_unlock();
2023
2024         return TBM_SURFACE_QUEUE_ERROR_NONE;
2025 }
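/*
 * Usage sketch (informative, not part of the implementation): the usual
 * two-call pattern, querying the count first and then fetching the surfaces.
 * Note that the count may change between the two calls if other threads
 * attach or detach buffers. "queue" is assumed to be a valid handle.
 *
 *   int num = 0;
 *
 *   tbm_surface_queue_get_surfaces(queue, NULL, &num);
 *   if (num > 0) {
 *           tbm_surface_h *surfaces = calloc(num, sizeof(tbm_surface_h));
 *
 *           if (surfaces) {
 *                   tbm_surface_queue_get_surfaces(queue, surfaces, &num);
 *                   // ... inspect surfaces[0 .. num - 1] ...
 *                   free(surfaces);
 *           }
 *   }
 */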
2026
2027 tbm_surface_queue_error_e
2028 tbm_surface_queue_get_acquirable_surfaces(tbm_surface_queue_h surface_queue,
2029                         tbm_surface_h *surfaces, int *num)
2030 {
2031         queue_node *node = NULL;
2032
2033         _tbm_surf_queue_mutex_lock();
2034         _tbm_set_last_result(TBM_ERROR_NONE);
2035 
2036         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2037                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2038         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2039                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
2040 
2041         *num = 0;
2042         pthread_mutex_lock(&surface_queue->lock);
2043
2044         LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head, item_link) {
2045                 if (surfaces)
2046                         surfaces[*num] = node->surface;
2047
2048                 *num = *num + 1;
2049         }
2050
2051         pthread_mutex_unlock(&surface_queue->lock);
2052
2053         _tbm_surf_queue_mutex_unlock();
2054
2055         return TBM_SURFACE_QUEUE_ERROR_NONE;
2056 }
2057
2058 tbm_surface_queue_error_e
2059 tbm_surface_queue_get_trace_surface_num(
2060                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
2061 {
2062         _tbm_surf_queue_mutex_lock();
2063         _tbm_set_last_result(TBM_ERROR_NONE);
2064
2065         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2066                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2067         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2068                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
2069
2070         *num = 0;
2071
2072         pthread_mutex_lock(&surface_queue->lock);
2073
2074         switch (trace) {
2075         case TBM_SURFACE_QUEUE_TRACE_NONE:
2076                 *num = 0;
2077                 break;
2078         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
2079                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2080                 break;
2081         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
2082                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2083                 break;
2084         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
2085                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
2086                 break;
2087         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
2088                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
2089                 break;
2090         default:
2091                 break;
2092         }
2093
2094         pthread_mutex_unlock(&surface_queue->lock);
2095
2096         _tbm_surf_queue_mutex_unlock();
2097
2098         return TBM_SURFACE_QUEUE_ERROR_NONE;
2099 }
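/*
 * Usage sketch (informative, not part of the implementation): the trace
 * counters reflect the last state transition of each attached node, which is
 * useful when debugging buffer starvation. "queue" is assumed to be a valid
 * handle.
 *
 *   int dequeued = 0, acquired = 0;
 *
 *   tbm_surface_queue_get_trace_surface_num(queue, TBM_SURFACE_QUEUE_TRACE_DEQUEUE, &dequeued);
 *   tbm_surface_queue_get_trace_surface_num(queue, TBM_SURFACE_QUEUE_TRACE_ACQUIRE, &acquired);
 *   printf("in flight: %d dequeued, %d acquired\n", dequeued, acquired);
 */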
2100
2101 typedef struct {
2102         int flags;
2103 } tbm_queue_default;
2104
2105 static void
2106 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
2107 {
2108         free(surface_queue->impl_data);
2109 }
2110
2111 static void
2112 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
2113 {
2114         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
2115         tbm_surface_h surface;
2116
2117         if (surface_queue->queue_size == surface_queue->num_attached)
2118                 return;
2119
2120         if (surface_queue->alloc_cb) {
2121                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2122
2123                 /* silent return */
2124                 if (!surface)
2125                         return;
2126
2127                 tbm_surface_internal_ref(surface);
2128         } else {
2129                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2130                                 surface_queue->height,
2131                                 surface_queue->format,
2132                                 data->flags);
2133                 TBM_RETURN_IF_FAIL(surface != NULL);
2134         }
2135
2136         _tbm_surface_queue_attach(surface_queue, surface);
2137         tbm_surface_internal_unref(surface);
2138 }
2139
2140 static const tbm_surface_queue_interface tbm_queue_default_impl = {
2141         NULL,                           /*__tbm_queue_default_init*/
2142         NULL,                           /*__tbm_queue_default_reset*/
2143         __tbm_queue_default_destroy,
2144         __tbm_queue_default_need_attach,
2145         NULL,                           /*__tbm_queue_default_enqueue*/
2146         NULL,                           /*__tbm_queue_default_release*/
2147         NULL,                           /*__tbm_queue_default_dequeue*/
2148         NULL,                           /*__tbm_queue_default_acquire*/
2149         NULL,                           /*__tbm_queue_default_need_detach*/
2150 };
2151
2152 tbm_surface_queue_h
2153 tbm_surface_queue_create(int queue_size, int width,
2154                          int height, int format, int flags)
2155 {
2156         _tbm_surf_queue_mutex_lock();
2157         _tbm_set_last_result(TBM_ERROR_NONE);
2158
2159         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2160         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2161         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2162         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2163
2164         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2165                                             sizeof(struct _tbm_surface_queue));
2166         if (!surface_queue) {
2167                 TBM_ERR("cannot allocate the surface_queue.\n");
2168                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2169                 _tbm_surf_queue_mutex_unlock();
2170                 return NULL;
2171         }
2172
2173         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2174
2175         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
2176                                   sizeof(tbm_queue_default));
2177         if (data == NULL) {
2178                 TBM_ERR("cannot allocate the tbm_queue_default.\n");
2179                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2180                 free(surface_queue);
2181                 _tbm_surf_queue_mutex_unlock();
2182                 return NULL;
2183         }
2184
2185         data->flags = flags;
2186         _tbm_surface_queue_init(surface_queue,
2187                                 queue_size,
2188                                 width, height, format,
2189                                 &tbm_queue_default_impl, data);
2190
2191         _tbm_surf_queue_mutex_unlock();
2192
2193         return surface_queue;
2194 }
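/*
 * Usage sketch (informative, not part of the implementation): a minimal
 * producer loop over a default queue. "render_into()" is a hypothetical
 * drawing helper; error handling is abbreviated.
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_create(3, 1280, 720, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   tbm_surface_h surface = NULL;
 *
 *   if (queue &&
 *       tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           tbm_surface_info_s info;
 *
 *           if (tbm_surface_map(surface, TBM_SURF_OPTION_WRITE, &info) == TBM_SURFACE_ERROR_NONE) {
 *                   render_into(&info);                // hypothetical: fill info.planes[]
 *                   tbm_surface_unmap(surface);
 *           }
 *           tbm_surface_queue_enqueue(queue, surface); // hand the frame to the consumer
 *   }
 */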
2195
2196 typedef struct {
2197         int flags;
2198         queue dequeue_list;
2199 } tbm_queue_sequence;
2200
2201 static void
2202 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2203 {
2204         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2205
2206         _queue_init(&data->dequeue_list);
2207 }
2208
2209 static void
2210 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2211 {
2212         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2213
2214         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2215                 return;
2216
2217         _queue_init(&data->dequeue_list);
2218 }
2219
2220 static void
2221 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2222 {
2223         free(surface_queue->impl_data);
2224 }
2225
2226 static void
2227 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
2228 {
2229         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2230         tbm_surface_h surface;
2231
2232         if (surface_queue->queue_size == surface_queue->num_attached)
2233                 return;
2234
2235         if (surface_queue->alloc_cb) {
2236                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2237
2238                 /* silent return */
2239                 if (!surface)
2240                         return;
2241
2242                 tbm_surface_internal_ref(surface);
2243         } else {
2244                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2245                                 surface_queue->height,
2246                                 surface_queue->format,
2247                                 data->flags);
2248                 TBM_RETURN_IF_FAIL(surface != NULL);
2249         }
2250
2251         _tbm_surface_queue_attach(surface_queue, surface);
2252         tbm_surface_internal_unref(surface);
2253 }
2254
2255 static void
2256 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2257                              queue_node *node)
2258 {
2259         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2260         queue_node *first = NULL;
2261
2262         first = container_of(data->dequeue_list.head.next, first, item_link);
2263         if (first != node) {
2264                 return;
2265         }
2266
2267         node->priv_flags = 0;
2268
2269         _queue_node_pop(&data->dequeue_list, node);
2270         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2271 }
2272
2273 static void
2274 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2275                                 queue_node *node)
2276 {
2277         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2278
2279         if (node->priv_flags) {
2280                 node->priv_flags = 0;
2281                 _queue_node_pop(&data->dequeue_list, node);
2282         }
2283
2284         _tbm_surface_queue_release(surface_queue, node, 1);
2285 }
2286
2287 static queue_node *
2288 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2289                              surface_queue)
2290 {
2291         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2292         queue_node *node;
2293
2294         node = _tbm_surface_queue_dequeue(surface_queue);
2295         if (node) {
2296                 _queue_node_push_back(&data->dequeue_list, node);
2297                 node->priv_flags = 1;
2298         }
2299
2300         return node;
2301 }
2302
2303 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2304         __tbm_queue_sequence_init,
2305         __tbm_queue_sequence_reset,
2306         __tbm_queue_sequence_destroy,
2307         __tbm_queue_sequence_need_attach,
2308         __tbm_queue_sequence_enqueue,
2309         __tbm_queue_sequence_release,
2310         __tbm_queue_sequence_dequeue,
2311         NULL,                                   /*__tbm_queue_sequence_acquire*/
2312         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2313 };
2314
2315 tbm_surface_queue_h
2316 tbm_surface_queue_sequence_create(int queue_size, int width,
2317                                   int height, int format, int flags)
2318 {
2319         _tbm_surf_queue_mutex_lock();
2320         _tbm_set_last_result(TBM_ERROR_NONE);
2321
2322         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2323         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2324         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2325         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2326
2327         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2328                                             sizeof(struct _tbm_surface_queue));
2329         if (surface_queue == NULL) {
2330                 TBM_ERR("cannot allocate the surface_queue.\n");
2331                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2332                 _tbm_surf_queue_mutex_unlock();
2333                 return NULL;
2334         }
2335
2336         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2337
2338         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2339                                    sizeof(tbm_queue_sequence));
2340         if (data == NULL) {
2341                 TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
2342                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2343                 free(surface_queue);
2344                 _tbm_surf_queue_mutex_unlock();
2345                 return NULL;
2346         }
2347
2348         data->flags = flags;
2349         _tbm_surface_queue_init(surface_queue,
2350                                 queue_size,
2351                                 width, height, format,
2352                                 &tbm_queue_sequence_impl, data);
2353
2354         _tbm_surf_queue_mutex_unlock();
2355
2356         return surface_queue;
2357 }
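/*
 * Usage sketch (informative, not part of the implementation): a sequence queue
 * is created like the default queue, but __tbm_queue_sequence_enqueue() above
 * only accepts an enqueue for the oldest outstanding dequeued buffer, so
 * buffers reach the consumer in dequeue order.
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_sequence_create(3, 1920, 1080, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 */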
2358
2359 tbm_surface_queue_error_e
2360 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2361                                   int modes)
2362 {
2363         _tbm_surf_queue_mutex_lock();
2364         _tbm_set_last_result(TBM_ERROR_NONE);
2365
2366         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2367                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2368
2369         pthread_mutex_lock(&surface_queue->lock);
2370
2371         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2372                 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
2373         else
2374                 surface_queue->modes |= modes;
2375
2376         pthread_mutex_unlock(&surface_queue->lock);
2377
2378         _tbm_surf_queue_mutex_unlock();
2379
2380         return TBM_SURFACE_QUEUE_ERROR_NONE;
2381 }
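/*
 * Usage sketch (informative, not part of the implementation): additional modes
 * are OR-ed into the queue. "queue" is assumed to be a valid handle.
 *
 *   // defer deletion of in-flight buffers across reset()/flush():
 *   tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE);
 */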
2382
2383 tbm_surface_queue_error_e
2384 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2385                                   unsigned int sync_count)
2386 {
2387         int dequeue_num, enqueue_num;
2388
2389         _tbm_surf_queue_mutex_lock();
2390         _tbm_set_last_result(TBM_ERROR_NONE);
2391
2392         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2393                                    TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2394
2395         pthread_mutex_lock(&surface_queue->lock);
2396
2397         dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2398         enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2399
2400         if (dequeue_num + sync_count == 0)
2401                 surface_queue->acquire_sync_count = enqueue_num;
2402         else
2403                 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2404
2405         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2406                                 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2407
2408         pthread_mutex_unlock(&surface_queue->lock);
2409
2410         _tbm_surf_queue_mutex_unlock();
2411
2412         return TBM_SURFACE_QUEUE_ERROR_NONE;
2413 }