platform/core/uifw/libtbm.git: src/tbm_surface_queue.c (commit: "Fix tiled format issue")
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36 #include <time.h>
37
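/* Bit flags for _queue_get_node(): select which container(s) to search for a
   surface; passing 0 searches all three. */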
38 #define FREE_QUEUE      1
39 #define DIRTY_QUEUE     2
40 #define NODE_LIST       4
41
42 static tbm_bufmgr g_surf_queue_bufmgr;
43 static pthread_mutex_t tbm_surf_queue_lock;
44 void _tbm_surface_queue_mutex_unlock(void);
45
46 /* check condition */
47 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
48         if (!(cond)) {\
49                 TBM_ERR("'%s' failed.\n", #cond);\
50                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
51                 _tbm_surf_queue_mutex_unlock();\
52                 return;\
53         } \
54 }
55
56 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
57         if (!(cond)) {\
58                 TBM_ERR("'%s' failed.\n", #cond);\
59                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
60                 _tbm_surf_queue_mutex_unlock();\
61                 return val;\
62         } \
63 }
64
65 typedef enum _queue_node_type {
66         QUEUE_NODE_TYPE_NONE,
67         QUEUE_NODE_TYPE_DEQUEUE,
68         QUEUE_NODE_TYPE_ENQUEUE,
69         QUEUE_NODE_TYPE_ACQUIRE,
70         QUEUE_NODE_TYPE_RELEASE
71 } Queue_Node_Type;
72
73 typedef struct {
74         struct list_head head;
75         int count;
76 } queue;
77
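/* One queue_node exists per attached tbm_surface. 'link' keeps the node in
   surface_queue->list for as long as the surface is attached, while
   'item_link' moves the node between free_queue and dirty_queue (and is
   unlinked while the client holds the surface after dequeue/acquire).
   'delete_pending' marks a node whose surface must be destroyed as soon as
   the client hands it back. */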
78 typedef struct {
79         tbm_surface_h surface;
80
81         struct list_head item_link;
82         struct list_head link;
83
84         Queue_Node_Type type;
85
86         unsigned int priv_flags;        /*for each queue*/
87
88         int delete_pending;
89 } queue_node;
90
91 typedef struct {
92         struct list_head link;
93
94         tbm_surface_queue_notify_cb cb;
95         void *data;
96 } queue_notify;
97
98 typedef struct {
99         struct list_head link;
100
101         tbm_surface_queue_trace_cb cb;
102         void *data;
103 } queue_trace;
104
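/* Backend hooks. enqueue/release/dequeue/acquire fall back to the default
   _tbm_surface_queue_* helpers below when left NULL; need_attach is invoked
   when the free_queue is empty and a dequeue is requested, need_detach when
   more surfaces are attached than queue_size allows. */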
105 typedef struct _tbm_surface_queue_interface {
106         void (*init)(tbm_surface_queue_h queue);
107         void (*reset)(tbm_surface_queue_h queue);
108         void (*destroy)(tbm_surface_queue_h queue);
109         void (*need_attach)(tbm_surface_queue_h queue);
110
111         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
112         void (*release)(tbm_surface_queue_h queue, queue_node *node);
113         queue_node *(*dequeue)(tbm_surface_queue_h queue);
114         queue_node *(*acquire)(tbm_surface_queue_h queue);
115         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
116 } tbm_surface_queue_interface;
117
118 struct _tbm_surface_queue {
119         int width;
120         int height;
121         int format;
122         int queue_size;
123         int num_attached;
124
125         queue free_queue;
126         queue dirty_queue;
127         struct list_head list;
128
129         struct list_head destory_noti;
130         struct list_head dequeuable_noti;
131         struct list_head dequeue_noti;
132         struct list_head can_dequeue_noti;
133         struct list_head acquirable_noti;
134         struct list_head reset_noti;
135         struct list_head trace_noti;
136
137         pthread_mutex_t lock;
138         pthread_cond_t free_cond;
139         pthread_cond_t dirty_cond;
140
141         const tbm_surface_queue_interface *impl;
142         void *impl_data;
143
144         //For external buffer allocation
145         tbm_surface_alloc_cb alloc_cb;
146         tbm_surface_free_cb free_cb;
147         void *alloc_cb_data;
148
149         struct list_head item_link; /* link of surface queue */
150
151         int modes;
152         unsigned int enqueue_sync_count;
153         unsigned int acquire_sync_count;
154 };
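/* Locking: the file-global tbm_surf_queue_lock guards handle validation and
   the bufmgr's surf_queue_list, while surface_queue->lock guards the
   per-queue state (free_queue, dirty_queue, list, counters). Notify and
   trace callbacks are emitted after both locks have been released, so a
   callback may re-enter the surface-queue API. */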
155
156 static bool
157 _tbm_surf_queue_mutex_init(void)
158 {
159         static bool tbm_surf_queue_mutex_init = false;
160
161         if (tbm_surf_queue_mutex_init)
162                 return true;
163
164         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
165                 TBM_ERR("fail: pthread_mutex_init\n");
166                 return false;
167         }
168
169         tbm_surf_queue_mutex_init = true;
170
171         return true;
172 }
173
174 static void
175 _tbm_surf_queue_mutex_lock(void)
176 {
177         if (!_tbm_surf_queue_mutex_init()) {
178                 TBM_ERR("fail: _tbm_surf_queue_mutex_init\n");
179                 return;
180         }
181
182         pthread_mutex_lock(&tbm_surf_queue_lock);
183 }
184
185 static void
186 _tbm_surf_queue_mutex_unlock(void)
187 {
188         pthread_mutex_unlock(&tbm_surf_queue_lock);
189 }
190
191 static void
192 _init_tbm_surf_queue_bufmgr(void)
193 {
194         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
195 }
196
197 static void
198 _deinit_tbm_surf_queue_bufmgr(void)
199 {
200         if (!g_surf_queue_bufmgr)
201                 return;
202
203         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
204         g_surf_queue_bufmgr = NULL;
205 }
206
207 static int
208 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
209 {
210         tbm_surface_queue_h old_data = NULL;
211
212         if (surface_queue == NULL) {
213                 TBM_ERR("error: surface_queue is NULL.\n");
214                 return 0;
215         }
216
217         if (g_surf_queue_bufmgr == NULL) {
218                 TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n");
219                 return 0;
220         }
221
222         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
223                 TBM_ERR("error: surf_queue_list is empty\n");
224                 return 0;
225         }
226
227         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
228                                 item_link) {
229                 if (old_data == surface_queue) {
230                         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
231                         return 1;
232                 }
233         }
234
235         TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
236
237         return 0;
238 }
239
240 static queue_node *
241 _queue_node_create(void)
242 {
243         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
244
245         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
246
247         return node;
248 }
249
250 static void
251 _queue_node_delete(queue_node *node)
252 {
253         LIST_DEL(&node->item_link);
254         LIST_DEL(&node->link);
255         free(node);
256 }
257
258 static int
259 _queue_is_empty(queue *queue)
260 {
261         if (LIST_IS_EMPTY(&queue->head))
262                 return 1;
263
264         return 0;
265 }
266
267 static void
268 _queue_node_push_back(queue *queue, queue_node *node)
269 {
270         LIST_ADDTAIL(&node->item_link, &queue->head);
271         queue->count++;
272 }
273
274 static void
275 _queue_node_push_front(queue *queue, queue_node *node)
276 {
277         LIST_ADD(&node->item_link, &queue->head);
278         queue->count++;
279 }
280
281 static queue_node *
282 _queue_node_pop_front(queue *queue)
283 {
284         queue_node *node;
285
286         if (!queue->head.next) return NULL;
287         if (!queue->count) return NULL;
288
289         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
290
291         LIST_DELINIT(&node->item_link);
292         queue->count--;
293
294         return node;
295 }
296
297 static queue_node *
298 _queue_node_pop(queue *queue, queue_node *node)
299 {
300         LIST_DELINIT(&node->item_link);
301         queue->count--;
302
303         return node;
304 }
305
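/* Find the node that owns 'surface' in the containers selected by 'type'
   (0 = all); the container it was found in is reported through 'out_type',
   and NULL is returned when the surface is not attached to this queue. */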
306 static queue_node *
307 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
308                 tbm_surface_h surface, int *out_type)
309 {
310         queue_node *node = NULL;
311
312         if (type == 0)
313                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
314         if (out_type)
315                 *out_type = 0;
316
317         if (type & FREE_QUEUE) {
318                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
319                                          item_link) {
320                         if (node->surface == surface) {
321                                 if (out_type)
322                                         *out_type = FREE_QUEUE;
323
324                                 return node;
325                         }
326                 }
327         }
328
329         if (type & DIRTY_QUEUE) {
330                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
331                                          item_link) {
332                         if (node->surface == surface) {
333                                 if (out_type)
334                                         *out_type = DIRTY_QUEUE;
335
336                                 return node;
337                         }
338                 }
339         }
340
341         if (type & NODE_LIST) {
342                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
343                         if (node->surface == surface) {
344                                 if (out_type)
345                                         *out_type = NODE_LIST;
346
347                                 return node;
348                         }
349                 }
350         }
351
352         TBM_ERR("failed to get the queue_node.\n");
353
354         return NULL;
355 }
356
357 static void
358 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
359 {
360         if (node->surface) {
361                 if (surface_queue->free_cb) {
362                         surface_queue->free_cb(surface_queue,
363                                         surface_queue->alloc_cb_data,
364                                         node->surface);
365                 }
366
367                 tbm_surface_destroy(node->surface);
368         }
369
370         _queue_node_delete(node);
371 }
372
373 static void
374 _queue_init(queue *queue)
375 {
376         LIST_INITHEAD(&queue->head);
377
378         queue->count = 0;
379 }
380
381 static void
382 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
383             void *data)
384 {
385         TBM_RETURN_IF_FAIL(cb != NULL);
386
387         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
388
389         TBM_RETURN_IF_FAIL(item != NULL);
390
391         LIST_INITHEAD(&item->link);
392         item->cb = cb;
393         item->data = data;
394
395         LIST_ADDTAIL(&item->link, list);
396 }
397
398 static void
399 _notify_remove(struct list_head *list,
400                tbm_surface_queue_notify_cb cb, void *data)
401 {
402         queue_notify *item = NULL, *tmp;
403
404         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
405                 if (item->cb == cb && item->data == data) {
406                         LIST_DEL(&item->link);
407                         free(item);
408                         return;
409                 }
410         }
411
412         TBM_ERR("Cannot find notify\n");
413 }
414
415 static void
416 _notify_remove_all(struct list_head *list)
417 {
418         queue_notify *item = NULL, *tmp;
419
420         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
421                 LIST_DEL(&item->link);
422                 free(item);
423         }
424 }
425
426 static void
427 _notify_emit(tbm_surface_queue_h surface_queue,
428              struct list_head *list)
429 {
430         queue_notify *item = NULL, *tmp;
431 
432         /*
433                 item->cb is a callback provided from outside libtbm.
434                 The tbm user may remove entries from the list inside the callback,
435                 so we have to use LIST_FOR_EACH_ENTRY_SAFE.
436         */
437         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
438                 item->cb(surface_queue, item->data);
439 }
440
441 static void
442 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
443             void *data)
444 {
445         TBM_RETURN_IF_FAIL(cb != NULL);
446
447         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
448
449         TBM_RETURN_IF_FAIL(item != NULL);
450
451         LIST_INITHEAD(&item->link);
452         item->cb = cb;
453         item->data = data;
454
455         LIST_ADDTAIL(&item->link, list);
456 }
457
458 static void
459 _trace_remove(struct list_head *list,
460                tbm_surface_queue_trace_cb cb, void *data)
461 {
462         queue_trace *item = NULL, *tmp;
463
464         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
465                 if (item->cb == cb && item->data == data) {
466                         LIST_DEL(&item->link);
467                         free(item);
468                         return;
469                 }
470         }
471
472         TBM_ERR("Cannot find notify\n");
473 }
474
475 static void
476 _trace_remove_all(struct list_head *list)
477 {
478         queue_trace *item = NULL, *tmp;
479
480         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
481                 LIST_DEL(&item->link);
482                 free(item);
483         }
484 }
485
486 static void
487 _trace_emit(tbm_surface_queue_h surface_queue,
488              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
489 {
490         queue_trace *item = NULL, *tmp;
491 
492         /*
493                 item->cb is a callback provided from outside libtbm.
494                 The tbm user may remove entries from the list inside the callback,
495                 so we have to use LIST_FOR_EACH_ENTRY_SAFE.
496         */
497         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
498                 item->cb(surface_queue, surface, trace, item->data);
499 }
500
501 static int
502 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
503 {
504         queue_node *node = NULL;
505         int count = 0;
506
507         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
508                 if (node->type == type)
509                         count++;
510         }
511
512         return count;
513 }
514
515 static void
516 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
517                           tbm_surface_h surface)
518 {
519         queue_node *node;
520
521         node = _queue_node_create();
522         TBM_RETURN_IF_FAIL(node != NULL);
523
524         tbm_surface_internal_ref(surface);
525         node->surface = surface;
526
527         LIST_ADDTAIL(&node->link, &surface_queue->list);
528         surface_queue->num_attached++;
529         _queue_node_push_back(&surface_queue->free_queue, node);
530 }
531
532 static void
533 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
534                           tbm_surface_h surface)
535 {
536         queue_node *node;
537         int queue_type;
538
539         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
540         if (node) {
541                 _queue_delete_node(surface_queue, node);
542                 surface_queue->num_attached--;
543         }
544 }
545
546 static void
547 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
548                            queue_node *node, int push_back)
549 {
550         if (push_back)
551                 _queue_node_push_back(&surface_queue->dirty_queue, node);
552         else
553                 _queue_node_push_front(&surface_queue->dirty_queue, node);
554 }
555
556 static queue_node *
557 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
558 {
559         queue_node *node;
560
561         node = _queue_node_pop_front(&surface_queue->free_queue);
562
563         return node;
564 }
565
566 static queue_node *
567 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
568 {
569         queue_node *node;
570
571         if (_queue_is_empty(&surface_queue->dirty_queue))
572                 return NULL;
573
574         node = _queue_node_pop_front(&surface_queue->dirty_queue);
575
576         return node;
577 }
578
579 static void
580 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
581                            queue_node *node, int push_back)
582 {
583         if (push_back)
584                 _queue_node_push_back(&surface_queue->free_queue, node);
585         else
586                 _queue_node_push_front(&surface_queue->free_queue, node);
587 }
588
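/* Common constructor used by the concrete queue implementations: sets up the
   per-queue lock and CLOCK_MONOTONIC condition variables, initializes the
   free/dirty queues and notification lists, runs impl->init(), and finally
   links the new queue into g_surf_queue_bufmgr->surf_queue_list, which is
   what _tbm_surface_queue_is_valid() later checks handles against. */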
589 static void
590 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
591                         int queue_size,
592                         int width, int height, int format,
593                         const tbm_surface_queue_interface *impl, void *data)
594 {
595         pthread_condattr_t free_attr, dirty_attr;
596
597         TBM_RETURN_IF_FAIL(surface_queue != NULL);
598         TBM_RETURN_IF_FAIL(impl != NULL);
599
600         if (!g_surf_queue_bufmgr)
601                 _init_tbm_surf_queue_bufmgr();
602
603         pthread_mutex_init(&surface_queue->lock, NULL);
604
605         pthread_condattr_init(&free_attr);
606         pthread_condattr_setclock(&free_attr, CLOCK_MONOTONIC);
607         pthread_cond_init(&surface_queue->free_cond, &free_attr);
608         pthread_condattr_destroy(&free_attr);
609
610         pthread_condattr_init(&dirty_attr);
611         pthread_condattr_setclock(&dirty_attr, CLOCK_MONOTONIC);
612         pthread_cond_init(&surface_queue->dirty_cond, &dirty_attr);
613         pthread_condattr_destroy(&dirty_attr);
614
615         surface_queue->queue_size = queue_size;
616         surface_queue->width = width;
617         surface_queue->height = height;
618         surface_queue->format = format;
619         surface_queue->impl = impl;
620         surface_queue->impl_data = data;
621         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
622
623         _queue_init(&surface_queue->free_queue);
624         _queue_init(&surface_queue->dirty_queue);
625         LIST_INITHEAD(&surface_queue->list);
626
627         LIST_INITHEAD(&surface_queue->destory_noti);
628         LIST_INITHEAD(&surface_queue->dequeuable_noti);
629         LIST_INITHEAD(&surface_queue->dequeue_noti);
630         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
631         LIST_INITHEAD(&surface_queue->acquirable_noti);
632         LIST_INITHEAD(&surface_queue->reset_noti);
633         LIST_INITHEAD(&surface_queue->trace_noti);
634
635         if (surface_queue->impl && surface_queue->impl->init)
636                 surface_queue->impl->init(surface_queue);
637
638         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
639 }
640
641 tbm_surface_queue_error_e
642 tbm_surface_queue_add_destroy_cb(
643         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
644         void *data)
645 {
646         _tbm_surf_queue_mutex_lock();
647         _tbm_set_last_result(TBM_ERROR_NONE);
648
649         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
650                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
651         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
652                                TBM_ERROR_INVALID_PARAMETER);
653
654         pthread_mutex_lock(&surface_queue->lock);
655
656         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
657
658         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
659
660         pthread_mutex_unlock(&surface_queue->lock);
661
662         _tbm_surf_queue_mutex_unlock();
663
664         return TBM_SURFACE_QUEUE_ERROR_NONE;
665 }
666
667 tbm_surface_queue_error_e
668 tbm_surface_queue_remove_destroy_cb(
669         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
670         void *data)
671 {
672         _tbm_surf_queue_mutex_lock();
673         _tbm_set_last_result(TBM_ERROR_NONE);
674
675         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
676                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
677
678         pthread_mutex_lock(&surface_queue->lock);
679
680         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
681
682         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
683
684         pthread_mutex_unlock(&surface_queue->lock);
685
686         _tbm_surf_queue_mutex_unlock();
687
688         return TBM_SURFACE_QUEUE_ERROR_NONE;
689 }
690
691 tbm_surface_queue_error_e
692 tbm_surface_queue_add_dequeuable_cb(
693         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
694         void *data)
695 {
696         _tbm_surf_queue_mutex_lock();
697         _tbm_set_last_result(TBM_ERROR_NONE);
698
699         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
700                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
701         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
702                                TBM_ERROR_INVALID_PARAMETER);
703
704         pthread_mutex_lock(&surface_queue->lock);
705
706         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
707
708         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
709
710         pthread_mutex_unlock(&surface_queue->lock);
711
712         _tbm_surf_queue_mutex_unlock();
713
714         return TBM_SURFACE_QUEUE_ERROR_NONE;
715 }
716
717 tbm_surface_queue_error_e
718 tbm_surface_queue_remove_dequeuable_cb(
719         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
720         void *data)
721 {
722         _tbm_surf_queue_mutex_lock();
723         _tbm_set_last_result(TBM_ERROR_NONE);
724
725         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
726                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
727
728         pthread_mutex_lock(&surface_queue->lock);
729
730         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
731
732         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
733
734         pthread_mutex_unlock(&surface_queue->lock);
735
736         _tbm_surf_queue_mutex_unlock();
737
738         return TBM_SURFACE_QUEUE_ERROR_NONE;
739 }
740
741 tbm_surface_queue_error_e
742 tbm_surface_queue_add_dequeue_cb(
743         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
744         void *data)
745 {
746         _tbm_surf_queue_mutex_lock();
747         _tbm_set_last_result(TBM_ERROR_NONE);
748
749         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
750                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
751         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
752                                TBM_ERROR_INVALID_PARAMETER);
753
754         pthread_mutex_lock(&surface_queue->lock);
755
756         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
757
758         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
759
760         pthread_mutex_unlock(&surface_queue->lock);
761
762         _tbm_surf_queue_mutex_unlock();
763
764         return TBM_SURFACE_QUEUE_ERROR_NONE;
765 }
766
767 tbm_surface_queue_error_e
768 tbm_surface_queue_remove_dequeue_cb(
769         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
770         void *data)
771 {
772         _tbm_surf_queue_mutex_lock();
773         _tbm_set_last_result(TBM_ERROR_NONE);
774
775         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
776                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
777
778         pthread_mutex_lock(&surface_queue->lock);
779
780         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
781
782         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
783
784         pthread_mutex_unlock(&surface_queue->lock);
785
786         _tbm_surf_queue_mutex_unlock();
787
788         return TBM_SURFACE_QUEUE_ERROR_NONE;
789 }
790
791 tbm_surface_queue_error_e
792 tbm_surface_queue_add_can_dequeue_cb(
793         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
794         void *data)
795 {
796         _tbm_surf_queue_mutex_lock();
797         _tbm_set_last_result(TBM_ERROR_NONE);
798
799         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
800                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
801         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
802                                TBM_ERROR_INVALID_PARAMETER);
803
804         pthread_mutex_lock(&surface_queue->lock);
805
806         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
807
808         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
809
810         pthread_mutex_unlock(&surface_queue->lock);
811
812         _tbm_surf_queue_mutex_unlock();
813
814         return TBM_SURFACE_QUEUE_ERROR_NONE;
815 }
816
817 tbm_surface_queue_error_e
818 tbm_surface_queue_remove_can_dequeue_cb(
819         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
820         void *data)
821 {
822         _tbm_surf_queue_mutex_lock();
823         _tbm_set_last_result(TBM_ERROR_NONE);
824
825         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
826                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
827
828         pthread_mutex_lock(&surface_queue->lock);
829
830         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
831
832         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
833
834         pthread_mutex_unlock(&surface_queue->lock);
835
836         _tbm_surf_queue_mutex_unlock();
837
838         return TBM_SURFACE_QUEUE_ERROR_NONE;
839 }
840
841 tbm_surface_queue_error_e
842 tbm_surface_queue_add_acquirable_cb(
843         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
844         void *data)
845 {
846         _tbm_surf_queue_mutex_lock();
847         _tbm_set_last_result(TBM_ERROR_NONE);
848
849         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
850                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
851         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
852                                TBM_ERROR_INVALID_PARAMETER);
853
854         pthread_mutex_lock(&surface_queue->lock);
855
856         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
857
858         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
859
860         pthread_mutex_unlock(&surface_queue->lock);
861
862         _tbm_surf_queue_mutex_unlock();
863
864         return TBM_SURFACE_QUEUE_ERROR_NONE;
865 }
866
867 tbm_surface_queue_error_e
868 tbm_surface_queue_remove_acquirable_cb(
869         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
870         void *data)
871 {
872         _tbm_surf_queue_mutex_lock();
873         _tbm_set_last_result(TBM_ERROR_NONE);
874
875         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
876                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
877
878         pthread_mutex_lock(&surface_queue->lock);
879
880         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
881
882         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
883
884         pthread_mutex_unlock(&surface_queue->lock);
885
886         _tbm_surf_queue_mutex_unlock();
887
888         return TBM_SURFACE_QUEUE_ERROR_NONE;
889 }
890
891 tbm_surface_queue_error_e
892 tbm_surface_queue_add_trace_cb(
893         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
894         void *data)
895 {
896         _tbm_surf_queue_mutex_lock();
897         _tbm_set_last_result(TBM_ERROR_NONE);
898
899         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
900                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
901         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
902                                TBM_ERROR_INVALID_PARAMETER);
903
904         pthread_mutex_lock(&surface_queue->lock);
905
906         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
907
908         _trace_add(&surface_queue->trace_noti, trace_cb, data);
909
910         pthread_mutex_unlock(&surface_queue->lock);
911
912         _tbm_surf_queue_mutex_unlock();
913
914         return TBM_SURFACE_QUEUE_ERROR_NONE;
915 }
916
917 tbm_surface_queue_error_e
918 tbm_surface_queue_remove_trace_cb(
919         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
920         void *data)
921 {
922         _tbm_surf_queue_mutex_lock();
923         _tbm_set_last_result(TBM_ERROR_NONE);
924
925         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
926                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
927
928         pthread_mutex_lock(&surface_queue->lock);
929
930         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
931
932         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
933
934         pthread_mutex_unlock(&surface_queue->lock);
935
936         _tbm_surf_queue_mutex_unlock();
937
938         return TBM_SURFACE_QUEUE_ERROR_NONE;
939 }
940
941 tbm_surface_queue_error_e
942 tbm_surface_queue_set_alloc_cb(
943         tbm_surface_queue_h surface_queue,
944         tbm_surface_alloc_cb alloc_cb,
945         tbm_surface_free_cb free_cb,
946         void *data)
947 {
948         _tbm_surf_queue_mutex_lock();
949         _tbm_set_last_result(TBM_ERROR_NONE);
950
951         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
952                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
953
954         pthread_mutex_lock(&surface_queue->lock);
955
956         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
957
958         surface_queue->alloc_cb = alloc_cb;
959         surface_queue->free_cb = free_cb;
960         surface_queue->alloc_cb_data = data;
961
962         pthread_mutex_unlock(&surface_queue->lock);
963
964         _tbm_surf_queue_mutex_unlock();
965
966         return TBM_SURFACE_QUEUE_ERROR_NONE;
967 }
968
969 int
970 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
971 {
972         int width;
973
974         _tbm_surf_queue_mutex_lock();
975         _tbm_set_last_result(TBM_ERROR_NONE);
976
977         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
978
979         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
980
981         width = surface_queue->width;
982
983         _tbm_surf_queue_mutex_unlock();
984
985         return width;
986 }
987
988 int
989 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
990 {
991         int height;
992
993         _tbm_surf_queue_mutex_lock();
994         _tbm_set_last_result(TBM_ERROR_NONE);
995
996         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
997
998         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
999
1000         height = surface_queue->height;
1001
1002         _tbm_surf_queue_mutex_unlock();
1003
1004         return height;
1005 }
1006
1007 int
1008 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
1009 {
1010         int format;
1011
1012         _tbm_surf_queue_mutex_lock();
1013         _tbm_set_last_result(TBM_ERROR_NONE);
1014
1015         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1016
1017         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1018
1019         format = surface_queue->format;
1020
1021         _tbm_surf_queue_mutex_unlock();
1022
1023         return format;
1024 }
1025
1026 int
1027 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1028 {
1029         int queue_size;
1030
1031         _tbm_surf_queue_mutex_lock();
1032         _tbm_set_last_result(TBM_ERROR_NONE);
1033
1034         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1035
1036         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1037
1038         queue_size = surface_queue->queue_size;
1039
1040         _tbm_surf_queue_mutex_unlock();
1041
1042         return queue_size;
1043 }
1044
1045 tbm_surface_queue_error_e
1046 tbm_surface_queue_add_reset_cb(
1047         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1048         void *data)
1049 {
1050         _tbm_surf_queue_mutex_lock();
1051         _tbm_set_last_result(TBM_ERROR_NONE);
1052
1053         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1054                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1055         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
1056                                TBM_ERROR_INVALID_PARAMETER);
1057
1058         pthread_mutex_lock(&surface_queue->lock);
1059
1060         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1061
1062         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1063
1064         pthread_mutex_unlock(&surface_queue->lock);
1065
1066         _tbm_surf_queue_mutex_unlock();
1067
1068         return TBM_SURFACE_QUEUE_ERROR_NONE;
1069 }
1070
1071 tbm_surface_queue_error_e
1072 tbm_surface_queue_remove_reset_cb(
1073         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1074         void *data)
1075 {
1076         _tbm_surf_queue_mutex_lock();
1077         _tbm_set_last_result(TBM_ERROR_NONE);
1078
1079         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1080                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1081
1082         pthread_mutex_lock(&surface_queue->lock);
1083
1084         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1085
1086         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1087
1088         pthread_mutex_unlock(&surface_queue->lock);
1089
1090         _tbm_surf_queue_mutex_unlock();
1091
1092         return TBM_SURFACE_QUEUE_ERROR_NONE;
1093 }
1094
1095 tbm_surface_queue_error_e
1096 tbm_surface_queue_enqueue(tbm_surface_queue_h
1097                           surface_queue, tbm_surface_h surface)
1098 {
1099         queue_node *node;
1100         int queue_type;
1101
1102         _tbm_surf_queue_mutex_lock();
1103         _tbm_set_last_result(TBM_ERROR_NONE);
1104
1105         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1106                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1107         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1108                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1109
1110         if (b_dump_queue)
1111                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1112
1113         pthread_mutex_lock(&surface_queue->lock);
1114
1115         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1116
1117         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1118         if (node == NULL || queue_type != NODE_LIST) {
1119                 TBM_ERR("tbm_surface_queue_enqueue::Surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1120                         node, queue_type);
1121                 pthread_mutex_unlock(&surface_queue->lock);
1122
1123                 _tbm_surf_queue_mutex_unlock();
1124
1125                 if (!node) {
1126                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1127                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1128                 } else {
1129                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1130                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1131                 }
1132         }
1133
1134         if (surface_queue->impl && surface_queue->impl->enqueue)
1135                 surface_queue->impl->enqueue(surface_queue, node);
1136         else
1137                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1138
1139         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1140                 TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1141                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
1142                 pthread_mutex_unlock(&surface_queue->lock);
1143
1144                 _tbm_surf_queue_mutex_unlock();
1145                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
1146         }
1147
1148         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1149
1150         if (surface_queue->enqueue_sync_count == 1) {
1151                 tbm_surface_info_s info;
1152                 int ret;
1153
1154                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1155                 if (ret == TBM_SURFACE_ERROR_NONE)
1156                         tbm_surface_unmap(surface);
1157         }
1158
1159         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1160
1161         pthread_mutex_unlock(&surface_queue->lock);
1162         pthread_cond_signal(&surface_queue->dirty_cond);
1163
1164         _tbm_surf_queue_mutex_unlock();
1165
1166         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1167
1168         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1169
1170         return TBM_SURFACE_QUEUE_ERROR_NONE;
1171 }
1172
1173 tbm_surface_queue_error_e
1174 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1175                           surface_queue, tbm_surface_h surface)
1176 {
1177         queue_node *node;
1178         int queue_type;
1179
1180         _tbm_surf_queue_mutex_lock();
1181         _tbm_set_last_result(TBM_ERROR_NONE);
1182
1183         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1184                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1185         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1186                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1187
1188         pthread_mutex_lock(&surface_queue->lock);
1189
1190         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1191
1192         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1193         if (node == NULL || queue_type != NODE_LIST) {
1194                 TBM_ERR("tbm_surface_queue_cancel_dequeue::Surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1195                         node, queue_type);
1196                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1197                 pthread_mutex_unlock(&surface_queue->lock);
1198
1199                 _tbm_surf_queue_mutex_unlock();
1200                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1201         }
1202
1203         if (node->delete_pending) {
1204                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1205
1206                 _queue_delete_node(surface_queue, node);
1207
1208                 pthread_mutex_unlock(&surface_queue->lock);
1209
1210                 _tbm_surf_queue_mutex_unlock();
1211
1212                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1213
1214                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1215         }
1216
1217         if (surface_queue->queue_size < surface_queue->num_attached) {
1218                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1219
1220                 if (surface_queue->impl && surface_queue->impl->need_detach)
1221                         surface_queue->impl->need_detach(surface_queue, node);
1222                 else
1223                         _tbm_surface_queue_detach(surface_queue, surface);
1224
1225                 pthread_mutex_unlock(&surface_queue->lock);
1226
1227                 _tbm_surf_queue_mutex_unlock();
1228
1229                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1230
1231                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1232         }
1233
1234         if (surface_queue->impl && surface_queue->impl->release)
1235                 surface_queue->impl->release(surface_queue, node);
1236         else
1237                 _tbm_surface_queue_release(surface_queue, node, 1);
1238
1239         if (_queue_is_empty(&surface_queue->free_queue)) {
1240                 TBM_ERR("surface_queue->free_queue is empty.\n");
1241                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1242                 pthread_mutex_unlock(&surface_queue->lock);
1243
1244                 _tbm_surf_queue_mutex_unlock();
1245                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1246         }
1247
1248         node->type = QUEUE_NODE_TYPE_RELEASE;
1249
1250         pthread_mutex_unlock(&surface_queue->lock);
1251         pthread_cond_signal(&surface_queue->free_cond);
1252
1253         _tbm_surf_queue_mutex_unlock();
1254
1255         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1256
1257         return TBM_SURFACE_QUEUE_ERROR_NONE;
1258 }
1259
1260 tbm_surface_queue_error_e
1261 tbm_surface_queue_dequeue(tbm_surface_queue_h
1262                           surface_queue, tbm_surface_h *surface)
1263 {
1264         queue_node *node;
1265
1266         _tbm_surf_queue_mutex_lock();
1267         _tbm_set_last_result(TBM_ERROR_NONE);
1268
1269         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1270                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1271         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1272                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1273
1274         *surface = NULL;
1275
1276         pthread_mutex_lock(&surface_queue->lock);
1277
1278         if (_queue_is_empty(&surface_queue->free_queue)) {
1279                 if (surface_queue->impl && surface_queue->impl->need_attach)
1280                         surface_queue->impl->need_attach(surface_queue);
1281         }
1282
1283         if (surface_queue->impl && surface_queue->impl->dequeue)
1284                 node = surface_queue->impl->dequeue(surface_queue);
1285         else
1286                 node = _tbm_surface_queue_dequeue(surface_queue);
1287
1288         if (node == NULL || node->surface == NULL) {
1289                 TBM_ERR("_queue_node_pop_front failed\n");
1290                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1291                 pthread_mutex_unlock(&surface_queue->lock);
1292
1293                 _tbm_surf_queue_mutex_unlock();
1294                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1295         }
1296
1297         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1298         *surface = node->surface;
1299
1300         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1301
1302         pthread_mutex_unlock(&surface_queue->lock);
1303
1304         _tbm_surf_queue_mutex_unlock();
1305
1306         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1307
1308         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1309
1310         return TBM_SURFACE_QUEUE_ERROR_NONE;
1311 }
1312
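/* Wait until a surface can be dequeued or 'ms_timeout' elapses. The
   can_dequeue callbacks are emitted with the global lock dropped (they may
   attach buffers or even destroy the queue, hence the re-validation below),
   and the wait uses an absolute CLOCK_MONOTONIC deadline to match the
   condattr clock chosen in _tbm_surface_queue_init(). */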
1313 tbm_surface_queue_error_e
1314 tbm_surface_queue_can_dequeue_wait_timeout(tbm_surface_queue_h surface_queue, int ms_timeout)
1315 {
1316         int ret;
1317         struct timespec tp;
1318
1319         _tbm_surf_queue_mutex_lock();
1320         _tbm_set_last_result(TBM_ERROR_NONE);
1321
1322         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1323                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1324
1325         _tbm_surf_queue_mutex_unlock();
1326
1327         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1328
1329         _tbm_surf_queue_mutex_lock();
1330
1331         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1332                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1333
1334         pthread_mutex_lock(&surface_queue->lock);
1335
1336         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1337
1338         if (_queue_is_empty(&surface_queue->free_queue)) {
1339                 if (surface_queue->impl && surface_queue->impl->need_attach)
1340                         surface_queue->impl->need_attach(surface_queue);
1341         }
1342
1343         if (!_queue_is_empty(&surface_queue->free_queue)) {
1344                 pthread_mutex_unlock(&surface_queue->lock);
1345                 _tbm_surf_queue_mutex_unlock();
1346                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1347         }
1348
1349         _tbm_surf_queue_mutex_unlock();
1350
1351         while (1) {
1352                 clock_gettime(CLOCK_MONOTONIC, &tp);
1353
1354                 if (ms_timeout >= 1000)
1355                         tp.tv_sec += ms_timeout / 1000;
1356
1357                 tp.tv_nsec += (ms_timeout % 1000) * 1000000;
1358
1359                 if (tp.tv_nsec >= 1000000000L) {
1360                         tp.tv_sec++;
1361                         tp.tv_nsec -= 1000000000L;
1362                 }
1363
1364                 ret = pthread_cond_timedwait(&surface_queue->free_cond, &surface_queue->lock, &tp);
1365                 if (ret) {
1366                         if (ret == ETIMEDOUT) {
1367                                 TBM_ERR("surface_queue:%p can dequeue wait timeout", surface_queue);
1368                                 pthread_mutex_unlock(&surface_queue->lock);
1369                                 return TBM_SURFACE_QUEUE_ERROR_TIMEOUT;
1370                         } else {
1371                                 TBM_INFO("surface_queue:%p timedwait error retry wait", surface_queue);
1372                         }
1373                 } else {
1374                         pthread_mutex_unlock(&surface_queue->lock);
1375                         return TBM_SURFACE_QUEUE_ERROR_NONE;
1376                 }
1377         }
1378 }
1379
1380 int
1381 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1382 {
1383         _tbm_surf_queue_mutex_lock();
1384         _tbm_set_last_result(TBM_ERROR_NONE);
1385
1386         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1387
1388         _tbm_surf_queue_mutex_unlock();
1389
1390         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1391
1392         _tbm_surf_queue_mutex_lock();
1393
1394         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1395
1396         pthread_mutex_lock(&surface_queue->lock);
1397
1398         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1399
1400         if (_queue_is_empty(&surface_queue->free_queue)) {
1401                 if (surface_queue->impl && surface_queue->impl->need_attach)
1402                         surface_queue->impl->need_attach(surface_queue);
1403         }
1404
1405         if (!_queue_is_empty(&surface_queue->free_queue)) {
1406                 pthread_mutex_unlock(&surface_queue->lock);
1407                 _tbm_surf_queue_mutex_unlock();
1408                 return 1;
1409         }
1410
1411         if (wait) {
1412                 _tbm_surf_queue_mutex_unlock();
1413                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1414                 pthread_mutex_unlock(&surface_queue->lock);
1415                 return 1;
1416         }
1417
1418         pthread_mutex_unlock(&surface_queue->lock);
1419         _tbm_surf_queue_mutex_unlock();
1420         return 0;
1421 }
1422
1423 tbm_surface_queue_error_e
1424 tbm_surface_queue_release(tbm_surface_queue_h
1425                           surface_queue, tbm_surface_h surface)
1426 {
1427         queue_node *node;
1428         int queue_type;
1429
1430         _tbm_surf_queue_mutex_lock();
1431         _tbm_set_last_result(TBM_ERROR_NONE);
1432
1433         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1434                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1435         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1436                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1437
1438         pthread_mutex_lock(&surface_queue->lock);
1439
1440         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1441
1442         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1443         if (node == NULL || queue_type != NODE_LIST) {
1444                 TBM_ERR("tbm_surface_queue_release::Surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1445                         node, queue_type);
1446                 pthread_mutex_unlock(&surface_queue->lock);
1447
1448                 _tbm_surf_queue_mutex_unlock();
1449
1450                 if (!node) {
1451                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1452                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1453                 } else {
1454                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1455                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1456                 }
1457         }
1458
1459         if (node->delete_pending) {
1460                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1461
1462                 _queue_delete_node(surface_queue, node);
1463
1464                 pthread_mutex_unlock(&surface_queue->lock);
1465
1466                 _tbm_surf_queue_mutex_unlock();
1467
1468                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1469
1470                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1471         }
1472
1473         if (surface_queue->queue_size < surface_queue->num_attached) {
1474                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1475
1476                 if (surface_queue->impl && surface_queue->impl->need_detach)
1477                         surface_queue->impl->need_detach(surface_queue, node);
1478                 else
1479                         _tbm_surface_queue_detach(surface_queue, surface);
1480
1481                 pthread_mutex_unlock(&surface_queue->lock);
1482
1483                 _tbm_surf_queue_mutex_unlock();
1484
1485                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1486
1487                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1488         }
1489
1490         if (surface_queue->impl && surface_queue->impl->release)
1491                 surface_queue->impl->release(surface_queue, node);
1492         else
1493                 _tbm_surface_queue_release(surface_queue, node, 1);
1494
1495         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1496                 TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
1497                 _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
1498                 pthread_mutex_unlock(&surface_queue->lock);
1499
1500                 _tbm_surf_queue_mutex_unlock();
1501                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1502         }
1503
1504         node->type = QUEUE_NODE_TYPE_RELEASE;
1505
1506         pthread_mutex_unlock(&surface_queue->lock);
1507         pthread_cond_signal(&surface_queue->free_cond);
1508
1509         _tbm_surf_queue_mutex_unlock();
1510
1511         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1512
1513         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1514
1515         return TBM_SURFACE_QUEUE_ERROR_NONE;
1516 }
1517
1518 tbm_surface_queue_error_e
1519 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1520                         surface_queue, tbm_surface_h surface)
1521 {
1522         queue_node *node;
1523         int queue_type;
1524
1525         _tbm_surf_queue_mutex_lock();
1526         _tbm_set_last_result(TBM_ERROR_NONE);
1527
1528         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1529                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1530         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1531                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1532
1533         pthread_mutex_lock(&surface_queue->lock);
1534
1535         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1536
1537         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1538         if (node == NULL || queue_type != NODE_LIST) {
1539                 TBM_ERR("tbm_surface_queue_cancel_acquire::Surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1540                         node, queue_type);
1541                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1542                 pthread_mutex_unlock(&surface_queue->lock);
1543
1544                 _tbm_surf_queue_mutex_unlock();
1545                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1546         }
1547
1548         if (surface_queue->impl && surface_queue->impl->enqueue)
1549                 surface_queue->impl->enqueue(surface_queue, node);
1550         else
1551                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1552
1553         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1554                 TBM_ERR("enqueue surface but queue is empty node:%p\n", node);
1555                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1556                 pthread_mutex_unlock(&surface_queue->lock);
1557
1558                 _tbm_surf_queue_mutex_unlock();
1559                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1560         }
1561
1562         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1563
1564         pthread_mutex_unlock(&surface_queue->lock);
1565         pthread_cond_signal(&surface_queue->dirty_cond);
1566
1567         _tbm_surf_queue_mutex_unlock();
1568
1569         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1570
1571         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1572
1573         return TBM_SURFACE_QUEUE_ERROR_NONE;
1574 }
1575
1576 tbm_surface_queue_error_e
1577 tbm_surface_queue_acquire(tbm_surface_queue_h
1578                           surface_queue, tbm_surface_h *surface)
1579 {
1580         queue_node *node;
1581
1582         _tbm_surf_queue_mutex_lock();
1583         _tbm_set_last_result(TBM_ERROR_NONE);
1584
1585         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1586                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1587         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1588                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1589
1590         *surface = NULL;
1591
1592         pthread_mutex_lock(&surface_queue->lock);
1593
1594         if (surface_queue->impl && surface_queue->impl->acquire)
1595                 node = surface_queue->impl->acquire(surface_queue);
1596         else
1597                 node = _tbm_surface_queue_acquire(surface_queue);
1598
1599         if (node == NULL || node->surface == NULL) {
1600                 TBM_ERR("failed to acquire a surface from the dirty_queue\n");
1601                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1602                 pthread_mutex_unlock(&surface_queue->lock);
1603
1604                 _tbm_surf_queue_mutex_unlock();
1605                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1606         }
1607
1608         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1609
1610         *surface = node->surface;
1611
1612         if (surface_queue->acquire_sync_count == 1) {
1613                 tbm_surface_info_s info;
1614                 int ret;
1615
1616                 TBM_ERR("start map surface:%p", *surface);
1617                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1618                 TBM_ERR("end map surface:%p", *surface);
1619                 if (ret == TBM_SURFACE_ERROR_NONE)
1620                         tbm_surface_unmap(*surface);
1621         }
1622
1623         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1624
1625         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1626
1627         pthread_mutex_unlock(&surface_queue->lock);
1628
1629         _tbm_surf_queue_mutex_unlock();
1630
1631         if (b_dump_queue)
1632                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1633
1634         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1635
1636         return TBM_SURFACE_QUEUE_ERROR_NONE;
1637 }
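
/*
 * Illustrative consumer-side sketch (not part of libtbm): acquire the oldest
 * enqueued surface, optionally map it for CPU read (mirroring what the
 * acquire_sync_count path above does), then release it back to the free_queue.
 *
 *   tbm_surface_h surf = NULL;
 *   tbm_surface_info_s info;
 *
 *   if (tbm_surface_queue_acquire(queue, &surf) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           if (tbm_surface_map(surf, TBM_SURF_OPTION_READ, &info) == TBM_SURFACE_ERROR_NONE) {
 *                   consume_pixels(info.planes[0].ptr, info.size);   // hypothetical helper
 *                   tbm_surface_unmap(surf);
 *           }
 *           tbm_surface_queue_release(queue, surf);
 *   }
 */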
1638
1639 int
1640 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1641 {
1642         _tbm_surf_queue_mutex_lock();
1643         _tbm_set_last_result(TBM_ERROR_NONE);
1644
1645         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1646
1647         pthread_mutex_lock(&surface_queue->lock);
1648
1649         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1650
1651         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1652                 pthread_mutex_unlock(&surface_queue->lock);
1653                 _tbm_surf_queue_mutex_unlock();
1654                 return 1;
1655         }
1656
1657         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1658                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1659                 _tbm_surf_queue_mutex_unlock();
1660                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1661                 pthread_mutex_unlock(&surface_queue->lock);
1662                 return 1;
1663         }
1664
1665         pthread_mutex_unlock(&surface_queue->lock);
1666         _tbm_surf_queue_mutex_unlock();
1667         return 0;
1668 }
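
/*
 * Illustrative sketch (not part of libtbm): poll vs. wait for an acquirable
 * buffer. With wait == 0 the call never blocks; with wait != 0 it may block on
 * dirty_cond as long as the producer still holds a dequeued buffer.
 *
 *   tbm_surface_h surf = NULL;
 *
 *   if (tbm_surface_queue_can_acquire(queue, 0)) {
 *           // a buffer is already waiting in the dirty_queue
 *           if (tbm_surface_queue_acquire(queue, &surf) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *                   present(surf);                           // hypothetical helper
 *                   tbm_surface_queue_release(queue, surf);
 *           }
 *   }
 */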
1669
1670 void
1671 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1672 {
1673         queue_node *node = NULL, *tmp;
1674
1675         _tbm_surf_queue_mutex_lock();
1676         _tbm_set_last_result(TBM_ERROR_NONE);
1677
1678         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1679
1680         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1681
1682         LIST_DEL(&surface_queue->item_link);
1683
1684         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1685                 _queue_delete_node(surface_queue, node);
1686
1687         if (surface_queue->impl && surface_queue->impl->destroy)
1688                 surface_queue->impl->destroy(surface_queue);
1689
1690         _notify_emit(surface_queue, &surface_queue->destory_noti);
1691
1692         _notify_remove_all(&surface_queue->destory_noti);
1693         _notify_remove_all(&surface_queue->dequeuable_noti);
1694         _notify_remove_all(&surface_queue->dequeue_noti);
1695         _notify_remove_all(&surface_queue->can_dequeue_noti);
1696         _notify_remove_all(&surface_queue->acquirable_noti);
1697         _notify_remove_all(&surface_queue->reset_noti);
1698         _trace_remove_all(&surface_queue->trace_noti);
1699
1700         pthread_mutex_destroy(&surface_queue->lock);
1701
1702         free(surface_queue);
1703
1704         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1705                 _deinit_tbm_surf_queue_bufmgr();
1706
1707         _tbm_surf_queue_mutex_unlock();
1708 }
1709
1710 tbm_surface_queue_error_e
1711 tbm_surface_queue_reset(tbm_surface_queue_h
1712                         surface_queue, int width, int height, int format)
1713 {
1714         queue_node *node = NULL, *tmp;
1715
1716         _tbm_surf_queue_mutex_lock();
1717         _tbm_set_last_result(TBM_ERROR_NONE);
1718
1719         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1720                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1721
1722         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1723
1724         if (width == surface_queue->width && height == surface_queue->height &&
1725                 format == surface_queue->format) {
1726                 _tbm_surf_queue_mutex_unlock();
1727                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1728         }
1729
1730         pthread_mutex_lock(&surface_queue->lock);
1731
1732         surface_queue->width = width;
1733         surface_queue->height = height;
1734         surface_queue->format = format;
1735
1736         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1737                 /* Destroy the surfaces in the free_queue; defer deletion of in-flight surfaces */
1738                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1739                         _queue_delete_node(surface_queue, node);
1740
1741                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1742                         node->delete_pending = 1;
1743         } else {
1744                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1745                         _queue_delete_node(surface_queue, node);
1746
1747                 _queue_init(&surface_queue->dirty_queue);
1748                 LIST_INITHEAD(&surface_queue->list);
1749         }
1750
1751         /* Reset queue */
1752         _queue_init(&surface_queue->free_queue);
1753
1754         surface_queue->num_attached = 0;
1755
1756         if (surface_queue->impl && surface_queue->impl->reset)
1757                 surface_queue->impl->reset(surface_queue);
1758
1759         pthread_mutex_unlock(&surface_queue->lock);
1760         pthread_cond_signal(&surface_queue->free_cond);
1761
1762         _tbm_surf_queue_mutex_unlock();
1763
1764         _notify_emit(surface_queue, &surface_queue->reset_noti);
1765
1766         return TBM_SURFACE_QUEUE_ERROR_NONE;
1767 }
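
/*
 * Illustrative sketch (not part of libtbm): resize the queue when the output
 * size changes. Attached surfaces are destroyed (or marked delete_pending under
 * GUARANTEE_CYCLE) and are re-allocated lazily on later dequeues; reset_noti
 * listeners are notified. new_w/new_h are assumed to come from the caller.
 *
 *   if (tbm_surface_queue_reset(queue, new_w, new_h, TBM_FORMAT_ARGB8888)
 *       != TBM_SURFACE_QUEUE_ERROR_NONE)
 *           handle_error();   // hypothetical helper
 */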
1768
1769 tbm_surface_queue_error_e
1770 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1771 {
1772         _tbm_surf_queue_mutex_lock();
1773         _tbm_set_last_result(TBM_ERROR_NONE);
1774
1775         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1776                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1777
1778         _tbm_surf_queue_mutex_unlock();
1779
1780         _notify_emit(surface_queue, &surface_queue->reset_noti);
1781
1782         return TBM_SURFACE_QUEUE_ERROR_NONE;
1783 }
1784
1785 tbm_surface_queue_error_e
1786 tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
1787 {
1788         _tbm_surf_queue_mutex_lock();
1789         _tbm_set_last_result(TBM_ERROR_NONE);
1790
1791         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1792                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1793
1794         pthread_mutex_lock(&surface_queue->lock);
1795         pthread_mutex_unlock(&surface_queue->lock);
1796         pthread_cond_signal(&surface_queue->free_cond);
1797
1798         _tbm_surf_queue_mutex_unlock();
1799
1800         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1801
1802         return TBM_SURFACE_QUEUE_ERROR_NONE;
1803 }
1804
1805 tbm_surface_queue_error_e
1806 tbm_surface_queue_set_size(tbm_surface_queue_h
1807                         surface_queue, int queue_size, int flush)
1808 {
1809         queue_node *node = NULL, *tmp;
1810
1811         _tbm_surf_queue_mutex_lock();
1812         _tbm_set_last_result(TBM_ERROR_NONE);
1813
1814         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1815                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1816         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1817                                         TBM_ERROR_INVALID_PARAMETER);
1818
1819         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1820
1821         if ((surface_queue->queue_size == queue_size) && !flush) {
1822                 _tbm_surf_queue_mutex_unlock();
1823                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1824         }
1825
1826         pthread_mutex_lock(&surface_queue->lock);
1827
1828         if (flush) {
1829                 surface_queue->queue_size = queue_size;
1830
1831                 if (surface_queue->num_attached == 0) {
1832                         pthread_mutex_unlock(&surface_queue->lock);
1833                         _tbm_surf_queue_mutex_unlock();
1834                         return TBM_SURFACE_QUEUE_ERROR_NONE;
1835                 }
1836
1837                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1838                         /* Destroy the surfaces in the free_queue; defer deletion of in-flight surfaces */
1839                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1840                                 _queue_delete_node(surface_queue, node);
1841
1842                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1843                                 node->delete_pending = 1;
1844                 } else {
1845                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1846                                 _queue_delete_node(surface_queue, node);
1847
1848                         _queue_init(&surface_queue->dirty_queue);
1849                         LIST_INITHEAD(&surface_queue->list);
1850                 }
1851
1852                 /* Reset queue */
1853                 _queue_init(&surface_queue->free_queue);
1854
1855                 surface_queue->num_attached = 0;
1856
1857                 if (surface_queue->impl && surface_queue->impl->reset)
1858                         surface_queue->impl->reset(surface_queue);
1859
1860                 pthread_mutex_unlock(&surface_queue->lock);
1861                 pthread_cond_signal(&surface_queue->free_cond);
1862
1863                 _tbm_surf_queue_mutex_unlock();
1864
1865                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1866
1867                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1868         } else {
1869                 if (surface_queue->queue_size > queue_size) {
1870                         int need_del = surface_queue->queue_size - queue_size;
1871
1872                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1873                                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1874
1875                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1876                                         surface_queue->impl->need_detach(surface_queue, node);
1877                                 else
1878                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1879
1880                                 need_del--;
1881                                 if (need_del == 0)
1882                                         break;
1883                         }
1884                 }
1885
1886                 surface_queue->queue_size = queue_size;
1887
1888                 pthread_mutex_unlock(&surface_queue->lock);
1889
1890                 _tbm_surf_queue_mutex_unlock();
1891
1892                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1893         }
1894 }
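
/*
 * Illustrative sketch (not part of libtbm): shrink a queue to 2 buffers without
 * flushing. Only surplus buffers idle in the free_queue are detached; buffers
 * currently dequeued, enqueued or acquired are left alone. Passing flush != 0
 * instead behaves like a full reset and emits reset_noti.
 *
 *   tbm_surface_queue_set_size(queue, 2, 0);
 *
 * Growing works the same way: the larger queue_size simply allows more buffers
 * to be attached on future dequeues.
 */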
1895
1896 tbm_surface_queue_error_e
1897 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1898 {
1899         queue_node *node = NULL;
1900
1901         _tbm_surf_queue_mutex_lock();
1902         _tbm_set_last_result(TBM_ERROR_NONE);
1903
1904         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1905                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1906
1907         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1908
1909         if (surface_queue->num_attached == 0) {
1910                 _tbm_surf_queue_mutex_unlock();
1911                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1912         }
1913
1914         pthread_mutex_lock(&surface_queue->lock);
1915
1916         /* Destroy surfaces in the free_queue */
1917         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1918                 if (surface_queue->impl && surface_queue->impl->need_detach)
1919                         surface_queue->impl->need_detach(surface_queue, node);
1920                 else
1921                         _tbm_surface_queue_detach(surface_queue, node->surface);
1922         }
1923
1924         /* Reset queue */
1925         _queue_init(&surface_queue->free_queue);
1926
1927         pthread_mutex_unlock(&surface_queue->lock);
1928         _tbm_surf_queue_mutex_unlock();
1929
1930         return TBM_SURFACE_QUEUE_ERROR_NONE;
1931 }
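
/*
 * Illustrative sketch (not part of libtbm): drop idle buffers, e.g. under
 * memory pressure. Only surfaces parked in the free_queue are detached;
 * anything currently dequeued, enqueued or acquired keeps working and the
 * queue itself stays usable.
 *
 *   if (memory_pressure)   // hypothetical condition
 *           tbm_surface_queue_free_flush(queue);
 */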
1932
1933 tbm_surface_queue_error_e
1934 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1935 {
1936         queue_node *node = NULL, *tmp;
1937
1938         _tbm_surf_queue_mutex_lock();
1939         _tbm_set_last_result(TBM_ERROR_NONE);
1940
1941         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1942                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1943
1944         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1945
1946         if (surface_queue->num_attached == 0) {
1947                 _tbm_surf_queue_mutex_unlock();
1948                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1949         }
1950
1951         pthread_mutex_lock(&surface_queue->lock);
1952
1953         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1954                 /* Destroy the surfaces in the free_queue; defer deletion of in-flight surfaces */
1955                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1956                         _queue_delete_node(surface_queue, node);
1957
1958                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1959                         node->delete_pending = 1;
1960         } else {
1961                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1962                         _queue_delete_node(surface_queue, node);
1963
1964                 _queue_init(&surface_queue->dirty_queue);
1965                 LIST_INITHEAD(&surface_queue->list);
1966         }
1967
1968         /* Reset queue */
1969         _queue_init(&surface_queue->free_queue);
1970
1971         surface_queue->num_attached = 0;
1972
1973         if (surface_queue->impl && surface_queue->impl->reset)
1974                 surface_queue->impl->reset(surface_queue);
1975
1976         pthread_mutex_unlock(&surface_queue->lock);
1977         pthread_cond_signal(&surface_queue->free_cond);
1978
1979         _tbm_surf_queue_mutex_unlock();
1980
1981         _notify_emit(surface_queue, &surface_queue->reset_noti);
1982
1983         return TBM_SURFACE_QUEUE_ERROR_NONE;
1984 }
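
/*
 * Illustrative sketch (not part of libtbm): throw away every attached buffer,
 * e.g. after the producer's rendering context was lost. Under GUARANTEE_CYCLE
 * in-flight buffers are only marked delete_pending; listeners registered on
 * reset_noti are notified exactly as for tbm_surface_queue_reset().
 *
 *   tbm_surface_queue_flush(queue);
 */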
1985
1986 tbm_surface_queue_error_e
1987 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1988                         tbm_surface_h *surfaces, int *num)
1989 {
1990         queue_node *node = NULL;
1991
1992         _tbm_surf_queue_mutex_lock();
1993         _tbm_set_last_result(TBM_ERROR_NONE);
1994
1995         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1996                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1997         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1998                                TBM_ERROR_INVALID_PARAMETER);
1999
2000         *num = 0;
2001
2002         pthread_mutex_lock(&surface_queue->lock);
2003
2004         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
2005                 if (node->delete_pending) continue;
2006
2007                 if (surfaces)
2008                         surfaces[*num] = node->surface;
2009
2010                 *num = *num + 1;
2011         }
2012
2013         pthread_mutex_unlock(&surface_queue->lock);
2014
2015         _tbm_surf_queue_mutex_unlock();
2016
2017         return TBM_SURFACE_QUEUE_ERROR_NONE;
2018 }
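
/*
 * Illustrative sketch (not part of libtbm): the usual two-call pattern.
 * Call once with surfaces == NULL to learn how many surfaces are attached,
 * then call again with an array large enough to hold them.
 *
 *   int num = 0;
 *   tbm_surface_queue_get_surfaces(queue, NULL, &num);     // first call: count only
 *
 *   tbm_surface_h surfs[16];                                // assumed >= queue_size
 *   if (num <= 16) {
 *           tbm_surface_queue_get_surfaces(queue, surfs, &num);
 *           for (int i = 0; i < num; i++)
 *                   inspect_surface(surfs[i]);              // hypothetical helper
 *   }
 */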
2019
2020 tbm_surface_queue_error_e
2021 tbm_surface_queue_get_acquirable_surfaces(tbm_surface_queue_h surface_queue,
2022                         tbm_surface_h *surfaces, int *num)
2023 {
2024         queue_node *node = NULL;
2025
2026         _tbm_surf_queue_mutex_lock();
2027         _tbm_set_last_result(TBM_ERROR_NONE);
2028
2029         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2030                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2031         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2032                                TBM_ERROR_INVALID_PARAMETER);
2033
2034         *num = 0;
2035         pthread_mutex_lock(&surface_queue->lock);
2036
2037         LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head, item_link) {
2038                 if (surfaces)
2039                         surfaces[*num] = node->surface;
2040
2041                 *num = *num + 1;
2042         }
2043
2044         pthread_mutex_unlock(&surface_queue->lock);
2045
2046         _tbm_surf_queue_mutex_unlock();
2047
2048         return TBM_SURFACE_QUEUE_ERROR_NONE;
2049 }
2050
2051 tbm_surface_queue_error_e
2052 tbm_surface_queue_get_trace_surface_num(
2053                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
2054 {
2055         _tbm_surf_queue_mutex_lock();
2056         _tbm_set_last_result(TBM_ERROR_NONE);
2057
2058         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2059                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2060         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2061                                TBM_ERROR_INVALID_PARAMETER);
2062
2063         *num = 0;
2064
2065         pthread_mutex_lock(&surface_queue->lock);
2066
2067         switch (trace) {
2068         case TBM_SURFACE_QUEUE_TRACE_NONE:
2069                 *num = 0;
2070                 break;
2071         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
2072                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2073                 break;
2074         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
2075                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2076                 break;
2077         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
2078                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
2079                 break;
2080         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
2081                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
2082                 break;
2083         default:
2084                 break;
2085         }
2086
2087         pthread_mutex_unlock(&surface_queue->lock);
2088
2089         _tbm_surf_queue_mutex_unlock();
2090
2091         return TBM_SURFACE_QUEUE_ERROR_NONE;
2092 }
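
/*
 * Illustrative sketch (not part of libtbm): query how many buffers are
 * currently in a given state, per the queue_node types tracked above.
 *
 *   int dequeued = 0, acquired = 0;
 *   tbm_surface_queue_get_trace_surface_num(queue,
 *                           TBM_SURFACE_QUEUE_TRACE_DEQUEUE, &dequeued);
 *   tbm_surface_queue_get_trace_surface_num(queue,
 *                           TBM_SURFACE_QUEUE_TRACE_ACQUIRE, &acquired);
 *   // "dequeued" buffers are held by the producer, "acquired" by the consumer
 */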
2093
2094 typedef struct {
2095         int flags;
2096 } tbm_queue_default;
2097
2098 static void
2099 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
2100 {
2101         free(surface_queue->impl_data);
2102 }
2103
2104 static void
2105 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
2106 {
2107         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
2108         tbm_surface_h surface;
2109
2110         if (surface_queue->queue_size == surface_queue->num_attached)
2111                 return;
2112
2113         if (surface_queue->alloc_cb) {
2114                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2115
2116                 /* silent return */
2117                 if (!surface)
2118                         return;
2119
2120                 tbm_surface_internal_ref(surface);
2121         } else {
2122                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2123                                 surface_queue->height,
2124                                 surface_queue->format,
2125                                 data->flags);
2126                 TBM_RETURN_IF_FAIL(surface != NULL);
2127         }
2128
2129         _tbm_surface_queue_attach(surface_queue, surface);
2130         tbm_surface_internal_unref(surface);
2131 }
2132
2133 static const tbm_surface_queue_interface tbm_queue_default_impl = {
2134         NULL,                           /*__tbm_queue_default_init*/
2135         NULL,                           /*__tbm_queue_default_reset*/
2136         __tbm_queue_default_destroy,
2137         __tbm_queue_default_need_attach,
2138         NULL,                           /*__tbm_queue_default_enqueue*/
2139         NULL,                           /*__tbm_queue_default_release*/
2140         NULL,                           /*__tbm_queue_default_dequeue*/
2141         NULL,                           /*__tbm_queue_default_acquire*/
2142         NULL,                           /*__tbm_queue_default_need_detach*/
2143 };
2144
2145 tbm_surface_queue_h
2146 tbm_surface_queue_create(int queue_size, int width,
2147                          int height, int format, int flags)
2148 {
2149         _tbm_surf_queue_mutex_lock();
2150         _tbm_set_last_result(TBM_ERROR_NONE);
2151
2152         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2153         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2154         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2155         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2156
2157         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2158                                             sizeof(struct _tbm_surface_queue));
2159         if (!surface_queue) {
2160                 TBM_ERR("cannot allocate the surface_queue.\n");
2161                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2162                 _tbm_surf_queue_mutex_unlock();
2163                 return NULL;
2164         }
2165
2166         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2167
2168         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
2169                                   sizeof(tbm_queue_default));
2170         if (data == NULL) {
2171                 TBM_ERR("cannot allocate the tbm_queue_default.\n");
2172                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2173                 free(surface_queue);
2174                 _tbm_surf_queue_mutex_unlock();
2175                 return NULL;
2176         }
2177
2178         data->flags = flags;
2179         _tbm_surface_queue_init(surface_queue,
2180                                 queue_size,
2181                                 width, height, format,
2182                                 &tbm_queue_default_impl, data);
2183
2184         _tbm_surf_queue_mutex_unlock();
2185
2186         return surface_queue;
2187 }
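
/*
 * Illustrative end-to-end sketch (not part of libtbm): create a default
 * 3-deep ARGB8888 queue, run one producer/consumer cycle, then destroy it.
 * tbm_surface_queue_dequeue()/enqueue()/release() are declared in
 * tbm_surface_queue.h; TBM_BO_DEFAULT is the default allocation flag.
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_create(3, 640, 480, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *
 *   tbm_surface_h surf = NULL;
 *   if (tbm_surface_queue_dequeue(queue, &surf) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           render_into(surf);                       // hypothetical producer work
 *           tbm_surface_queue_enqueue(queue, surf);  // hand it to the consumer
 *   }
 *
 *   if (tbm_surface_queue_acquire(queue, &surf) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           present(surf);                           // hypothetical consumer work
 *           tbm_surface_queue_release(queue, surf);  // back to the free_queue
 *   }
 *
 *   tbm_surface_queue_destroy(queue);
 */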
2188
2189 typedef struct {
2190         int flags;
2191         queue dequeue_list;
2192 } tbm_queue_sequence;
2193
2194 static void
2195 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2196 {
2197         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2198
2199         _queue_init(&data->dequeue_list);
2200 }
2201
2202 static void
2203 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2204 {
2205         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2206
2207         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2208                 return;
2209
2210         _queue_init(&data->dequeue_list);
2211 }
2212
2213 static void
2214 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2215 {
2216         free(surface_queue->impl_data);
2217 }
2218
2219 static void
2220 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
2221 {
2222         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2223         tbm_surface_h surface;
2224
2225         if (surface_queue->queue_size == surface_queue->num_attached)
2226                 return;
2227
2228         if (surface_queue->alloc_cb) {
2229                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2230
2231                 /* silent return */
2232                 if (!surface)
2233                         return;
2234
2235                 tbm_surface_internal_ref(surface);
2236         } else {
2237                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2238                                 surface_queue->height,
2239                                 surface_queue->format,
2240                                 data->flags);
2241                 TBM_RETURN_IF_FAIL(surface != NULL);
2242         }
2243
2244         _tbm_surface_queue_attach(surface_queue, surface);
2245         tbm_surface_internal_unref(surface);
2246 }
2247
2248 static void
2249 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2250                              queue_node *node)
2251 {
2252         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2253         queue_node *first = NULL;
2254
2255         first = container_of(data->dequeue_list.head.next, first, item_link);
2256         if (first != node) {
2257                 return;
2258         }
2259
2260         node->priv_flags = 0;
2261
2262         _queue_node_pop(&data->dequeue_list, node);
2263         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2264 }
2265
2266 static void
2267 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2268                                 queue_node *node)
2269 {
2270         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2271
2272         if (node->priv_flags) {
2273                 node->priv_flags = 0;
2274                 _queue_node_pop(&data->dequeue_list, node);
2275         }
2276
2277         _tbm_surface_queue_release(surface_queue, node, 1);
2278 }
2279
2280 static queue_node *
2281 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2282                              surface_queue)
2283 {
2284         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2285         queue_node *node;
2286
2287         node = _tbm_surface_queue_dequeue(surface_queue);
2288         if (node) {
2289                 _queue_node_push_back(&data->dequeue_list, node);
2290                 node->priv_flags = 1;
2291         }
2292
2293         return node;
2294 }
2295
2296 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2297         __tbm_queue_sequence_init,
2298         __tbm_queue_sequence_reset,
2299         __tbm_queue_sequence_destroy,
2300         __tbm_queue_sequence_need_attach,
2301         __tbm_queue_sequence_enqueue,
2302         __tbm_queue_sequence_release,
2303         __tbm_queue_sequence_dequeue,
2304         NULL,                                   /*__tbm_queue_sequence_acquire*/
2305         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2306 };
2307
2308 tbm_surface_queue_h
2309 tbm_surface_queue_sequence_create(int queue_size, int width,
2310                                   int height, int format, int flags)
2311 {
2312         _tbm_surf_queue_mutex_lock();
2313         _tbm_set_last_result(TBM_ERROR_NONE);
2314
2315         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2316         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2317         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2318         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2319
2320         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2321                                             sizeof(struct _tbm_surface_queue));
2322         if (surface_queue == NULL) {
2323                 TBM_ERR("cannot allocate the surface_queue.\n");
2324                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2325                 _tbm_surf_queue_mutex_unlock();
2326                 return NULL;
2327         }
2328
2329         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2330
2331         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2332                                    sizeof(tbm_queue_sequence));
2333         if (data == NULL) {
2334                 TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
2335                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2336                 free(surface_queue);
2337                 _tbm_surf_queue_mutex_unlock();
2338                 return NULL;
2339         }
2340
2341         data->flags = flags;
2342         _tbm_surface_queue_init(surface_queue,
2343                                 queue_size,
2344                                 width, height, format,
2345                                 &tbm_queue_sequence_impl, data);
2346
2347         _tbm_surf_queue_mutex_unlock();
2348
2349         return surface_queue;
2350 }
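
/*
 * Illustrative sketch (not part of libtbm): a "sequence" queue created with
 * tbm_surface_queue_sequence_create() records the dequeue order in a private
 * dequeue_list (see __tbm_queue_sequence_dequeue/enqueue above), so only the
 * oldest dequeued buffer is moved to the dirty_queue on enqueue. The API is
 * otherwise used exactly like the default queue.
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_sequence_create(3, 640, 480,
 *                                             TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   // ...dequeue/enqueue/acquire/release as with the default queue...
 *   tbm_surface_queue_destroy(queue);
 */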
2351
2352 tbm_surface_queue_error_e
2353 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2354                                   int modes)
2355 {
2356         _tbm_surf_queue_mutex_lock();
2357         _tbm_set_last_result(TBM_ERROR_NONE);
2358
2359         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2360                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2361
2362         pthread_mutex_lock(&surface_queue->lock);
2363
2364         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2365                 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
2366         else
2367                 surface_queue->modes |= modes;
2368
2369         pthread_mutex_unlock(&surface_queue->lock);
2370
2371         _tbm_surf_queue_mutex_unlock();
2372
2373         return TBM_SURFACE_QUEUE_ERROR_NONE;
2374 }
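
/*
 * Illustrative sketch (not part of libtbm): enable GUARANTEE_CYCLE so that
 * reset/flush mark in-flight buffers delete_pending instead of destroying them
 * immediately; pass TBM_SURFACE_QUEUE_MODE_NONE to clear the stored modes.
 *
 *   tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE);
 *   // ...
 *   tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_NONE);
 */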
2375
2376 tbm_surface_queue_error_e
2377 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2378                                   unsigned int sync_count)
2379 {
2380         int dequeue_num, enqueue_num;
2381
2382         _tbm_surf_queue_mutex_lock();
2383         _tbm_set_last_result(TBM_ERROR_NONE);
2384
2385         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2386                                    TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2387
2388         pthread_mutex_lock(&surface_queue->lock);
2389
2390         dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2391         enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2392
2393         if (dequeue_num + sync_count == 0)
2394                 surface_queue->acquire_sync_count = enqueue_num;
2395         else
2396                 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2397
2398         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2399                                 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2400
2401         pthread_mutex_unlock(&surface_queue->lock);
2402
2403         _tbm_surf_queue_mutex_unlock();
2404
2405         return TBM_SURFACE_QUEUE_ERROR_NONE;
2406 }