[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36 #include <time.h>
37
38 #define FREE_QUEUE      1
39 #define DIRTY_QUEUE     2
40 #define NODE_LIST       4
41
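/*
 * Usage sketch (illustration only, not part of this file's logic). The
 * creation/teardown entry points and tbm_surface_queue_can_acquire() are
 * declared in tbm_surface_queue.h and defined outside this excerpt; the
 * calls below only show the intended producer/consumer flow of the
 * functions implemented here.
 *
 *   tbm_surface_queue_h q = tbm_surface_queue_create(3, 720, 1280,
 *                                  TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   tbm_surface_h surf;
 *
 *   // producer: render into a free buffer
 *   if (tbm_surface_queue_can_dequeue(q, 1)) {
 *           tbm_surface_queue_dequeue(q, &surf);
 *           // ... draw into surf ...
 *           tbm_surface_queue_enqueue(q, surf);
 *   }
 *
 *   // consumer: display a rendered buffer
 *   if (tbm_surface_queue_can_acquire(q, 0)) {
 *           tbm_surface_queue_acquire(q, &surf);
 *           // ... present surf ...
 *           tbm_surface_queue_release(q, surf);
 *   }
 *
 *   tbm_surface_queue_destroy(q);
 */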
42 static tbm_bufmgr g_surf_queue_bufmgr;
43 static pthread_mutex_t tbm_surf_queue_lock = PTHREAD_MUTEX_INITIALIZER;
44 void _tbm_surface_queue_mutex_unlock(void);
45
46 /* check condition */
47 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
48         if (!(cond)) {\
49                 TBM_ERR("'%s' failed.\n", #cond);\
50                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
51                 _tbm_surf_queue_mutex_unlock();\
52                 return;\
53         } \
54 }
55
56 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
57         if (!(cond)) {\
58                 TBM_ERR("'%s' failed.\n", #cond);\
59                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
60                 _tbm_surf_queue_mutex_unlock();\
61                 return val;\
62         } \
63 }
64
65 typedef enum _queue_node_type {
66         QUEUE_NODE_TYPE_NONE,
67         QUEUE_NODE_TYPE_DEQUEUE,
68         QUEUE_NODE_TYPE_ENQUEUE,
69         QUEUE_NODE_TYPE_ACQUIRE,
70         QUEUE_NODE_TYPE_RELEASE
71 } Queue_Node_Type;
72
73 typedef struct {
74         struct list_head head;
75         int count;
76 } queue;
77
78 typedef struct {
79         tbm_surface_h surface;
80
81         struct list_head item_link;
82         struct list_head link;
83
84         Queue_Node_Type type;
85
86         unsigned int priv_flags;        /*for each queue*/
87
88         int delete_pending;
89 } queue_node;
90
91 typedef struct {
92         struct list_head link;
93
94         tbm_surface_queue_notify_cb cb;
95         void *data;
96 } queue_notify;
97
98 typedef struct {
99         struct list_head link;
100
101         tbm_surface_queue_trace_cb cb;
102         void *data;
103 } queue_trace;
104
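/* Backend hooks: a concrete queue implementation can override how nodes are
 * attached, enqueued, dequeued, acquired and released; unset hooks fall back
 * to the default _tbm_surface_queue_* helpers below. */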
105 typedef struct _tbm_surface_queue_interface {
106         void (*init)(tbm_surface_queue_h queue);
107         void (*reset)(tbm_surface_queue_h queue);
108         void (*destroy)(tbm_surface_queue_h queue);
109         void (*need_attach)(tbm_surface_queue_h queue);
110
111         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
112         void (*release)(tbm_surface_queue_h queue, queue_node *node);
113         queue_node *(*dequeue)(tbm_surface_queue_h queue);
114         queue_node *(*acquire)(tbm_surface_queue_h queue);
115         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
116 } tbm_surface_queue_interface;
117
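/* The queue object itself: every attached buffer lives on 'list'; a buffer
 * waiting for the producer sits in free_queue (dequeuable) and a buffer
 * handed to the consumer sits in dirty_queue (acquirable). */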
118 struct _tbm_surface_queue {
119         int width;
120         int height;
121         int format;
122         int queue_size;
123         int flags;
124         int num_attached;
125
126         queue free_queue;
127         queue dirty_queue;
128         struct list_head list;
129
130         struct list_head destory_noti;
131         struct list_head dequeuable_noti;
132         struct list_head dequeue_noti;
133         struct list_head can_dequeue_noti;
134         struct list_head acquirable_noti;
135         struct list_head reset_noti;
136         struct list_head trace_noti;
137
138         pthread_mutex_t lock;
139         pthread_cond_t free_cond;
140         pthread_cond_t dirty_cond;
141
142         const tbm_surface_queue_interface *impl;
143         void *impl_data;
144
145         //For external buffer allocation
146         tbm_surface_alloc_cb alloc_cb;
147         tbm_surface_free_cb free_cb;
148         void *alloc_cb_data;
149
150         struct list_head item_link; /* link of surface queue */
151
152         int modes;
153         unsigned int enqueue_sync_count;
154         unsigned int acquire_sync_count;
155 };
156
157 static void
158 _tbm_surf_queue_mutex_lock(void)
159 {
160         pthread_mutex_lock(&tbm_surf_queue_lock);
161 }
162
163 static void
164 _tbm_surf_queue_mutex_unlock(void)
165 {
166         pthread_mutex_unlock(&tbm_surf_queue_lock);
167 }
168
169 static void
170 _init_tbm_surf_queue_bufmgr(void)
171 {
172         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
173 }
174
175 static void
176 _deinit_tbm_surf_queue_bufmgr(void)
177 {
178         if (!g_surf_queue_bufmgr)
179                 return;
180
181         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
182         g_surf_queue_bufmgr = NULL;
183 }
184
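/* A queue handle is valid only while it is registered in the global
 * bufmgr's surf_queue_list; returns 1 if found, 0 otherwise. */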
185 static int
186 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
187 {
188         tbm_surface_queue_h old_data = NULL;
189
190         if (surface_queue == NULL) {
191                 TBM_ERR("error: surface_queue is NULL.\n");
192                 return 0;
193         }
194
195         if (g_surf_queue_bufmgr == NULL) {
196                 TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n");
197                 return 0;
198         }
199
200         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
201                 TBM_ERR("error: surf_queue_list is empty\n");
202                 return 0;
203         }
204
205         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
206                                 item_link) {
207                 if (old_data == surface_queue)
208                         return 1;
209         }
210
211         TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
212
213         return 0;
214 }
215
216 static queue_node *
217 _queue_node_create(void)
218 {
219         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
220
221         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
222
223         return node;
224 }
225
226 static void
227 _queue_node_delete(queue_node *node)
228 {
229         LIST_DEL(&node->item_link);
230         LIST_DEL(&node->link);
231         free(node);
232 }
233
234 static int
235 _queue_is_empty(queue *queue)
236 {
237         if (LIST_IS_EMPTY(&queue->head))
238                 return 1;
239
240         return 0;
241 }
242
243 static void
244 _queue_node_push_back(queue *queue, queue_node *node)
245 {
246         LIST_ADDTAIL(&node->item_link, &queue->head);
247         queue->count++;
248 }
249
250 static void
251 _queue_node_push_front(queue *queue, queue_node *node)
252 {
253         LIST_ADD(&node->item_link, &queue->head);
254         queue->count++;
255 }
256
257 static queue_node *
258 _queue_node_pop_front(queue *queue)
259 {
260         queue_node *node;
261
262         if (!queue->head.next) return NULL;
263         if (!queue->count) return NULL;
264
265         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
266
267         LIST_DELINIT(&node->item_link);
268         queue->count--;
269
270         return node;
271 }
272
273 static queue_node *
274 _queue_node_pop(queue *queue, queue_node *node)
275 {
276         LIST_DELINIT(&node->item_link);
277         queue->count--;
278
279         return node;
280 }
281
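/* Find the node owning 'surface'. 'type' is a bitmask of FREE_QUEUE,
 * DIRTY_QUEUE and NODE_LIST (0 searches all three); the list the node was
 * found in is reported through 'out_type'. */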
282 static queue_node *
283 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
284                 tbm_surface_h surface, int *out_type)
285 {
286         queue_node *node = NULL;
287
288         if (type == 0)
289                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
290         if (out_type)
291                 *out_type = 0;
292
293         if (type & FREE_QUEUE) {
294                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
295                                          item_link) {
296                         if (node->surface == surface) {
297                                 if (out_type)
298                                         *out_type = FREE_QUEUE;
299
300                                 return node;
301                         }
302                 }
303         }
304
305         if (type & DIRTY_QUEUE) {
306                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
307                                          item_link) {
308                         if (node->surface == surface) {
309                                 if (out_type)
310                                         *out_type = DIRTY_QUEUE;
311
312                                 return node;
313                         }
314                 }
315         }
316
317         if (type & NODE_LIST) {
318                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
319                         if (node->surface == surface) {
320                                 if (out_type)
321                                         *out_type = NODE_LIST;
322
323                                 return node;
324                         }
325                 }
326         }
327
328         TBM_ERR("fail to get the queue_node.\n");
329
330         return NULL;
331 }
332
333 static void
334 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
335 {
336         if (node->surface) {
337                 if (surface_queue->free_cb) {
338                         surface_queue->free_cb(surface_queue,
339                                         surface_queue->alloc_cb_data,
340                                         node->surface);
341                 }
342
343                 tbm_surface_destroy(node->surface);
344         }
345
346         _queue_node_delete(node);
347 }
348
349 static void
350 _queue_init(queue *queue)
351 {
352         LIST_INITHEAD(&queue->head);
353
354         queue->count = 0;
355 }
356
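/* Notification helpers: each *_noti list holds (callback, data) pairs that
 * are registered by the public add_*_cb() entry points and invoked through
 * _notify_emit(). */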
357 static void
358 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
359             void *data)
360 {
361         TBM_RETURN_IF_FAIL(cb != NULL);
362
363         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
364
365         TBM_RETURN_IF_FAIL(item != NULL);
366
367         LIST_INITHEAD(&item->link);
368         item->cb = cb;
369         item->data = data;
370
371         LIST_ADDTAIL(&item->link, list);
372 }
373
374 static void
375 _notify_remove(struct list_head *list,
376                tbm_surface_queue_notify_cb cb, void *data)
377 {
378         queue_notify *item = NULL, *tmp;
379
380         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
381                 if (item->cb == cb && item->data == data) {
382                         LIST_DEL(&item->link);
383                         free(item);
384                         return;
385                 }
386         }
387
388         TBM_ERR("Cannot find notify\n");
389 }
390
391 static void
392 _notify_remove_all(struct list_head *list)
393 {
394         queue_notify *item = NULL, *tmp;
395
396         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
397                 LIST_DEL(&item->link);
398                 free(item);
399         }
400 }
401
402 static void
403 _notify_emit(tbm_surface_queue_h surface_queue,
404              struct list_head *list)
405 {
406         queue_notify *item = NULL, *tmp;
407
408         /*
409                 item->cb is a callback registered from outside libtbm.
410                 The callback may remove items from this list, so the safe
411                 iterator LIST_FOR_EACH_ENTRY_SAFE must be used here.
412         */
413         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
414                 item->cb(surface_queue, item->data);
415 }
416
417 static void
418 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
419             void *data)
420 {
421         TBM_RETURN_IF_FAIL(cb != NULL);
422
423         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
424
425         TBM_RETURN_IF_FAIL(item != NULL);
426
427         LIST_INITHEAD(&item->link);
428         item->cb = cb;
429         item->data = data;
430
431         LIST_ADDTAIL(&item->link, list);
432 }
433
434 static void
435 _trace_remove(struct list_head *list,
436                tbm_surface_queue_trace_cb cb, void *data)
437 {
438         queue_trace *item = NULL, *tmp;
439
440         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
441                 if (item->cb == cb && item->data == data) {
442                         LIST_DEL(&item->link);
443                         free(item);
444                         return;
445                 }
446         }
447
448         TBM_ERR("Cannot find trace\n");
449 }
450
451 static void
452 _trace_remove_all(struct list_head *list)
453 {
454         queue_trace *item = NULL, *tmp;
455
456         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
457                 LIST_DEL(&item->link);
458                 free(item);
459         }
460 }
461
462 static void
463 _trace_emit(tbm_surface_queue_h surface_queue,
464              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
465 {
466         queue_trace *item = NULL, *tmp;
467
468         /*
469                 item->cb is a callback registered from outside libtbm.
470                 The callback may remove items from this list, so the safe
471                 iterator LIST_FOR_EACH_ENTRY_SAFE must be used here.
472         */
473         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
474                 item->cb(surface_queue, surface, trace, item->data);
475 }
476
477 static int
478 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
479 {
480         queue_node *node = NULL;
481         int count = 0;
482
483         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
484                 if (node->type == type)
485                         count++;
486         }
487
488         return count;
489 }
490
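/* Reference 'surface', wrap it in a node and add the node to both the
 * queue's node list and the free queue. */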
491 static void
492 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
493                           tbm_surface_h surface)
494 {
495         queue_node *node;
496
497         node = _queue_node_create();
498         TBM_RETURN_IF_FAIL(node != NULL);
499
500         tbm_surface_internal_ref(surface);
501         node->surface = surface;
502
503         LIST_ADDTAIL(&node->link, &surface_queue->list);
504         surface_queue->num_attached++;
505         _queue_node_push_back(&surface_queue->free_queue, node);
506 }
507
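/* Attach one more buffer while the queue is not fully populated, either via
 * the user-supplied alloc_cb or by creating a surface internally. */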
508 static void
509 _tbm_surface_queue_need_attach(tbm_surface_queue_h surface_queue)
510 {
511         tbm_surface_h surface;
512
513         if (surface_queue->queue_size == surface_queue->num_attached)
514                 return;
515
516         if (surface_queue->alloc_cb) {
517                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
518
519                 /* silent return */
520                 if (!surface)
521                         return;
522
523                 tbm_surface_internal_ref(surface);
524         } else {
525                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
526                                 surface_queue->height,
527                                 surface_queue->format,
528                                 surface_queue->flags);
529                 TBM_RETURN_IF_FAIL(surface != NULL);
530         }
531
532         _tbm_surface_queue_attach(surface_queue, surface);
533         tbm_surface_internal_unref(surface);
534 }
535
536 static void
537 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
538                           tbm_surface_h surface)
539 {
540         queue_node *node;
541         int queue_type;
542
543         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
544         if (node) {
545                 _queue_delete_node(surface_queue, node);
546                 surface_queue->num_attached--;
547         }
548 }
549
550 static void
551 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
552                            queue_node *node, int push_back)
553 {
554         if (push_back)
555                 _queue_node_push_back(&surface_queue->dirty_queue, node);
556         else
557                 _queue_node_push_front(&surface_queue->dirty_queue, node);
558 }
559
560 static queue_node *
561 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
562 {
563         queue_node *node;
564
565         node = _queue_node_pop_front(&surface_queue->free_queue);
566
567         return node;
568 }
569
570 static queue_node *
571 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
572 {
573         queue_node *node;
574
575         if (_queue_is_empty(&surface_queue->dirty_queue))
576                 return NULL;
577
578         node = _queue_node_pop_front(&surface_queue->dirty_queue);
579
580         return node;
581 }
582
583 static void
584 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
585                            queue_node *node, int push_back)
586 {
587         if (push_back)
588                 _queue_node_push_back(&surface_queue->free_queue, node);
589         else
590                 _queue_node_push_front(&surface_queue->free_queue, node);
591 }
592
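/* Common constructor: set up the lock and CLOCK_MONOTONIC condition
 * variables, initialize the free/dirty queues and notification lists, run
 * the backend init hook and register the queue in the global
 * surf_queue_list. */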
593 static void
594 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
595                         int queue_size,
596                         int width, int height, int format, int flags,
597                         const tbm_surface_queue_interface *impl, void *data)
598 {
599         pthread_condattr_t free_attr, dirty_attr;
600
601         TBM_RETURN_IF_FAIL(surface_queue != NULL);
602         TBM_RETURN_IF_FAIL(impl != NULL);
603
604         if (!g_surf_queue_bufmgr)
605                 _init_tbm_surf_queue_bufmgr();
606
607         pthread_mutex_init(&surface_queue->lock, NULL);
608
609         pthread_condattr_init(&free_attr);
610         pthread_condattr_setclock(&free_attr, CLOCK_MONOTONIC);
611         pthread_cond_init(&surface_queue->free_cond, &free_attr);
612         pthread_condattr_destroy(&free_attr);
613
614         pthread_condattr_init(&dirty_attr);
615         pthread_condattr_setclock(&dirty_attr, CLOCK_MONOTONIC);
616         pthread_cond_init(&surface_queue->dirty_cond, &dirty_attr);
617         pthread_condattr_destroy(&dirty_attr);
618
619         surface_queue->queue_size = queue_size;
620         surface_queue->width = width;
621         surface_queue->height = height;
622         surface_queue->format = format;
623         surface_queue->flags = flags;
624         surface_queue->impl = impl;
625         surface_queue->impl_data = data;
626         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
627
628         _queue_init(&surface_queue->free_queue);
629         _queue_init(&surface_queue->dirty_queue);
630         LIST_INITHEAD(&surface_queue->list);
631
632         LIST_INITHEAD(&surface_queue->destory_noti);
633         LIST_INITHEAD(&surface_queue->dequeuable_noti);
634         LIST_INITHEAD(&surface_queue->dequeue_noti);
635         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
636         LIST_INITHEAD(&surface_queue->acquirable_noti);
637         LIST_INITHEAD(&surface_queue->reset_noti);
638         LIST_INITHEAD(&surface_queue->trace_noti);
639
640         if (surface_queue->impl && surface_queue->impl->init)
641                 surface_queue->impl->init(surface_queue);
642
643         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
644 }
645
646 tbm_surface_queue_error_e
647 tbm_surface_queue_add_destroy_cb(
648         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
649         void *data)
650 {
651         _tbm_surf_queue_mutex_lock();
652         _tbm_set_last_result(TBM_ERROR_NONE);
653
654         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
655                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
656         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
657                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
658
659         pthread_mutex_lock(&surface_queue->lock);
660
661         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
662
663         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
664
665         pthread_mutex_unlock(&surface_queue->lock);
666
667         _tbm_surf_queue_mutex_unlock();
668
669         return TBM_SURFACE_QUEUE_ERROR_NONE;
670 }
671
672 tbm_surface_queue_error_e
673 tbm_surface_queue_remove_destroy_cb(
674         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
675         void *data)
676 {
677         _tbm_surf_queue_mutex_lock();
678         _tbm_set_last_result(TBM_ERROR_NONE);
679
680         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
681                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
682
683         pthread_mutex_lock(&surface_queue->lock);
684
685         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
686
687         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
688
689         pthread_mutex_unlock(&surface_queue->lock);
690
691         _tbm_surf_queue_mutex_unlock();
692
693         return TBM_SURFACE_QUEUE_ERROR_NONE;
694 }
695
696 tbm_surface_queue_error_e
697 tbm_surface_queue_add_dequeuable_cb(
698         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
699         void *data)
700 {
701         _tbm_surf_queue_mutex_lock();
702         _tbm_set_last_result(TBM_ERROR_NONE);
703
704         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
705                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
706         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
707                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
708
709         pthread_mutex_lock(&surface_queue->lock);
710
711         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
712
713         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
714
715         pthread_mutex_unlock(&surface_queue->lock);
716
717         _tbm_surf_queue_mutex_unlock();
718
719         return TBM_SURFACE_QUEUE_ERROR_NONE;
720 }
721
722 tbm_surface_queue_error_e
723 tbm_surface_queue_remove_dequeuable_cb(
724         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
725         void *data)
726 {
727         _tbm_surf_queue_mutex_lock();
728         _tbm_set_last_result(TBM_ERROR_NONE);
729
730         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
731                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
732
733         pthread_mutex_lock(&surface_queue->lock);
734
735         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
736
737         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
738
739         pthread_mutex_unlock(&surface_queue->lock);
740
741         _tbm_surf_queue_mutex_unlock();
742
743         return TBM_SURFACE_QUEUE_ERROR_NONE;
744 }
745
746 tbm_surface_queue_error_e
747 tbm_surface_queue_add_dequeue_cb(
748         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
749         void *data)
750 {
751         _tbm_surf_queue_mutex_lock();
752         _tbm_set_last_result(TBM_ERROR_NONE);
753
754         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
755                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
756         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
757                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
758
759         pthread_mutex_lock(&surface_queue->lock);
760
761         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
762
763         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
764
765         pthread_mutex_unlock(&surface_queue->lock);
766
767         _tbm_surf_queue_mutex_unlock();
768
769         return TBM_SURFACE_QUEUE_ERROR_NONE;
770 }
771
772 tbm_surface_queue_error_e
773 tbm_surface_queue_remove_dequeue_cb(
774         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
775         void *data)
776 {
777         _tbm_surf_queue_mutex_lock();
778         _tbm_set_last_result(TBM_ERROR_NONE);
779
780         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
781                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
782
783         pthread_mutex_lock(&surface_queue->lock);
784
785         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
786
787         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
788
789         pthread_mutex_unlock(&surface_queue->lock);
790
791         _tbm_surf_queue_mutex_unlock();
792
793         return TBM_SURFACE_QUEUE_ERROR_NONE;
794 }
795
796 tbm_surface_queue_error_e
797 tbm_surface_queue_add_can_dequeue_cb(
798         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
799         void *data)
800 {
801         _tbm_surf_queue_mutex_lock();
802         _tbm_set_last_result(TBM_ERROR_NONE);
803
804         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
805                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
806         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
807                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
808
809         pthread_mutex_lock(&surface_queue->lock);
810
811         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
812
813         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
814
815         pthread_mutex_unlock(&surface_queue->lock);
816
817         _tbm_surf_queue_mutex_unlock();
818
819         return TBM_SURFACE_QUEUE_ERROR_NONE;
820 }
821
822 tbm_surface_queue_error_e
823 tbm_surface_queue_remove_can_dequeue_cb(
824         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
825         void *data)
826 {
827         _tbm_surf_queue_mutex_lock();
828         _tbm_set_last_result(TBM_ERROR_NONE);
829
830         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
831                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
832
833         pthread_mutex_lock(&surface_queue->lock);
834
835         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
836
837         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
838
839         pthread_mutex_unlock(&surface_queue->lock);
840
841         _tbm_surf_queue_mutex_unlock();
842
843         return TBM_SURFACE_QUEUE_ERROR_NONE;
844 }
845
846 tbm_surface_queue_error_e
847 tbm_surface_queue_add_acquirable_cb(
848         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
849         void *data)
850 {
851         _tbm_surf_queue_mutex_lock();
852         _tbm_set_last_result(TBM_ERROR_NONE);
853
854         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
855                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
856         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
857                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
858
859         pthread_mutex_lock(&surface_queue->lock);
860
861         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
862
863         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
864
865         pthread_mutex_unlock(&surface_queue->lock);
866
867         _tbm_surf_queue_mutex_unlock();
868
869         return TBM_SURFACE_QUEUE_ERROR_NONE;
870 }
871
872 tbm_surface_queue_error_e
873 tbm_surface_queue_remove_acquirable_cb(
874         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
875         void *data)
876 {
877         _tbm_surf_queue_mutex_lock();
878         _tbm_set_last_result(TBM_ERROR_NONE);
879
880         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
881                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
882
883         pthread_mutex_lock(&surface_queue->lock);
884
885         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
886
887         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
888
889         pthread_mutex_unlock(&surface_queue->lock);
890
891         _tbm_surf_queue_mutex_unlock();
892
893         return TBM_SURFACE_QUEUE_ERROR_NONE;
894 }
895
896 tbm_surface_queue_error_e
897 tbm_surface_queue_add_trace_cb(
898         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
899         void *data)
900 {
901         _tbm_surf_queue_mutex_lock();
902         _tbm_set_last_result(TBM_ERROR_NONE);
903
904         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
905                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
906         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
907                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
908
909         pthread_mutex_lock(&surface_queue->lock);
910
911         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
912
913         _trace_add(&surface_queue->trace_noti, trace_cb, data);
914
915         pthread_mutex_unlock(&surface_queue->lock);
916
917         _tbm_surf_queue_mutex_unlock();
918
919         return TBM_SURFACE_QUEUE_ERROR_NONE;
920 }
921
922 tbm_surface_queue_error_e
923 tbm_surface_queue_remove_trace_cb(
924         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
925         void *data)
926 {
927         _tbm_surf_queue_mutex_lock();
928         _tbm_set_last_result(TBM_ERROR_NONE);
929
930         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
931                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
932
933         pthread_mutex_lock(&surface_queue->lock);
934
935         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
936
937         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
938
939         pthread_mutex_unlock(&surface_queue->lock);
940
941         _tbm_surf_queue_mutex_unlock();
942
943         return TBM_SURFACE_QUEUE_ERROR_NONE;
944 }
945
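/* Install external allocation hooks: alloc_cb is used by
 * _tbm_surface_queue_need_attach() to obtain new buffers and free_cb is
 * called before an attached buffer is destroyed. */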
946 tbm_surface_queue_error_e
947 tbm_surface_queue_set_alloc_cb(
948         tbm_surface_queue_h surface_queue,
949         tbm_surface_alloc_cb alloc_cb,
950         tbm_surface_free_cb free_cb,
951         void *data)
952 {
953         _tbm_surf_queue_mutex_lock();
954         _tbm_set_last_result(TBM_ERROR_NONE);
955
956         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
957                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
958
959         pthread_mutex_lock(&surface_queue->lock);
960
961         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
962
963         surface_queue->alloc_cb = alloc_cb;
964         surface_queue->free_cb = free_cb;
965         surface_queue->alloc_cb_data = data;
966
967         pthread_mutex_unlock(&surface_queue->lock);
968
969         _tbm_surf_queue_mutex_unlock();
970
971         return TBM_SURFACE_QUEUE_ERROR_NONE;
972 }
973
974 int
975 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
976 {
977         int width;
978
979         _tbm_surf_queue_mutex_lock();
980         _tbm_set_last_result(TBM_ERROR_NONE);
981
982         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
983
984         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
985
986         width = surface_queue->width;
987
988         _tbm_surf_queue_mutex_unlock();
989
990         return width;
991 }
992
993 int
994 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
995 {
996         int height;
997
998         _tbm_surf_queue_mutex_lock();
999         _tbm_set_last_result(TBM_ERROR_NONE);
1000
1001         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1002
1003         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1004
1005         height = surface_queue->height;
1006
1007         _tbm_surf_queue_mutex_unlock();
1008
1009         return height;
1010 }
1011
1012 int
1013 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
1014 {
1015         int format;
1016
1017         _tbm_surf_queue_mutex_lock();
1018         _tbm_set_last_result(TBM_ERROR_NONE);
1019
1020         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1021
1022         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1023
1024         format = surface_queue->format;
1025
1026         _tbm_surf_queue_mutex_unlock();
1027
1028         return format;
1029 }
1030
1031 int
1032 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1033 {
1034         int queue_size;
1035
1036         _tbm_surf_queue_mutex_lock();
1037         _tbm_set_last_result(TBM_ERROR_NONE);
1038
1039         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1040
1041         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1042
1043         queue_size = surface_queue->queue_size;
1044
1045         _tbm_surf_queue_mutex_unlock();
1046
1047         return queue_size;
1048 }
1049
1050 tbm_surface_queue_error_e
1051 tbm_surface_queue_add_reset_cb(
1052         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1053         void *data)
1054 {
1055         _tbm_surf_queue_mutex_lock();
1056         _tbm_set_last_result(TBM_ERROR_NONE);
1057
1058         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1059                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1060         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
1061                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1062
1063         pthread_mutex_lock(&surface_queue->lock);
1064
1065         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1066
1067         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1068
1069         pthread_mutex_unlock(&surface_queue->lock);
1070
1071         _tbm_surf_queue_mutex_unlock();
1072
1073         return TBM_SURFACE_QUEUE_ERROR_NONE;
1074 }
1075
1076 tbm_surface_queue_error_e
1077 tbm_surface_queue_remove_reset_cb(
1078         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1079         void *data)
1080 {
1081         _tbm_surf_queue_mutex_lock();
1082         _tbm_set_last_result(TBM_ERROR_NONE);
1083
1084         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1085                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1086
1087         pthread_mutex_lock(&surface_queue->lock);
1088
1089         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1090
1091         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1092
1093         pthread_mutex_unlock(&surface_queue->lock);
1094
1095         _tbm_surf_queue_mutex_unlock();
1096
1097         return TBM_SURFACE_QUEUE_ERROR_NONE;
1098 }
1099
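/* Producer: hand a dequeued surface back to the queue. The node moves to
 * the dirty queue, dirty_cond is signalled and the acquirable callbacks are
 * emitted. */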
1100 tbm_surface_queue_error_e
1101 tbm_surface_queue_enqueue(tbm_surface_queue_h
1102                           surface_queue, tbm_surface_h surface)
1103 {
1104         queue_node *node;
1105         int queue_type;
1106
1107         _tbm_surf_queue_mutex_lock();
1108         _tbm_set_last_result(TBM_ERROR_NONE);
1109
1110         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1111                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1112         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1113                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1114
1115         if (b_dump_queue)
1116                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1117
1118         pthread_mutex_lock(&surface_queue->lock);
1119
1120         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1121
1122         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1123         if (node == NULL || queue_type != NODE_LIST) {
1124                 TBM_ERR("tbm_surface_queue_enqueue::surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1125                         node, queue_type);
1126                 pthread_mutex_unlock(&surface_queue->lock);
1127
1128                 _tbm_surf_queue_mutex_unlock();
1129
1130                 if (!node) {
1131                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1132                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1133                 } else {
1134                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1135                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1136                 }
1137         }
1138
1139         if (surface_queue->impl && surface_queue->impl->enqueue)
1140                 surface_queue->impl->enqueue(surface_queue, node);
1141         else
1142                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1143
1144         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1145                 TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1146                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
1147                 pthread_mutex_unlock(&surface_queue->lock);
1148
1149                 _tbm_surf_queue_mutex_unlock();
1150                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
1151         }
1152
1153         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1154
1155         if (surface_queue->enqueue_sync_count == 1) {
1156                 tbm_surface_info_s info;
1157                 int ret;
1158
1159                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1160                 if (ret == TBM_SURFACE_ERROR_NONE)
1161                         tbm_surface_unmap(surface);
1162         }
1163
1164         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1165
1166         pthread_mutex_unlock(&surface_queue->lock);
1167         pthread_cond_signal(&surface_queue->dirty_cond);
1168
1169         _tbm_surf_queue_mutex_unlock();
1170
1171         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1172
1173         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1174
1175         return TBM_SURFACE_QUEUE_ERROR_NONE;
1176 }
1177
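/* Undo a dequeue without enqueueing: the node returns to the free queue, or
 * is detached/deleted when the queue has shrunk or a delete is pending. */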
1178 tbm_surface_queue_error_e
1179 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1180                           surface_queue, tbm_surface_h surface)
1181 {
1182         queue_node *node;
1183         int queue_type;
1184
1185         _tbm_surf_queue_mutex_lock();
1186         _tbm_set_last_result(TBM_ERROR_NONE);
1187
1188         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1189                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1190         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1191                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1192
1193         pthread_mutex_lock(&surface_queue->lock);
1194
1195         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1196
1197         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1198         if (node == NULL || queue_type != NODE_LIST) {
1199                 TBM_ERR("tbm_surface_queue_cancel_dequeue::surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1200                         node, queue_type);
1201                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1202                 pthread_mutex_unlock(&surface_queue->lock);
1203
1204                 _tbm_surf_queue_mutex_unlock();
1205                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1206         }
1207
1208         if (node->delete_pending) {
1209                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1210
1211                 _queue_delete_node(surface_queue, node);
1212
1213                 pthread_mutex_unlock(&surface_queue->lock);
1214
1215                 _tbm_surf_queue_mutex_unlock();
1216
1217                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1218
1219                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1220         }
1221
1222         if (surface_queue->queue_size < surface_queue->num_attached) {
1223                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1224
1225                 if (surface_queue->impl && surface_queue->impl->need_detach)
1226                         surface_queue->impl->need_detach(surface_queue, node);
1227                 else
1228                         _tbm_surface_queue_detach(surface_queue, surface);
1229
1230                 pthread_mutex_unlock(&surface_queue->lock);
1231
1232                 _tbm_surf_queue_mutex_unlock();
1233
1234                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1235
1236                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1237         }
1238
1239         if (surface_queue->impl && surface_queue->impl->release)
1240                 surface_queue->impl->release(surface_queue, node);
1241         else
1242                 _tbm_surface_queue_release(surface_queue, node, 1);
1243
1244         if (_queue_is_empty(&surface_queue->free_queue)) {
1245                 TBM_ERR("surface_queue->free_queue is empty.\n");
1246                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1247                 pthread_mutex_unlock(&surface_queue->lock);
1248
1249                 _tbm_surf_queue_mutex_unlock();
1250                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1251         }
1252
1253         node->type = QUEUE_NODE_TYPE_RELEASE;
1254
1255         pthread_mutex_unlock(&surface_queue->lock);
1256         pthread_cond_signal(&surface_queue->free_cond);
1257
1258         _tbm_surf_queue_mutex_unlock();
1259
1260         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1261
1262         return TBM_SURFACE_QUEUE_ERROR_NONE;
1263 }
1264
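/* Producer: pop a buffer from the free queue, attaching a new surface first
 * if the queue is not yet fully populated. Non-blocking; returns
 * TBM_SURFACE_QUEUE_ERROR_EMPTY when no buffer is available. */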
1265 tbm_surface_queue_error_e
1266 tbm_surface_queue_dequeue(tbm_surface_queue_h
1267                           surface_queue, tbm_surface_h *surface)
1268 {
1269         queue_node *node;
1270
1271         _tbm_surf_queue_mutex_lock();
1272         _tbm_set_last_result(TBM_ERROR_NONE);
1273
1274         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1275                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1276         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1277                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1278
1279         *surface = NULL;
1280
1281         pthread_mutex_lock(&surface_queue->lock);
1282
1283         if (_queue_is_empty(&surface_queue->free_queue)) {
1284                 if (surface_queue->impl && surface_queue->impl->need_attach)
1285                         surface_queue->impl->need_attach(surface_queue);
1286                 else
1287                         _tbm_surface_queue_need_attach(surface_queue);
1288         }
1289
1290         if (surface_queue->impl && surface_queue->impl->dequeue)
1291                 node = surface_queue->impl->dequeue(surface_queue);
1292         else
1293                 node = _tbm_surface_queue_dequeue(surface_queue);
1294
1295         if (node == NULL || node->surface == NULL) {
1296                 TBM_ERR("_queue_node_pop_front failed\n");
1297                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1298                 pthread_mutex_unlock(&surface_queue->lock);
1299
1300                 _tbm_surf_queue_mutex_unlock();
1301                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1302         }
1303
1304         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1305         *surface = node->surface;
1306
1307         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1308
1309         pthread_mutex_unlock(&surface_queue->lock);
1310
1311         _tbm_surf_queue_mutex_unlock();
1312
1313         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1314
1315         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1316
1317         return TBM_SURFACE_QUEUE_ERROR_NONE;
1318 }
1319
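/* Blocking variant of can_dequeue: waits on the CLOCK_MONOTONIC free_cond
 * and gives up with TBM_SURFACE_QUEUE_ERROR_TIMEOUT after ms_timeout
 * milliseconds. */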
1320 tbm_surface_queue_error_e
1321 tbm_surface_queue_can_dequeue_wait_timeout(tbm_surface_queue_h surface_queue, int ms_timeout)
1322 {
1323         int ret;
1324         struct timespec tp;
1325
1326         _tbm_surf_queue_mutex_lock();
1327         _tbm_set_last_result(TBM_ERROR_NONE);
1328
1329         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1330                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1331
1332         _tbm_surf_queue_mutex_unlock();
1333
1334         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1335
1336         _tbm_surf_queue_mutex_lock();
1337
1338         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1339                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1340
1341         pthread_mutex_lock(&surface_queue->lock);
1342
1343         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1344
1345         if (_queue_is_empty(&surface_queue->free_queue)) {
1346                 if (surface_queue->impl && surface_queue->impl->need_attach)
1347                         surface_queue->impl->need_attach(surface_queue);
1348                 else
1349                         _tbm_surface_queue_need_attach(surface_queue);
1350         }
1351
1352         if (!_queue_is_empty(&surface_queue->free_queue)) {
1353                 pthread_mutex_unlock(&surface_queue->lock);
1354                 _tbm_surf_queue_mutex_unlock();
1355                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1356         }
1357
1358         _tbm_surf_queue_mutex_unlock();
1359
1360         while (1) {
1361                 clock_gettime(CLOCK_MONOTONIC, &tp);
1362
1363                 if (ms_timeout >= 1000)
1364                         tp.tv_sec += ms_timeout / 1000;
1365
1366                 tp.tv_nsec += (ms_timeout % 1000) * 1000000;
1367
1368                 if (tp.tv_nsec >= 1000000000L) {
1369                         tp.tv_sec++;
1370                         tp.tv_nsec -= 1000000000L;
1371                 }
1372
1373                 ret = pthread_cond_timedwait(&surface_queue->free_cond, &surface_queue->lock, &tp);
1374                 if (ret) {
1375                         if (ret == ETIMEDOUT) {
1376                                 TBM_ERR("surface_queue:%p can dequeue wait timeout", surface_queue);
1377                                 pthread_mutex_unlock(&surface_queue->lock);
1378                                 return TBM_SURFACE_QUEUE_ERROR_TIMEOUT;
1379                         } else {
1380                                 TBM_INFO("surface_queue:%p timedwait error retry wait", surface_queue);
1381                         }
1382                 } else {
1383                         if (surface_queue->impl && surface_queue->impl->need_attach)
1384                                 surface_queue->impl->need_attach(surface_queue);
1385                         else
1386                                 _tbm_surface_queue_need_attach(surface_queue);
1387
1388                         if (!_queue_is_empty(&surface_queue->free_queue)) {
1389                                 pthread_mutex_unlock(&surface_queue->lock);
1390                                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1391                         }
1392                 }
1393         }
1394 }
1395
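/* Returns 1 when a buffer can be dequeued; with wait != 0 the call blocks
 * on free_cond until a free buffer becomes available. */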
1396 int
1397 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1398 {
1399         _tbm_surf_queue_mutex_lock();
1400         _tbm_set_last_result(TBM_ERROR_NONE);
1401
1402         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1403
1404         _tbm_surf_queue_mutex_unlock();
1405
1406         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1407
1408         _tbm_surf_queue_mutex_lock();
1409
1410         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1411
1412         pthread_mutex_lock(&surface_queue->lock);
1413
1414         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1415
1416         if (_queue_is_empty(&surface_queue->free_queue)) {
1417                 if (surface_queue->impl && surface_queue->impl->need_attach)
1418                         surface_queue->impl->need_attach(surface_queue);
1419                 else
1420                         _tbm_surface_queue_need_attach(surface_queue);
1421         }
1422
1423         if (!_queue_is_empty(&surface_queue->free_queue)) {
1424                 pthread_mutex_unlock(&surface_queue->lock);
1425                 _tbm_surf_queue_mutex_unlock();
1426                 return 1;
1427         }
1428
1429         if (wait) {
1430                 _tbm_surf_queue_mutex_unlock();
1431                 while (1) {
1432                         pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1433
1434                         if (surface_queue->impl && surface_queue->impl->need_attach)
1435                                 surface_queue->impl->need_attach(surface_queue);
1436                         else
1437                                 _tbm_surface_queue_need_attach(surface_queue);
1438
1439                         if (!_queue_is_empty(&surface_queue->free_queue)) {
1440                                 pthread_mutex_unlock(&surface_queue->lock);
1441                                 return 1;
1442                         }
1443                 }
1444         }
1445
1446         pthread_mutex_unlock(&surface_queue->lock);
1447         _tbm_surf_queue_mutex_unlock();
1448         return 0;
1449 }
1450
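/* Consumer: return an acquired surface to the free queue, signal free_cond
 * and emit the dequeuable callbacks. */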
1451 tbm_surface_queue_error_e
1452 tbm_surface_queue_release(tbm_surface_queue_h
1453                           surface_queue, tbm_surface_h surface)
1454 {
1455         queue_node *node;
1456         int queue_type;
1457
1458         _tbm_surf_queue_mutex_lock();
1459         _tbm_set_last_result(TBM_ERROR_NONE);
1460
1461         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1462                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1463         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1464                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1465
1466         pthread_mutex_lock(&surface_queue->lock);
1467
1468         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1469
1470         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1471         if (node == NULL || queue_type != NODE_LIST) {
1472                 TBM_ERR("tbm_surface_queue_release::surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1473                         node, queue_type);
1474                 pthread_mutex_unlock(&surface_queue->lock);
1475
1476                 _tbm_surf_queue_mutex_unlock();
1477
1478                 if (!node) {
1479                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1480                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1481                 } else {
1482                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1483                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1484                 }
1485         }
1486
1487         if (node->delete_pending) {
1488                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1489
1490                 _queue_delete_node(surface_queue, node);
1491
1492                 pthread_mutex_unlock(&surface_queue->lock);
1493
1494                 _tbm_surf_queue_mutex_unlock();
1495
1496                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1497
1498                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1499         }
1500
1501         if (surface_queue->queue_size < surface_queue->num_attached) {
1502                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1503
1504                 if (surface_queue->impl && surface_queue->impl->need_detach)
1505                         surface_queue->impl->need_detach(surface_queue, node);
1506                 else
1507                         _tbm_surface_queue_detach(surface_queue, surface);
1508
1509                 pthread_mutex_unlock(&surface_queue->lock);
1510
1511                 _tbm_surf_queue_mutex_unlock();
1512
1513                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1514
1515                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1516         }
1517
1518         if (surface_queue->impl && surface_queue->impl->release)
1519                 surface_queue->impl->release(surface_queue, node);
1520         else
1521                 _tbm_surface_queue_release(surface_queue, node, 1);
1522
1523         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1524                 TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
1525                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
1526                 pthread_mutex_unlock(&surface_queue->lock);
1527
1528                 _tbm_surf_queue_mutex_unlock();
1529                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
1530         }
1531
1532         node->type = QUEUE_NODE_TYPE_RELEASE;
1533
1534         pthread_mutex_unlock(&surface_queue->lock);
1535         pthread_cond_signal(&surface_queue->free_cond);
1536
1537         _tbm_surf_queue_mutex_unlock();
1538
1539         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1540
1541         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1542
1543         return TBM_SURFACE_QUEUE_ERROR_NONE;
1544 }
1545
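/* Undo an acquire: the node is pushed back onto the dirty queue so it can
 * be acquired again, and the acquirable callbacks are emitted. */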
1546 tbm_surface_queue_error_e
1547 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1548                         surface_queue, tbm_surface_h surface)
1549 {
1550         queue_node *node;
1551         int queue_type;
1552
1553         _tbm_surf_queue_mutex_lock();
1554         _tbm_set_last_result(TBM_ERROR_NONE);
1555
1556         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1557                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1558         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1559                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1560
1561         pthread_mutex_lock(&surface_queue->lock);
1562
1563         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1564
1565         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1566         if (node == NULL || queue_type != NODE_LIST) {
1567                 TBM_ERR("tbm_surface_queue_cancel_acquire::surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1568                         node, queue_type);
1569                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1570                 pthread_mutex_unlock(&surface_queue->lock);
1571
1572                 _tbm_surf_queue_mutex_unlock();
1573                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1574         }
1575
1576         if (surface_queue->impl && surface_queue->impl->enqueue)
1577                 surface_queue->impl->enqueue(surface_queue, node);
1578         else
1579                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1580
1581         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1582                 TBM_ERR("surface was re-enqueued but the dirty_queue is empty (node:%p)\n", node);
1583                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1584                 pthread_mutex_unlock(&surface_queue->lock);
1585
1586                 _tbm_surf_queue_mutex_unlock();
1587                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1588         }
1589
1590         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1591
1592         pthread_mutex_unlock(&surface_queue->lock);
1593         pthread_cond_signal(&surface_queue->dirty_cond);
1594
1595         _tbm_surf_queue_mutex_unlock();
1596
1597         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1598
1599         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1600
1601         return TBM_SURFACE_QUEUE_ERROR_NONE;
1602 }
1603
1604 tbm_surface_queue_error_e
1605 tbm_surface_queue_acquire(tbm_surface_queue_h
1606                           surface_queue, tbm_surface_h *surface)
1607 {
1608         queue_node *node;
1609
1610         _tbm_surf_queue_mutex_lock();
1611         _tbm_set_last_result(TBM_ERROR_NONE);
1612
1613         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1614                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1615         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1616                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1617
1618         *surface = NULL;
1619
1620         pthread_mutex_lock(&surface_queue->lock);
1621
1622         if (surface_queue->impl && surface_queue->impl->acquire)
1623                 node = surface_queue->impl->acquire(surface_queue);
1624         else
1625                 node = _tbm_surface_queue_acquire(surface_queue);
1626
1627         if (node == NULL || node->surface == NULL) {
1628                 TBM_ERR("failed to acquire a tbm_surface from the dirty_queue\n");
1629                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1630                 pthread_mutex_unlock(&surface_queue->lock);
1631
1632                 _tbm_surf_queue_mutex_unlock();
1633                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1634         }
1635
1636         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1637
1638         *surface = node->surface;
1639
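        /*
         * Acquire-side sync: when acquire_sync_count reaches 1, do a read
         * map/unmap round trip as a synchronization point before the buffer
         * is handed to the consumer; the counter is decremented just below.
         */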
1640         if (surface_queue->acquire_sync_count == 1) {
1641                 tbm_surface_info_s info;
1642                 int ret;
1643
1644                 TBM_ERR("start map surface:%p", *surface);
1645                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1646                 TBM_ERR("end map surface:%p", *surface);
1647                 if (ret == TBM_SURFACE_ERROR_NONE)
1648                         tbm_surface_unmap(*surface);
1649         }
1650
1651         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1652
1653         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1654
1655         pthread_mutex_unlock(&surface_queue->lock);
1656
1657         _tbm_surf_queue_mutex_unlock();
1658
1659         if (b_dump_queue)
1660                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1661
1662         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1663
1664         return TBM_SURFACE_QUEUE_ERROR_NONE;
1665 }
1666
1667 int
1668 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1669 {
1670         _tbm_surf_queue_mutex_lock();
1671         _tbm_set_last_result(TBM_ERROR_NONE);
1672
1673         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1674
1675         pthread_mutex_lock(&surface_queue->lock);
1676
1677         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1678
1679         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1680                 pthread_mutex_unlock(&surface_queue->lock);
1681                 _tbm_surf_queue_mutex_unlock();
1682                 return 1;
1683         }
1684
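        /*
         * If the caller asked to wait and a producer still holds dequeued
         * buffers, block on dirty_cond until one of them is enqueued. The
         * global surface-queue mutex is released first so other threads can
         * keep using the queue while this thread waits.
         */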
1685         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1686                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1687                 _tbm_surf_queue_mutex_unlock();
1688                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1689                 pthread_mutex_unlock(&surface_queue->lock);
1690                 return 1;
1691         }
1692
1693         pthread_mutex_unlock(&surface_queue->lock);
1694         _tbm_surf_queue_mutex_unlock();
1695         return 0;
1696 }
1697
1698 void
1699 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1700 {
1701         queue_node *node = NULL, *tmp;
1702
1703         _tbm_surf_queue_mutex_lock();
1704         _tbm_set_last_result(TBM_ERROR_NONE);
1705
1706         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1707
1708         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1709
1710         LIST_DEL(&surface_queue->item_link);
1711
1712         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1713                 _queue_delete_node(surface_queue, node);
1714
1715         if (surface_queue->impl && surface_queue->impl->destroy)
1716                 surface_queue->impl->destroy(surface_queue);
1717
1718         _notify_emit(surface_queue, &surface_queue->destory_noti);
1719
1720         _notify_remove_all(&surface_queue->destory_noti);
1721         _notify_remove_all(&surface_queue->dequeuable_noti);
1722         _notify_remove_all(&surface_queue->dequeue_noti);
1723         _notify_remove_all(&surface_queue->can_dequeue_noti);
1724         _notify_remove_all(&surface_queue->acquirable_noti);
1725         _notify_remove_all(&surface_queue->reset_noti);
1726         _trace_remove_all(&surface_queue->trace_noti);
1727
1728         pthread_mutex_destroy(&surface_queue->lock);
1729
1730         free(surface_queue);
1731
1732         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1733                 _deinit_tbm_surf_queue_bufmgr();
1734
1735         _tbm_surf_queue_mutex_unlock();
1736 }
1737
1738 tbm_surface_queue_error_e
1739 tbm_surface_queue_reset(tbm_surface_queue_h
1740                         surface_queue, int width, int height, int format)
1741 {
1742         queue_node *node = NULL, *tmp;
1743
1744         _tbm_surf_queue_mutex_lock();
1745         _tbm_set_last_result(TBM_ERROR_NONE);
1746
1747         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1748                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1749
1750         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1751
1752         if (width == surface_queue->width && height == surface_queue->height &&
1753                 format == surface_queue->format) {
1754                 _tbm_surf_queue_mutex_unlock();
1755                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1756         }
1757
1758         pthread_mutex_lock(&surface_queue->lock);
1759
1760         surface_queue->width = width;
1761         surface_queue->height = height;
1762         surface_queue->format = format;
1763
1764         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1765                 /* Destroy surfaces waiting in the free_queue and mark the remaining in-use surfaces delete_pending */
1766                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1767                         _queue_delete_node(surface_queue, node);
1768
1769                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1770                         node->delete_pending = 1;
1771         } else {
1772                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1773                         _queue_delete_node(surface_queue, node);
1774
1775                 _queue_init(&surface_queue->dirty_queue);
1776                 LIST_INITHEAD(&surface_queue->list);
1777         }
1778
1779         /* Reset queue */
1780         _queue_init(&surface_queue->free_queue);
1781
1782         surface_queue->num_attached = 0;
1783
1784         if (surface_queue->impl && surface_queue->impl->reset)
1785                 surface_queue->impl->reset(surface_queue);
1786
1787         pthread_mutex_unlock(&surface_queue->lock);
1788         pthread_cond_signal(&surface_queue->free_cond);
1789
1790         _tbm_surf_queue_mutex_unlock();
1791
1792         _notify_emit(surface_queue, &surface_queue->reset_noti);
1793
1794         return TBM_SURFACE_QUEUE_ERROR_NONE;
1795 }
1796
1797 tbm_surface_queue_error_e
1798 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1799 {
1800         _tbm_surf_queue_mutex_lock();
1801         _tbm_set_last_result(TBM_ERROR_NONE);
1802
1803         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1804                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1805
1806         _tbm_surf_queue_mutex_unlock();
1807
1808         _notify_emit(surface_queue, &surface_queue->reset_noti);
1809
1810         return TBM_SURFACE_QUEUE_ERROR_NONE;
1811 }
1812
1813 tbm_surface_queue_error_e
1814 tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
1815 {
1816         _tbm_surf_queue_mutex_lock();
1817         _tbm_set_last_result(TBM_ERROR_NONE);
1818
1819         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1820                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1821
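        /*
         * Take and drop the queue lock once so that any thread still holding
         * it has reached a consistent point before free_cond is signaled and
         * the dequeuable notification is emitted.
         */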
1822         pthread_mutex_lock(&surface_queue->lock);
1823         pthread_mutex_unlock(&surface_queue->lock);
1824         pthread_cond_signal(&surface_queue->free_cond);
1825
1826         _tbm_surf_queue_mutex_unlock();
1827
1828         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1829
1830         return TBM_SURFACE_QUEUE_ERROR_NONE;
1831 }
1832
1833 tbm_surface_queue_error_e
1834 tbm_surface_queue_set_size(tbm_surface_queue_h
1835                         surface_queue, int queue_size, int flush)
1836 {
1837         queue_node *node = NULL, *tmp;
1838
1839         _tbm_surf_queue_mutex_lock();
1840         _tbm_set_last_result(TBM_ERROR_NONE);
1841
1842         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1843                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1844         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1845                                         TBM_ERROR_INVALID_PARAMETER);
1846
1847         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1848
1849         if ((surface_queue->queue_size == queue_size) && !flush) {
1850                 _tbm_surf_queue_mutex_unlock();
1851                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1852         }
1853
1854         pthread_mutex_lock(&surface_queue->lock);
1855
1856         if (flush) {
1857                 surface_queue->queue_size = queue_size;
1858
1859                 if (surface_queue->num_attached == 0) {
1860                         pthread_mutex_unlock(&surface_queue->lock);
1861                         _tbm_surf_queue_mutex_unlock();
1862                         return TBM_SURFACE_QUEUE_ERROR_NONE;
1863                 }
1864
1865                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1866                         /* Destroy surfaces waiting in the free_queue and mark the remaining in-use surfaces delete_pending */
1867                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1868                                 _queue_delete_node(surface_queue, node);
1869
1870                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1871                                 node->delete_pending = 1;
1872                 } else {
1873                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1874                                 _queue_delete_node(surface_queue, node);
1875
1876                         _queue_init(&surface_queue->dirty_queue);
1877                         LIST_INITHEAD(&surface_queue->list);
1878                 }
1879
1880                 /* Reset queue */
1881                 _queue_init(&surface_queue->free_queue);
1882
1883                 surface_queue->num_attached = 0;
1884
1885                 if (surface_queue->impl && surface_queue->impl->reset)
1886                         surface_queue->impl->reset(surface_queue);
1887
1888                 pthread_mutex_unlock(&surface_queue->lock);
1889                 pthread_cond_signal(&surface_queue->free_cond);
1890
1891                 _tbm_surf_queue_mutex_unlock();
1892
1893                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1894
1895                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1896         } else {
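                /*
                 * No flush: when shrinking the queue, detach surplus surfaces
                 * from the free_queue; surfaces still in use are detached
                 * later, when they are released while queue_size is below
                 * num_attached.
                 */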
1897                 if (surface_queue->queue_size > queue_size) {
1898                         int need_del = surface_queue->queue_size - queue_size;
1899
1900                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1901                                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1902
1903                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1904                                         surface_queue->impl->need_detach(surface_queue, node);
1905                                 else
1906                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1907
1908                                 need_del--;
1909                                 if (need_del == 0)
1910                                         break;
1911                         }
1912                 }
1913
1914                 surface_queue->queue_size = queue_size;
1915
1916                 pthread_mutex_unlock(&surface_queue->lock);
1917
1918                 _tbm_surf_queue_mutex_unlock();
1919
1920                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1921         }
1922 }
1923
1924 tbm_surface_queue_error_e
1925 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1926 {
1927         queue_node *node = NULL;
1928
1929         _tbm_surf_queue_mutex_lock();
1930         _tbm_set_last_result(TBM_ERROR_NONE);
1931
1932         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1933                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1934
1935         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1936
1937         if (surface_queue->num_attached == 0) {
1938                 _tbm_surf_queue_mutex_unlock();
1939                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1940         }
1941
1942         pthread_mutex_lock(&surface_queue->lock);
1943
1944         /* Destroy the surfaces in the free_queue */
1945         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1946                 if (surface_queue->impl && surface_queue->impl->need_detach)
1947                         surface_queue->impl->need_detach(surface_queue, node);
1948                 else
1949                         _tbm_surface_queue_detach(surface_queue, node->surface);
1950         }
1951
1952         /* Reset queue */
1953         _queue_init(&surface_queue->free_queue);
1954
1955         pthread_mutex_unlock(&surface_queue->lock);
1956         _tbm_surf_queue_mutex_unlock();
1957
1958         return TBM_SURFACE_QUEUE_ERROR_NONE;
1959 }
1960
1961 tbm_surface_queue_error_e
1962 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1963 {
1964         queue_node *node = NULL, *tmp;
1965
1966         _tbm_surf_queue_mutex_lock();
1967         _tbm_set_last_result(TBM_ERROR_NONE);
1968
1969         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1970                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1971
1972         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1973
1974         if (surface_queue->num_attached == 0) {
1975                 _tbm_surf_queue_mutex_unlock();
1976                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1977         }
1978
1979         pthread_mutex_lock(&surface_queue->lock);
1980
1981         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1982                 /* Destroy surfaces waiting in the free_queue and mark the remaining in-use surfaces delete_pending */
1983                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1984                         _queue_delete_node(surface_queue, node);
1985
1986                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1987                         node->delete_pending = 1;
1988         } else {
1989                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1990                         _queue_delete_node(surface_queue, node);
1991
1992                 _queue_init(&surface_queue->dirty_queue);
1993                 LIST_INITHEAD(&surface_queue->list);
1994         }
1995
1996         /* Reset queue */
1997         _queue_init(&surface_queue->free_queue);
1998
1999         surface_queue->num_attached = 0;
2000
2001         if (surface_queue->impl && surface_queue->impl->reset)
2002                 surface_queue->impl->reset(surface_queue);
2003
2004         pthread_mutex_unlock(&surface_queue->lock);
2005         pthread_cond_signal(&surface_queue->free_cond);
2006
2007         _tbm_surf_queue_mutex_unlock();
2008
2009         _notify_emit(surface_queue, &surface_queue->reset_noti);
2010
2011         return TBM_SURFACE_QUEUE_ERROR_NONE;
2012 }
2013
2014 tbm_surface_queue_error_e
2015 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
2016                         tbm_surface_h *surfaces, int *num)
2017 {
2018         queue_node *node = NULL;
2019
2020         _tbm_surf_queue_mutex_lock();
2021         _tbm_set_last_result(TBM_ERROR_NONE);
2022
2023         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2024                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2025         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2026                                TBM_ERROR_INVALID_PARAMETER);
2027
2028         *num = 0;
2029
2030         pthread_mutex_lock(&surface_queue->lock);
2031
2032         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
2033                 if (node->delete_pending) continue;
2034
2035                 if (surfaces)
2036                         surfaces[*num] = node->surface;
2037
2038                 *num = *num + 1;
2039         }
2040
2041         pthread_mutex_unlock(&surface_queue->lock);
2042
2043         _tbm_surf_queue_mutex_unlock();
2044
2045         return TBM_SURFACE_QUEUE_ERROR_NONE;
2046 }
2047
2048 tbm_surface_queue_error_e
2049 tbm_surface_queue_get_acquirable_surfaces(tbm_surface_queue_h surface_queue,
2050                         tbm_surface_h *surfaces, int *num)
2051 {
2052         queue_node *node = NULL;
2053
2054         _tbm_surf_queue_mutex_lock();
2055         _tbm_set_last_result(TBM_ERROR_NONE);
2056
2057         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2058                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2059         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2060                                TBM_ERROR_INVALID_PARAMETER);
2061
2062         *num = 0;
2063         pthread_mutex_lock(&surface_queue->lock);
2064
2065         LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head, item_link) {
2066                 if (surfaces)
2067                         surfaces[*num] = node->surface;
2068
2069                 *num = *num + 1;
2070         }
2071
2072         pthread_mutex_unlock(&surface_queue->lock);
2073
2074         _tbm_surf_queue_mutex_unlock();
2075
2076         return TBM_SURFACE_QUEUE_ERROR_NONE;
2077 }
2078
2079 tbm_surface_queue_error_e
2080 tbm_surface_queue_get_trace_surface_num(
2081                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
2082 {
2083         _tbm_surf_queue_mutex_lock();
2084         _tbm_set_last_result(TBM_ERROR_NONE);
2085
2086         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2087                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2088         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2089                                TBM_ERROR_INVALID_PARAMETER);
2090
2091         *num = 0;
2092
2093         pthread_mutex_lock(&surface_queue->lock);
2094
2095         switch (trace) {
2096         case TBM_SURFACE_QUEUE_TRACE_NONE:
2097                 *num = 0;
2098                 break;
2099         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
2100                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2101                 break;
2102         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
2103                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2104                 break;
2105         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
2106                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
2107                 break;
2108         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
2109                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
2110                 break;
2111         default:
2112                 break;
2113         }
2114
2115         pthread_mutex_unlock(&surface_queue->lock);
2116
2117         _tbm_surf_queue_mutex_unlock();
2118
2119         return TBM_SURFACE_QUEUE_ERROR_NONE;
2120 }
2121
2122 static const tbm_surface_queue_interface tbm_queue_default_impl = {
2123         NULL,                           /*__tbm_queue_default_init*/
2124         NULL,                           /*__tbm_queue_default_reset*/
2125         NULL,                           /*__tbm_queue_default_destroy*/
2126         NULL,                           /*__tbm_queue_default_need_attach*/
2127         NULL,                           /*__tbm_queue_default_enqueue*/
2128         NULL,                           /*__tbm_queue_default_release*/
2129         NULL,                           /*__tbm_queue_default_dequeue*/
2130         NULL,                           /*__tbm_queue_default_acquire*/
2131         NULL,                           /*__tbm_queue_default_need_detach*/
2132 };
2133
2134 tbm_surface_queue_h
2135 tbm_surface_queue_create(int queue_size, int width,
2136                          int height, int format, int flags)
2137 {
2138         _tbm_surf_queue_mutex_lock();
2139         _tbm_set_last_result(TBM_ERROR_NONE);
2140
2141         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2142         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2143         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2144         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2145
2146         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2147                                             sizeof(struct _tbm_surface_queue));
2148         if (!surface_queue) {
2149                 TBM_ERR("cannot allocate the surface_queue.\n");
2150                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2151                 _tbm_surf_queue_mutex_unlock();
2152                 return NULL;
2153         }
2154
2155         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2156
2157         _tbm_surface_queue_init(surface_queue,
2158                                 queue_size,
2159                                 width, height, format, flags,
2160                                 &tbm_queue_default_impl, NULL);
2161
2162         _tbm_surf_queue_mutex_unlock();
2163
2164         return surface_queue;
2165 }
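
/*
 * Illustrative usage sketch (not part of this file): a minimal
 * producer/consumer round trip over a queue created above. Error paths
 * and the rendering/display steps are omitted; the size, format and
 * flags below are only examples.
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_create(3, 720, 1280, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   tbm_surface_h surface = NULL;
 *
 *   if (tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           // render into the dequeued surface ...
 *           tbm_surface_queue_enqueue(queue, surface);
 *   }
 *
 *   if (tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           // display the acquired surface ...
 *           tbm_surface_queue_release(queue, surface);
 *   }
 *
 *   tbm_surface_queue_destroy(queue);
 */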
2166
2167 typedef struct {
2168         queue dequeue_list;
2169 } tbm_queue_sequence;
2170
2171 static void
2172 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2173 {
2174         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2175
2176         _queue_init(&data->dequeue_list);
2177 }
2178
2179 static void
2180 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2181 {
2182         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2183
2184         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2185                 return;
2186
2187         _queue_init(&data->dequeue_list);
2188 }
2189
2190 static void
2191 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2192 {
2193         free(surface_queue->impl_data);
2194 }
2195
2196 static void
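/*
 * Sequence mode: buffers must be enqueued in the order in which they were
 * dequeued. If the node being enqueued is not the oldest outstanding
 * dequeue, the request is ignored here.
 */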
2197 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2198                              queue_node *node)
2199 {
2200         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2201         queue_node *first = NULL;
2202
2203         first = container_of(data->dequeue_list.head.next, first, item_link);
2204         if (first != node) {
2205                 return;
2206         }
2207
2208         node->priv_flags = 0;
2209
2210         _queue_node_pop(&data->dequeue_list, node);
2211         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2212 }
2213
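/*
 * If the node is still tracked in the dequeue_list (its priv_flags is set),
 * drop it from the list before handing it back to the free_queue.
 */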
2214 static void
2215 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2216                                 queue_node *node)
2217 {
2218         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2219
2220         if (node->priv_flags) {
2221                 node->priv_flags = 0;
2222                 _queue_node_pop(&data->dequeue_list, node);
2223         }
2224
2225         _tbm_surface_queue_release(surface_queue, node, 1);
2226 }
2227
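/*
 * Track the dequeue order: every node handed out is appended to
 * dequeue_list and flagged via priv_flags so that enqueue/release can
 * preserve the sequence.
 */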
2228 static queue_node *
2229 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2230                              surface_queue)
2231 {
2232         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2233         queue_node *node;
2234
2235         node = _tbm_surface_queue_dequeue(surface_queue);
2236         if (node) {
2237                 _queue_node_push_back(&data->dequeue_list, node);
2238                 node->priv_flags = 1;
2239         }
2240
2241         return node;
2242 }
2243
2244 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2245         __tbm_queue_sequence_init,
2246         __tbm_queue_sequence_reset,
2247         __tbm_queue_sequence_destroy,
2248         NULL,
2249         __tbm_queue_sequence_enqueue,
2250         __tbm_queue_sequence_release,
2251         __tbm_queue_sequence_dequeue,
2252         NULL,                                   /*__tbm_queue_sequence_acquire*/
2253         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2254 };
2255
2256 tbm_surface_queue_h
2257 tbm_surface_queue_sequence_create(int queue_size, int width,
2258                                   int height, int format, int flags)
2259 {
2260         _tbm_surf_queue_mutex_lock();
2261         _tbm_set_last_result(TBM_ERROR_NONE);
2262
2263         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2264         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2265         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2266         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2267
2268         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2269                                             sizeof(struct _tbm_surface_queue));
2270         if (surface_queue == NULL) {
2271                 TBM_ERR("cannot allocate the surface_queue.\n");
2272                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2273                 _tbm_surf_queue_mutex_unlock();
2274                 return NULL;
2275         }
2276
2277         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2278
2279         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2280                                    sizeof(tbm_queue_sequence));
2281         if (data == NULL) {
2282                 TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
2283                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2284                 free(surface_queue);
2285                 _tbm_surf_queue_mutex_unlock();
2286                 return NULL;
2287         }
2288
2289         _tbm_surface_queue_init(surface_queue,
2290                                 queue_size,
2291                                 width, height, format, flags,
2292                                 &tbm_queue_sequence_impl, data);
2293
2294         _tbm_surf_queue_mutex_unlock();
2295
2296         return surface_queue;
2297 }
2298
2299 tbm_surface_queue_error_e
2300 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2301                                   int modes)
2302 {
2303         _tbm_surf_queue_mutex_lock();
2304         _tbm_set_last_result(TBM_ERROR_NONE);
2305
2306         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2307                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2308
2309         pthread_mutex_lock(&surface_queue->lock);
2310
2311         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2312                 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
2313         else
2314                 surface_queue->modes |= modes;
2315
2316         pthread_mutex_unlock(&surface_queue->lock);
2317
2318         _tbm_surf_queue_mutex_unlock();
2319
2320         return TBM_SURFACE_QUEUE_ERROR_NONE;
2321 }
2322
2323 tbm_surface_queue_error_e
2324 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2325                                   unsigned int sync_count)
2326 {
2327         int dequeue_num, enqueue_num;
2328
2329         _tbm_surf_queue_mutex_lock();
2330         _tbm_set_last_result(TBM_ERROR_NONE);
2331
2332         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2333                                    TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2334
2335         pthread_mutex_lock(&surface_queue->lock);
2336
2337         dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2338         enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2339
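        /*
         * With no outstanding dequeues and a sync_count of 0, arm the
         * acquire-side sync for the buffers that are already enqueued;
         * otherwise set the enqueue-side sync threshold to the number of
         * outstanding dequeues plus sync_count.
         */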
2340         if (dequeue_num + sync_count == 0)
2341                 surface_queue->acquire_sync_count = enqueue_num;
2342         else
2343                 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2344
2345         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2346                                 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2347
2348         pthread_mutex_unlock(&surface_queue->lock);
2349
2350         _tbm_surf_queue_mutex_unlock();
2351
2352         return TBM_SURFACE_QUEUE_ERROR_NONE;
2353 }