added tbm_surface_queue_set_sync_count
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 #define TBM_QUEUE_DEBUG 0
42
43 #ifdef TRACE
44 #define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
45 #else
46 #define TBM_QUEUE_TRACE(fmt, ...)
47 #endif /* TRACE */
48
49 #if TBM_QUEUE_DEBUG
50 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
51 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
52 #else
53 #define TBM_LOCK()
54 #define TBM_UNLOCK()
55 #endif
56
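/* Process-wide state: every surface queue created in this file shares one
 * tbm_bufmgr instance and one mutex that serializes the public entry points. */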
57 static tbm_bufmgr g_surf_queue_bufmgr;
58 static pthread_mutex_t tbm_surf_queue_lock;
59 void _tbm_surface_queue_mutex_unlock(void);
60
61 /* check condition */
62 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
63         if (!(cond)) {\
64                 TBM_LOG_E("'%s' failed.\n", #cond);\
65                 _tbm_surf_queue_mutex_unlock();\
66                 return;\
67         } \
68 }
69
70 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
71         if (!(cond)) {\
72                 TBM_LOG_E("'%s' failed.\n", #cond);\
73                 _tbm_surf_queue_mutex_unlock();\
74                 return val;\
75         } \
76 }
77
78 typedef enum _queue_node_type {
79         QUEUE_NODE_TYPE_NONE,
80         QUEUE_NODE_TYPE_DEQUEUE,
81         QUEUE_NODE_TYPE_ENQUEUE,
82         QUEUE_NODE_TYPE_ACQUIRE,
83         QUEUE_NODE_TYPE_RELEASE
84 } Queue_Node_Type;
85
86 typedef struct {
87         struct list_head head;
88         int count;
89 } queue;
90
91 typedef struct {
92         tbm_surface_h surface;
93
94         struct list_head item_link;
95         struct list_head link;
96
97         Queue_Node_Type type;
98
99         unsigned int priv_flags;        /*for each queue*/
100
101         int delete_pending;
102 } queue_node;
103
104 typedef struct {
105         struct list_head link;
106
107         tbm_surface_queue_notify_cb cb;
108         void *data;
109 } queue_notify;
110
111 typedef struct {
112         struct list_head link;
113
114         tbm_surface_queue_trace_cb cb;
115         void *data;
116 } queue_trace;
117
118 typedef struct _tbm_surface_queue_interface {
119         void (*init)(tbm_surface_queue_h queue);
120         void (*reset)(tbm_surface_queue_h queue);
121         void (*destroy)(tbm_surface_queue_h queue);
122         void (*need_attach)(tbm_surface_queue_h queue);
123
124         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
125         void (*release)(tbm_surface_queue_h queue, queue_node *node);
126         queue_node *(*dequeue)(tbm_surface_queue_h queue);
127         queue_node *(*acquire)(tbm_surface_queue_h queue);
128         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
129 } tbm_surface_queue_interface;
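
/*
 * The hooks above let a backend customize queue behaviour; hooks left NULL
 * fall back to the default _tbm_surface_queue_* helpers below (need_attach,
 * which has no default, is simply skipped). An illustrative sketch of a
 * backend (the names are hypothetical, not part of libtbm):
 *
 *     static void my_need_attach(tbm_surface_queue_h queue)
 *     {
 *         // allocate surfaces and attach them until queue_size is reached,
 *         // presumably the way in-tree backends use _tbm_surface_queue_attach()
 *     }
 *
 *     static const tbm_surface_queue_interface my_impl = {
 *         .need_attach = my_need_attach,
 *         // other hooks left NULL -> default enqueue/dequeue/acquire/release
 *     };
 */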
130
131 struct _tbm_surface_queue {
132         int width;
133         int height;
134         int format;
135         int queue_size;
136         int num_attached;
137
138         queue free_queue;
139         queue dirty_queue;
140         struct list_head list;
141
142         struct list_head destory_noti;
143         struct list_head dequeuable_noti;
144         struct list_head dequeue_noti;
145         struct list_head can_dequeue_noti;
146         struct list_head acquirable_noti;
147         struct list_head reset_noti;
148         struct list_head trace_noti;
149
150         pthread_mutex_t lock;
151         pthread_cond_t free_cond;
152         pthread_cond_t dirty_cond;
153
154         const tbm_surface_queue_interface *impl;
155         void *impl_data;
156
157         //For external buffer allocation
158         tbm_surface_alloc_cb alloc_cb;
159         tbm_surface_free_cb free_cb;
160         void *alloc_cb_data;
161
162         struct list_head item_link; /* link of surface queue */
163
164         int modes;
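        /* Sync counters: while non-zero, the counter is decremented on each
         * enqueue (resp. acquire); when it equals 1 the surface is additionally
         * mapped and unmapped once to synchronize its contents
         * (see tbm_surface_queue_enqueue()/_acquire() below). */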
165         unsigned int enqueue_sync_count;
166         unsigned int acquire_sync_count;
167 };
168
169 static bool
170 _tbm_surf_queue_mutex_init(void)
171 {
172         static bool tbm_surf_queue_mutex_init = false;
173
174         if (tbm_surf_queue_mutex_init)
175                 return true;
176
177         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
178                 TBM_LOG_E("fail: pthread_mutex_init\n");
179                 return false;
180         }
181
182         tbm_surf_queue_mutex_init = true;
183
184         return true;
185 }
186
187 static void
188 _tbm_surf_queue_mutex_lock(void)
189 {
190         if (!_tbm_surf_queue_mutex_init()) {
191                 TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
192                 return;
193         }
194
195         pthread_mutex_lock(&tbm_surf_queue_lock);
196 }
197
198 static void
199 _tbm_surf_queue_mutex_unlock(void)
200 {
201         pthread_mutex_unlock(&tbm_surf_queue_lock);
202 }
203
204 static void
205 _init_tbm_surf_queue_bufmgr(void)
206 {
207         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
208 }
209
210 static void
211 _deinit_tbm_surf_queue_bufmgr(void)
212 {
213         if (!g_surf_queue_bufmgr)
214                 return;
215
216         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
217         g_surf_queue_bufmgr = NULL;
218 }
219
220 static int
221 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
222 {
223         tbm_surface_queue_h old_data = NULL;
224
225         if (surface_queue == NULL) {
226                 TBM_LOG_E("error: surface_queue is NULL.\n");
227                 return 0;
228         }
229
230         if (g_surf_queue_bufmgr == NULL) {
231                 TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
232                 return 0;
233         }
234
235         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
236                 TBM_LOG_E("error: surf_queue_list is empty\n");
237                 return 0;
238         }
239
240         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
241                                 item_link) {
242                 if (old_data == surface_queue) {
243                         TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
244                         return 1;
245                 }
246         }
247
248         TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
249
250         return 0;
251 }
252
253 static queue_node *
254 _queue_node_create(void)
255 {
256         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
257
258         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
259
260         return node;
261 }
262
263 static void
264 _queue_node_delete(queue_node *node)
265 {
266         LIST_DEL(&node->item_link);
267         LIST_DEL(&node->link);
268         free(node);
269 }
270
271 static int
272 _queue_is_empty(queue *queue)
273 {
274         if (LIST_IS_EMPTY(&queue->head))
275                 return 1;
276
277         return 0;
278 }
279
280 static void
281 _queue_node_push_back(queue *queue, queue_node *node)
282 {
283         LIST_ADDTAIL(&node->item_link, &queue->head);
284         queue->count++;
285 }
286
287 static void
288 _queue_node_push_front(queue *queue, queue_node *node)
289 {
290         LIST_ADD(&node->item_link, &queue->head);
291         queue->count++;
292 }
293
294 static queue_node *
295 _queue_node_pop_front(queue *queue)
296 {
297         queue_node *node;
298
299         if (!queue->head.next) return NULL;
300         if (!queue->count) return NULL;
301
302         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
303
304         LIST_DELINIT(&node->item_link);
305         queue->count--;
306
307         return node;
308 }
309
310 static queue_node *
311 _queue_node_pop(queue *queue, queue_node *node)
312 {
313         LIST_DELINIT(&node->item_link);
314         queue->count--;
315
316         return node;
317 }
318
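/* Find the queue_node owning 'surface'. 'type' is a bitmask of FREE_QUEUE,
 * DIRTY_QUEUE and NODE_LIST (0 means search all three); the list in which the
 * node was found is reported through 'out_type'. */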
319 static queue_node *
320 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
321                 tbm_surface_h surface, int *out_type)
322 {
323         queue_node *node = NULL;
324
325         if (type == 0)
326                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
327         if (out_type)
328                 *out_type = 0;
329
330         if (type & FREE_QUEUE) {
331                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
332                                          item_link) {
333                         if (node->surface == surface) {
334                                 if (out_type)
335                                         *out_type = FREE_QUEUE;
336
337                                 return node;
338                         }
339                 }
340         }
341
342         if (type & DIRTY_QUEUE) {
343                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
344                                          item_link) {
345                         if (node->surface == surface) {
346                                 if (out_type)
347                                         *out_type = DIRTY_QUEUE;
348
349                                 return node;
350                         }
351                 }
352         }
353
354         if (type & NODE_LIST) {
355                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
356                         if (node->surface == surface) {
357                                 if (out_type)
358                                         *out_type = NODE_LIST;
359
360                                 return node;
361                         }
362                 }
363         }
364
365         TBM_LOG_E("fail to get the queue_node.\n");
366
367         return NULL;
368 }
369
370 static void
371 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
372 {
373         if (node->surface) {
374                 if (surface_queue->free_cb) {
375                         surface_queue->free_cb(surface_queue,
376                                         surface_queue->alloc_cb_data,
377                                         node->surface);
378                 }
379
380                 tbm_surface_destroy(node->surface);
381         }
382
383         _queue_node_delete(node);
384 }
385
386 static void
387 _queue_init(queue *queue)
388 {
389         LIST_INITHEAD(&queue->head);
390
391         queue->count = 0;
392 }
393
394 static void
395 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
396             void *data)
397 {
398         TBM_RETURN_IF_FAIL(cb != NULL);
399
400         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
401
402         TBM_RETURN_IF_FAIL(item != NULL);
403
404         LIST_INITHEAD(&item->link);
405         item->cb = cb;
406         item->data = data;
407
408         LIST_ADDTAIL(&item->link, list);
409 }
410
411 static void
412 _notify_remove(struct list_head *list,
413                tbm_surface_queue_notify_cb cb, void *data)
414 {
415         queue_notify *item = NULL, *tmp;
416
417         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
418                 if (item->cb == cb && item->data == data) {
419                         LIST_DEL(&item->link);
420                         free(item);
421                         return;
422                 }
423         }
424
425         TBM_LOG_E("Cannot find the notify callback\n");
426 }
427
428 static void
429 _notify_remove_all(struct list_head *list)
430 {
431         queue_notify *item = NULL, *tmp;
432
433         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
434                 LIST_DEL(&item->link);
435                 free(item);
436         }
437 }
438
439 static void
440 _notify_emit(tbm_surface_queue_h surface_queue,
441              struct list_head *list)
442 {
443         queue_notify *item = NULL, *tmp;
444
445         /*
446                 item->cb is a callback outside of libtbm.
447                 The tbm user may remove items from this list inside the callback,
448                 so LIST_FOR_EACH_ENTRY_SAFE must be used here.
449         */
450         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
451                 item->cb(surface_queue, item->data);
452 }
453
454 static void
455 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
456             void *data)
457 {
458         TBM_RETURN_IF_FAIL(cb != NULL);
459
460         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
461
462         TBM_RETURN_IF_FAIL(item != NULL);
463
464         LIST_INITHEAD(&item->link);
465         item->cb = cb;
466         item->data = data;
467
468         LIST_ADDTAIL(&item->link, list);
469 }
470
471 static void
472 _trace_remove(struct list_head *list,
473                tbm_surface_queue_trace_cb cb, void *data)
474 {
475         queue_trace *item = NULL, *tmp;
476
477         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
478                 if (item->cb == cb && item->data == data) {
479                         LIST_DEL(&item->link);
480                         free(item);
481                         return;
482                 }
483         }
484
485         TBM_LOG_E("Cannot find the trace callback\n");
486 }
487
488 static void
489 _trace_remove_all(struct list_head *list)
490 {
491         queue_trace *item = NULL, *tmp;
492
493         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
494                 LIST_DEL(&item->link);
495                 free(item);
496         }
497 }
498
499 static void
500 _trace_emit(tbm_surface_queue_h surface_queue,
501              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
502 {
503         queue_trace *item = NULL, *tmp;
504
505         /*
506                 item->cb is a callback outside of libtbm.
507                 The tbm user may remove items from this list inside the callback,
508                 so LIST_FOR_EACH_ENTRY_SAFE must be used here.
509         */
510         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
511                 item->cb(surface_queue, surface, trace, item->data);
512 }
513
514 static int
515 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
516 {
517         queue_node *node = NULL;
518         int count = 0;
519
520         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
521                 if (node->type == type)
522                         count++;
523         }
524
525         return count;
526 }
527
528 static void
529 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
530                           tbm_surface_h surface)
531 {
532         queue_node *node;
533
534         node = _queue_node_create();
535         TBM_RETURN_IF_FAIL(node != NULL);
536
537         tbm_surface_internal_ref(surface);
538         node->surface = surface;
539
540         LIST_ADDTAIL(&node->link, &surface_queue->list);
541         surface_queue->num_attached++;
542         _queue_node_push_back(&surface_queue->free_queue, node);
543 }
544
545 static void
546 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
547                           tbm_surface_h surface)
548 {
549         queue_node *node;
550         int queue_type;
551
552         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
553         if (node) {
554                 _queue_delete_node(surface_queue, node);
555                 surface_queue->num_attached--;
556         }
557 }
558
559 static void
560 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
561                            queue_node *node, int push_back)
562 {
563         if (push_back)
564                 _queue_node_push_back(&surface_queue->dirty_queue, node);
565         else
566                 _queue_node_push_front(&surface_queue->dirty_queue, node);
567 }
568
569 static queue_node *
570 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
571 {
572         queue_node *node;
573
574         node = _queue_node_pop_front(&surface_queue->free_queue);
575
576         return node;
577 }
578
579 static queue_node *
580 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
581 {
582         queue_node *node;
583
584         if (_queue_is_empty(&surface_queue->dirty_queue))
585                 return NULL;
586
587         node = _queue_node_pop_front(&surface_queue->dirty_queue);
588
589         return node;
590 }
591
592 static void
593 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
594                            queue_node *node, int push_back)
595 {
596         if (push_back)
597                 _queue_node_push_back(&surface_queue->free_queue, node);
598         else
599                 _queue_node_push_front(&surface_queue->free_queue, node);
600 }
601
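/* Common constructor used by the concrete queue types: it sets up the locks,
 * queues and notification lists, runs the backend's init() hook and registers
 * the queue in g_surf_queue_bufmgr->surf_queue_list, which is exactly what
 * _tbm_surface_queue_is_valid() checks later. */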
602 static void
603 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
604                         int queue_size,
605                         int width, int height, int format,
606                         const tbm_surface_queue_interface *impl, void *data)
607 {
608         TBM_RETURN_IF_FAIL(surface_queue != NULL);
609         TBM_RETURN_IF_FAIL(impl != NULL);
610
611         if (!g_surf_queue_bufmgr)
612                 _init_tbm_surf_queue_bufmgr();
613
614         pthread_mutex_init(&surface_queue->lock, NULL);
615         pthread_cond_init(&surface_queue->free_cond, NULL);
616         pthread_cond_init(&surface_queue->dirty_cond, NULL);
617
618         surface_queue->queue_size = queue_size;
619         surface_queue->width = width;
620         surface_queue->height = height;
621         surface_queue->format = format;
622         surface_queue->impl = impl;
623         surface_queue->impl_data = data;
624         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
625
626         _queue_init(&surface_queue->free_queue);
627         _queue_init(&surface_queue->dirty_queue);
628         LIST_INITHEAD(&surface_queue->list);
629
630         LIST_INITHEAD(&surface_queue->destory_noti);
631         LIST_INITHEAD(&surface_queue->dequeuable_noti);
632         LIST_INITHEAD(&surface_queue->dequeue_noti);
633         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
634         LIST_INITHEAD(&surface_queue->acquirable_noti);
635         LIST_INITHEAD(&surface_queue->reset_noti);
636         LIST_INITHEAD(&surface_queue->trace_noti);
637
638         if (surface_queue->impl && surface_queue->impl->init)
639                 surface_queue->impl->init(surface_queue);
640
641         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
642 }
643
644 tbm_surface_queue_error_e
645 tbm_surface_queue_add_destroy_cb(
646         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
647         void *data)
648 {
649         _tbm_surf_queue_mutex_lock();
650
651         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
652                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
653
654         pthread_mutex_lock(&surface_queue->lock);
655
656         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
657
658         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
659
660         pthread_mutex_unlock(&surface_queue->lock);
661
662         _tbm_surf_queue_mutex_unlock();
663
664         return TBM_SURFACE_QUEUE_ERROR_NONE;
665 }
666
667 tbm_surface_queue_error_e
668 tbm_surface_queue_remove_destroy_cb(
669         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
670         void *data)
671 {
672         _tbm_surf_queue_mutex_lock();
673
674         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
675                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
676
677         pthread_mutex_lock(&surface_queue->lock);
678
679         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
680
681         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
682
683         pthread_mutex_unlock(&surface_queue->lock);
684
685         _tbm_surf_queue_mutex_unlock();
686
687         return TBM_SURFACE_QUEUE_ERROR_NONE;
688 }
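
/*
 * Every add_/remove_*_cb() pair below follows the same pattern: the callback
 * is a tbm_surface_queue_notify_cb invoked as cb(queue, data). An illustrative
 * sketch (the callback and context names are hypothetical):
 *
 *     static void on_queue_destroy(tbm_surface_queue_h queue, void *data)
 *     {
 *         // drop any reference kept on 'queue'
 *     }
 *
 *     tbm_surface_queue_add_destroy_cb(queue, on_queue_destroy, ctx);
 *     ...
 *     tbm_surface_queue_remove_destroy_cb(queue, on_queue_destroy, ctx);
 */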
689
690 tbm_surface_queue_error_e
691 tbm_surface_queue_add_dequeuable_cb(
692         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
693         void *data)
694 {
695         _tbm_surf_queue_mutex_lock();
696
697         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
698                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
699
700         pthread_mutex_lock(&surface_queue->lock);
701
702         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
703
704         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
705
706         pthread_mutex_unlock(&surface_queue->lock);
707
708         _tbm_surf_queue_mutex_unlock();
709
710         return TBM_SURFACE_QUEUE_ERROR_NONE;
711 }
712
713 tbm_surface_queue_error_e
714 tbm_surface_queue_remove_dequeuable_cb(
715         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
716         void *data)
717 {
718         _tbm_surf_queue_mutex_lock();
719
720         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
721                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
722
723         pthread_mutex_lock(&surface_queue->lock);
724
725         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
726
727         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
728
729         pthread_mutex_unlock(&surface_queue->lock);
730
731         _tbm_surf_queue_mutex_unlock();
732
733         return TBM_SURFACE_QUEUE_ERROR_NONE;
734 }
735
736 tbm_surface_queue_error_e
737 tbm_surface_queue_add_dequeue_cb(
738         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
739         void *data)
740 {
741         _tbm_surf_queue_mutex_lock();
742
743         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
744                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
745
746         pthread_mutex_lock(&surface_queue->lock);
747
748         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
749
750         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
751
752         pthread_mutex_unlock(&surface_queue->lock);
753
754         _tbm_surf_queue_mutex_unlock();
755
756         return TBM_SURFACE_QUEUE_ERROR_NONE;
757 }
758
759 tbm_surface_queue_error_e
760 tbm_surface_queue_remove_dequeue_cb(
761         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
762         void *data)
763 {
764         _tbm_surf_queue_mutex_lock();
765
766         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
767                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
768
769         pthread_mutex_lock(&surface_queue->lock);
770
771         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
772
773         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
774
775         pthread_mutex_unlock(&surface_queue->lock);
776
777         _tbm_surf_queue_mutex_unlock();
778
779         return TBM_SURFACE_QUEUE_ERROR_NONE;
780 }
781
782 tbm_surface_queue_error_e
783 tbm_surface_queue_add_can_dequeue_cb(
784         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
785         void *data)
786 {
787         _tbm_surf_queue_mutex_lock();
788
789         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
790                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
791
792         pthread_mutex_lock(&surface_queue->lock);
793
794         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
795
796         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
797
798         pthread_mutex_unlock(&surface_queue->lock);
799
800         _tbm_surf_queue_mutex_unlock();
801
802         return TBM_SURFACE_QUEUE_ERROR_NONE;
803 }
804
805 tbm_surface_queue_error_e
806 tbm_surface_queue_remove_can_dequeue_cb(
807         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
808         void *data)
809 {
810         _tbm_surf_queue_mutex_lock();
811
812         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
813                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
814
815         pthread_mutex_lock(&surface_queue->lock);
816
817         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
818
819         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
820
821         pthread_mutex_unlock(&surface_queue->lock);
822
823         _tbm_surf_queue_mutex_unlock();
824
825         return TBM_SURFACE_QUEUE_ERROR_NONE;
826 }
827
828 tbm_surface_queue_error_e
829 tbm_surface_queue_add_acquirable_cb(
830         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
831         void *data)
832 {
833         _tbm_surf_queue_mutex_lock();
834
835         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
836                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
837
838         pthread_mutex_lock(&surface_queue->lock);
839
840         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
841
842         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
843
844         pthread_mutex_unlock(&surface_queue->lock);
845
846         _tbm_surf_queue_mutex_unlock();
847
848         return TBM_SURFACE_QUEUE_ERROR_NONE;
849 }
850
851 tbm_surface_queue_error_e
852 tbm_surface_queue_remove_acquirable_cb(
853         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
854         void *data)
855 {
856         _tbm_surf_queue_mutex_lock();
857
858         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
859                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
860
861         pthread_mutex_lock(&surface_queue->lock);
862
863         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
864
865         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
866
867         pthread_mutex_unlock(&surface_queue->lock);
868
869         _tbm_surf_queue_mutex_unlock();
870
871         return TBM_SURFACE_QUEUE_ERROR_NONE;
872 }
873
874 tbm_surface_queue_error_e
875 tbm_surface_queue_add_trace_cb(
876         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
877         void *data)
878 {
879         _tbm_surf_queue_mutex_lock();
880
881         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
882                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
883
884         pthread_mutex_lock(&surface_queue->lock);
885
886         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
887
888         _trace_add(&surface_queue->trace_noti, trace_cb, data);
889
890         pthread_mutex_unlock(&surface_queue->lock);
891
892         _tbm_surf_queue_mutex_unlock();
893
894         return TBM_SURFACE_QUEUE_ERROR_NONE;
895 }
896
897 tbm_surface_queue_error_e
898 tbm_surface_queue_remove_trace_cb(
899         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
900         void *data)
901 {
902         _tbm_surf_queue_mutex_lock();
903
904         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
905                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
906
907         pthread_mutex_lock(&surface_queue->lock);
908
909         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
910
911         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
912
913         pthread_mutex_unlock(&surface_queue->lock);
914
915         _tbm_surf_queue_mutex_unlock();
916
917         return TBM_SURFACE_QUEUE_ERROR_NONE;
918 }
919
920 tbm_surface_queue_error_e
921 tbm_surface_queue_set_alloc_cb(
922         tbm_surface_queue_h surface_queue,
923         tbm_surface_alloc_cb alloc_cb,
924         tbm_surface_free_cb free_cb,
925         void *data)
926 {
927         _tbm_surf_queue_mutex_lock();
928
929         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
930                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
931
932         pthread_mutex_lock(&surface_queue->lock);
933
934         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
935
936         surface_queue->alloc_cb = alloc_cb;
937         surface_queue->free_cb = free_cb;
938         surface_queue->alloc_cb_data = data;
939
940         pthread_mutex_unlock(&surface_queue->lock);
941
942         _tbm_surf_queue_mutex_unlock();
943
944         return TBM_SURFACE_QUEUE_ERROR_NONE;
945 }
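
/* Note: when a node is destroyed, the registered free_cb is called as
 * free_cb(queue, alloc_cb_data, surface) just before tbm_surface_destroy()
 * (see _queue_delete_node()); alloc_cb is presumably invoked by backends that
 * allocate surfaces on demand (e.g. from a need_attach hook). */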
946
947 int
948 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
949 {
950         int width;
951
952         _tbm_surf_queue_mutex_lock();
953
954         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
955
956         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
957
958         width = surface_queue->width;
959
960         _tbm_surf_queue_mutex_unlock();
961
962         return width;
963 }
964
965 int
966 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
967 {
968         int height;
969
970         _tbm_surf_queue_mutex_lock();
971
972         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
973
974         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
975
976         height = surface_queue->height;
977
978         _tbm_surf_queue_mutex_unlock();
979
980         return height;
981 }
982
983 int
984 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
985 {
986         int format;
987
988         _tbm_surf_queue_mutex_lock();
989
990         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
991
992         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
993
994         format = surface_queue->format;
995
996         _tbm_surf_queue_mutex_unlock();
997
998         return format;
999 }
1000
1001 int
1002 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1003 {
1004         int queue_size;
1005
1006         _tbm_surf_queue_mutex_lock();
1007
1008         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1009
1010         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1011
1012         queue_size = surface_queue->queue_size;
1013
1014         _tbm_surf_queue_mutex_unlock();
1015
1016         return queue_size;
1017 }
1018
1019 tbm_surface_queue_error_e
1020 tbm_surface_queue_add_reset_cb(
1021         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1022         void *data)
1023 {
1024         _tbm_surf_queue_mutex_lock();
1025
1026         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1027                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1028
1029         pthread_mutex_lock(&surface_queue->lock);
1030
1031         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1032
1033         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1034
1035         pthread_mutex_unlock(&surface_queue->lock);
1036
1037         _tbm_surf_queue_mutex_unlock();
1038
1039         return TBM_SURFACE_QUEUE_ERROR_NONE;
1040 }
1041
1042 tbm_surface_queue_error_e
1043 tbm_surface_queue_remove_reset_cb(
1044         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1045         void *data)
1046 {
1047         _tbm_surf_queue_mutex_lock();
1048
1049         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1050                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1051
1052         pthread_mutex_lock(&surface_queue->lock);
1053
1054         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1055
1056         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1057
1058         pthread_mutex_unlock(&surface_queue->lock);
1059
1060         _tbm_surf_queue_mutex_unlock();
1061
1062         return TBM_SURFACE_QUEUE_ERROR_NONE;
1063 }
1064
1065 tbm_surface_queue_error_e
1066 tbm_surface_queue_enqueue(tbm_surface_queue_h
1067                           surface_queue, tbm_surface_h surface)
1068 {
1069         queue_node *node;
1070         int queue_type;
1071
1072         _tbm_surf_queue_mutex_lock();
1073
1074         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1075                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1076         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1077                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1078
1079         if (b_dump_queue)
1080                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1081
1082         pthread_mutex_lock(&surface_queue->lock);
1083
1084         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1085
1086         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1087         if (node == NULL || queue_type != NODE_LIST) {
1088                 TBM_LOG_E("tbm_surface_queue_enqueue::surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1089                         node, queue_type);
1090                 pthread_mutex_unlock(&surface_queue->lock);
1091
1092                 _tbm_surf_queue_mutex_unlock();
1093
1094                 if (!node)
1095                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1096                 else
1097                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1098         }
1099
1100         if (surface_queue->impl && surface_queue->impl->enqueue)
1101                 surface_queue->impl->enqueue(surface_queue, node);
1102         else
1103                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1104
1105         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1106                 TBM_LOG_E("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1107                 pthread_mutex_unlock(&surface_queue->lock);
1108
1109                 _tbm_surf_queue_mutex_unlock();
1110                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1111         }
1112
1113         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1114
1115         if (surface_queue->enqueue_sync_count == 1) {
1116                 tbm_surface_info_s info;
1117                 int ret;
1118
1119                 TBM_LOG_E("start map surface:%p", surface);
1120                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1121                 TBM_LOG_E("end map surface:%p", surface);
1122                 if (ret == TBM_SURFACE_ERROR_NONE)
1123                         tbm_surface_unmap(surface);
1124         }
1125
1126         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1127
1128         pthread_mutex_unlock(&surface_queue->lock);
1129         pthread_cond_signal(&surface_queue->dirty_cond);
1130
1131         _tbm_surf_queue_mutex_unlock();
1132
1133         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1134
1135         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1136
1137         return TBM_SURFACE_QUEUE_ERROR_NONE;
1138 }
1139
1140 tbm_surface_queue_error_e
1141 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1142                           surface_queue, tbm_surface_h surface)
1143 {
1144         queue_node *node;
1145         int queue_type;
1146
1147         _tbm_surf_queue_mutex_lock();
1148
1149         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1150                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1151         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1152                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1153
1154         pthread_mutex_lock(&surface_queue->lock);
1155
1156         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1157
1158         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1159         if (node == NULL || queue_type != NODE_LIST) {
1160                 TBM_LOG_E("tbm_surface_queue_cancel_dequeue::surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1161                         node, queue_type);
1162                 pthread_mutex_unlock(&surface_queue->lock);
1163
1164                 _tbm_surf_queue_mutex_unlock();
1165                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1166         }
1167
1168         if (node->delete_pending) {
1169                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1170
1171                 _queue_delete_node(surface_queue, node);
1172
1173                 pthread_mutex_unlock(&surface_queue->lock);
1174
1175                 _tbm_surf_queue_mutex_unlock();
1176
1177                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1178
1179                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1180         }
1181
1182         if (surface_queue->queue_size < surface_queue->num_attached) {
1183                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1184
1185                 if (surface_queue->impl && surface_queue->impl->need_detach)
1186                         surface_queue->impl->need_detach(surface_queue, node);
1187                 else
1188                         _tbm_surface_queue_detach(surface_queue, surface);
1189
1190                 pthread_mutex_unlock(&surface_queue->lock);
1191
1192                 _tbm_surf_queue_mutex_unlock();
1193
1194                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1195
1196                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1197         }
1198
1199         if (surface_queue->impl && surface_queue->impl->release)
1200                 surface_queue->impl->release(surface_queue, node);
1201         else
1202                 _tbm_surface_queue_release(surface_queue, node, 1);
1203
1204         if (_queue_is_empty(&surface_queue->free_queue)) {
1205                 pthread_mutex_unlock(&surface_queue->lock);
1206
1207                 TBM_LOG_E("surface_queue->free_queue is empty.\n");
1208                 _tbm_surf_queue_mutex_unlock();
1209                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1210         }
1211
1212         node->type = QUEUE_NODE_TYPE_RELEASE;
1213
1214         pthread_mutex_unlock(&surface_queue->lock);
1215         pthread_cond_signal(&surface_queue->free_cond);
1216
1217         _tbm_surf_queue_mutex_unlock();
1218
1219         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1220
1221         return TBM_SURFACE_QUEUE_ERROR_NONE;
1222 }
1223
1224 tbm_surface_queue_error_e
1225 tbm_surface_queue_dequeue(tbm_surface_queue_h
1226                           surface_queue, tbm_surface_h *surface)
1227 {
1228         queue_node *node;
1229
1230         _tbm_surf_queue_mutex_lock();
1231
1232         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1233                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1234         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1235                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1236
1237         *surface = NULL;
1238
1239         pthread_mutex_lock(&surface_queue->lock);
1240
1241         if (_queue_is_empty(&surface_queue->free_queue)) {
1242                 if (surface_queue->impl && surface_queue->impl->need_attach)
1243                         surface_queue->impl->need_attach(surface_queue);
1244
1245                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1246                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1247                         pthread_mutex_unlock(&surface_queue->lock);
1248                         _tbm_surf_queue_mutex_unlock();
1249                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1250                 }
1251         }
1252
1253         if (surface_queue->impl && surface_queue->impl->dequeue)
1254                 node = surface_queue->impl->dequeue(surface_queue);
1255         else
1256                 node = _tbm_surface_queue_dequeue(surface_queue);
1257
1258         if (node == NULL || node->surface == NULL) {
1259                 TBM_LOG_E("_queue_node_pop_front failed\n");
1260                 pthread_mutex_unlock(&surface_queue->lock);
1261
1262                 _tbm_surf_queue_mutex_unlock();
1263                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1264         }
1265
1266         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1267         *surface = node->surface;
1268
1269         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1270
1271         pthread_mutex_unlock(&surface_queue->lock);
1272
1273         _tbm_surf_queue_mutex_unlock();
1274
1275         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1276
1277         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1278
1279         return TBM_SURFACE_QUEUE_ERROR_NONE;
1280 }
1281
1282 int
1283 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1284 {
1285         _tbm_surf_queue_mutex_lock();
1286
1287         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1288
1289         _tbm_surf_queue_mutex_unlock();
1290
1291         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1292
1293         _tbm_surf_queue_mutex_lock();
1294
1295         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1296
1297         pthread_mutex_lock(&surface_queue->lock);
1298
1299         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1300
1301         if (_queue_is_empty(&surface_queue->free_queue)) {
1302                 if (surface_queue->impl && surface_queue->impl->need_attach)
1303                         surface_queue->impl->need_attach(surface_queue);
1304
1305                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1306                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1307                         pthread_mutex_unlock(&surface_queue->lock);
1308                         _tbm_surf_queue_mutex_unlock();
1309                         return 0;
1310                 }
1311         }
1312
1313         if (!_queue_is_empty(&surface_queue->free_queue)) {
1314                 pthread_mutex_unlock(&surface_queue->lock);
1315                 _tbm_surf_queue_mutex_unlock();
1316                 return 1;
1317         }
1318
1319         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1320                                                 QUEUE_NODE_TYPE_ACQUIRE)) {
1321                 _tbm_surf_queue_mutex_unlock();
1322                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1323                 pthread_mutex_unlock(&surface_queue->lock);
1324                 return 1;
1325         }
1326
1327         pthread_mutex_unlock(&surface_queue->lock);
1328         _tbm_surf_queue_mutex_unlock();
1329         return 0;
1330 }
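
/*
 * Illustrative producer-side flow (error handling omitted; the rendering step
 * and the use of TBM_SURF_OPTION_WRITE are assumptions about a typical client,
 * not something this file mandates):
 *
 *     tbm_surface_h surface;
 *     if (tbm_surface_queue_can_dequeue(queue, 1) &&
 *         tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *         tbm_surface_info_s info;
 *         if (tbm_surface_map(surface, TBM_SURF_OPTION_WRITE, &info) == TBM_SURFACE_ERROR_NONE) {
 *             // draw into info.planes[...]
 *             tbm_surface_unmap(surface);
 *         }
 *         tbm_surface_queue_enqueue(queue, surface);
 *     }
 */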
1331
1332 tbm_surface_queue_error_e
1333 tbm_surface_queue_release(tbm_surface_queue_h
1334                           surface_queue, tbm_surface_h surface)
1335 {
1336         queue_node *node;
1337         int queue_type;
1338
1339         _tbm_surf_queue_mutex_lock();
1340
1341         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1342                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1343         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1344                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1345
1346         pthread_mutex_lock(&surface_queue->lock);
1347
1348         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1349
1350         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1351         if (node == NULL || queue_type != NODE_LIST) {
1352                 TBM_LOG_E("tbm_surface_queue_release::surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1353                         node, queue_type);
1354                 pthread_mutex_unlock(&surface_queue->lock);
1355
1356                 _tbm_surf_queue_mutex_unlock();
1357
1358                 if (!node)
1359                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1360                 else
1361                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1362         }
1363
1364         if (node->delete_pending) {
1365                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1366
1367                 _queue_delete_node(surface_queue, node);
1368
1369                 pthread_mutex_unlock(&surface_queue->lock);
1370
1371                 _tbm_surf_queue_mutex_unlock();
1372
1373                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1374
1375                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1376         }
1377
1378         if (surface_queue->queue_size < surface_queue->num_attached) {
1379                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1380
1381                 if (surface_queue->impl && surface_queue->impl->need_detach)
1382                         surface_queue->impl->need_detach(surface_queue, node);
1383                 else
1384                         _tbm_surface_queue_detach(surface_queue, surface);
1385
1386                 pthread_mutex_unlock(&surface_queue->lock);
1387
1388                 _tbm_surf_queue_mutex_unlock();
1389
1390                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1391
1392                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1393         }
1394
1395         if (surface_queue->impl && surface_queue->impl->release)
1396                 surface_queue->impl->release(surface_queue, node);
1397         else
1398                 _tbm_surface_queue_release(surface_queue, node, 1);
1399
1400         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1401                 TBM_LOG_E("release surface(%p) but surface isn't present in the free_queue\n", surface);
1402                 pthread_mutex_unlock(&surface_queue->lock);
1403
1404                 _tbm_surf_queue_mutex_unlock();
1405                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1406         }
1407
1408         node->type = QUEUE_NODE_TYPE_RELEASE;
1409
1410         pthread_mutex_unlock(&surface_queue->lock);
1411         pthread_cond_signal(&surface_queue->free_cond);
1412
1413         _tbm_surf_queue_mutex_unlock();
1414
1415         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1416
1417         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1418
1419         return TBM_SURFACE_QUEUE_ERROR_NONE;
1420 }
1421
1422 tbm_surface_queue_error_e
1423 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1424                         surface_queue, tbm_surface_h surface)
1425 {
1426         queue_node *node;
1427         int queue_type;
1428
1429         _tbm_surf_queue_mutex_lock();
1430
1431         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1432                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1433         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1434                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1435
1436         pthread_mutex_lock(&surface_queue->lock);
1437
1438         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1439
1440         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1441         if (node == NULL || queue_type != NODE_LIST) {
1442                 TBM_LOG_E("tbm_surface_queue_cancel_acquire::surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1443                         node, queue_type);
1444                 pthread_mutex_unlock(&surface_queue->lock);
1445
1446                 _tbm_surf_queue_mutex_unlock();
1447                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1448         }
1449
1450         if (surface_queue->impl && surface_queue->impl->enqueue)
1451                 surface_queue->impl->enqueue(surface_queue, node);
1452         else
1453                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1454
1455         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1456                 TBM_LOG_E("enqueued surface but the dirty_queue is empty. node:%p\n", node);
1457                 pthread_mutex_unlock(&surface_queue->lock);
1458
1459                 _tbm_surf_queue_mutex_unlock();
1460                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1461         }
1462
1463         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1464
1465         pthread_mutex_unlock(&surface_queue->lock);
1466         pthread_cond_signal(&surface_queue->dirty_cond);
1467
1468         _tbm_surf_queue_mutex_unlock();
1469
1470         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1471
1472         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1473
1474         return TBM_SURFACE_QUEUE_ERROR_NONE;
1475 }
1476
1477 tbm_surface_queue_error_e
1478 tbm_surface_queue_acquire(tbm_surface_queue_h
1479                           surface_queue, tbm_surface_h *surface)
1480 {
1481         queue_node *node;
1482
1483         _tbm_surf_queue_mutex_lock();
1484
1485         *surface = NULL;
1486
1487         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1488                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1489         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1490                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1491
1492         pthread_mutex_lock(&surface_queue->lock);
1493
1494         if (surface_queue->impl && surface_queue->impl->acquire)
1495                 node = surface_queue->impl->acquire(surface_queue);
1496         else
1497                 node = _tbm_surface_queue_acquire(surface_queue);
1498
1499         if (node == NULL || node->surface == NULL) {
1500                 TBM_LOG_E("_queue_node_pop_front failed\n");
1501                 pthread_mutex_unlock(&surface_queue->lock);
1502
1503                 _tbm_surf_queue_mutex_unlock();
1504                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1505         }
1506
1507         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1508
1509         *surface = node->surface;
1510
1511         if (surface_queue->acquire_sync_count == 1) {
1512                 tbm_surface_info_s info;
1513                 int ret;
1514
1515                 TBM_LOG_E("start map surface:%p", *surface);
1516                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1517                 TBM_LOG_E("end map surface:%p", *surface);
1518                 if (ret == TBM_SURFACE_ERROR_NONE)
1519                         tbm_surface_unmap(*surface);
1520         }
1521
1522         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1523
1524         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1525
1526         pthread_mutex_unlock(&surface_queue->lock);
1527
1528         _tbm_surf_queue_mutex_unlock();
1529
1530         if (b_dump_queue)
1531                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1532
1533         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1534
1535         return TBM_SURFACE_QUEUE_ERROR_NONE;
1536 }
1537
1538 int
1539 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1540 {
1541         _tbm_surf_queue_mutex_lock();
1542
1543         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1544
1545         pthread_mutex_lock(&surface_queue->lock);
1546
1547         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1548
1549         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1550                 pthread_mutex_unlock(&surface_queue->lock);
1551                 _tbm_surf_queue_mutex_unlock();
1552                 return 1;
1553         }
1554
1555         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1556                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1557                 _tbm_surf_queue_mutex_unlock();
1558                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1559                 pthread_mutex_unlock(&surface_queue->lock);
1560                 return 1;
1561         }
1562
1563         pthread_mutex_unlock(&surface_queue->lock);
1564         _tbm_surf_queue_mutex_unlock();
1565         return 0;
1566 }
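
/*
 * Illustrative consumer-side flow (the "display" step is an assumption;
 * error handling omitted):
 *
 *     tbm_surface_h surface;
 *     if (tbm_surface_queue_can_acquire(queue, 1) &&
 *         tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *         // scan out / compose the surface here
 *         tbm_surface_queue_release(queue, surface);
 *     }
 */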
1567
1568 void
1569 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1570 {
1571         queue_node *node = NULL, *tmp;
1572
1573         _tbm_surf_queue_mutex_lock();
1574
1575         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1576
1577         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1578
1579         LIST_DEL(&surface_queue->item_link);
1580
1581         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1582                 _queue_delete_node(surface_queue, node);
1583
1584         if (surface_queue->impl && surface_queue->impl->destroy)
1585                 surface_queue->impl->destroy(surface_queue);
1586
1587         _notify_emit(surface_queue, &surface_queue->destory_noti);
1588
1589         _notify_remove_all(&surface_queue->destory_noti);
1590         _notify_remove_all(&surface_queue->dequeuable_noti);
1591         _notify_remove_all(&surface_queue->dequeue_noti);
1592         _notify_remove_all(&surface_queue->can_dequeue_noti);
1593         _notify_remove_all(&surface_queue->acquirable_noti);
1594         _notify_remove_all(&surface_queue->reset_noti);
1595         _trace_remove_all(&surface_queue->trace_noti);
1596
1597         pthread_mutex_destroy(&surface_queue->lock);
1598
1599         free(surface_queue);
1600
1601         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1602                 _deinit_tbm_surf_queue_bufmgr();
1603
1604         _tbm_surf_queue_mutex_unlock();
1605 }
1606
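/* Resetting changes width/height/format and drops the attached surfaces. With
 * TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE, surfaces still held outside the
 * free_queue are only marked delete_pending and are destroyed later, when they
 * come back through release()/cancel_dequeue(); otherwise all nodes are
 * destroyed immediately. reset_noti callbacks are emitted at the end. */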
1607 tbm_surface_queue_error_e
1608 tbm_surface_queue_reset(tbm_surface_queue_h
1609                         surface_queue, int width, int height, int format)
1610 {
1611         queue_node *node = NULL, *tmp;
1612
1613         _tbm_surf_queue_mutex_lock();
1614
1615         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1616                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1617
1618         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1619
1620         if (width == surface_queue->width && height == surface_queue->height &&
1621                 format == surface_queue->format) {
1622                 _tbm_surf_queue_mutex_unlock();
1623                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1624         }
1625
1626         pthread_mutex_lock(&surface_queue->lock);
1627
1628         surface_queue->width = width;
1629         surface_queue->height = height;
1630         surface_queue->format = format;
1631
1632         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1633                         /* Destroy surfaces in the free_queue; nodes still in use are marked delete_pending */
1634                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1635                         _queue_delete_node(surface_queue, node);
1636
1637                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1638                         node->delete_pending = 1;
1639         } else {
1640                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1641                         _queue_delete_node(surface_queue, node);
1642
1643                 _queue_init(&surface_queue->dirty_queue);
1644                 LIST_INITHEAD(&surface_queue->list);
1645         }
1646
1647         /* Reset queue */
1648         _queue_init(&surface_queue->free_queue);
1649
1650         surface_queue->num_attached = 0;
1651
1652         if (surface_queue->impl && surface_queue->impl->reset)
1653                 surface_queue->impl->reset(surface_queue);
1654
1655         pthread_mutex_unlock(&surface_queue->lock);
1656         pthread_cond_signal(&surface_queue->free_cond);
1657
1658         _tbm_surf_queue_mutex_unlock();
1659
1660         _notify_emit(surface_queue, &surface_queue->reset_noti);
1661
1662         return TBM_SURFACE_QUEUE_ERROR_NONE;
1663 }
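
/*
 * Illustrative sketch (queue handle and new dimensions are assumptions of the
 * caller): reset the queue when the output geometry changes. If the new
 * width/height/format equal the current ones, the call above returns
 * TBM_SURFACE_QUEUE_ERROR_NONE without touching any buffer.
 *
 *   if (tbm_surface_queue_reset(queue, new_width, new_height, TBM_FORMAT_ARGB8888)
 *       != TBM_SURFACE_QUEUE_ERROR_NONE)
 *           TBM_LOG_E("queue reset failed\n");
 */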
1664
1665 tbm_surface_queue_error_e
1666 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1667 {
1668         _tbm_surf_queue_mutex_lock();
1669
1670         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1671                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1672
1673         _tbm_surf_queue_mutex_unlock();
1674
1675         _notify_emit(surface_queue, &surface_queue->reset_noti);
1676
1677         return TBM_SURFACE_QUEUE_ERROR_NONE;
1678 }
1679
1680 tbm_surface_queue_error_e
1681 tbm_surface_queue_set_size(tbm_surface_queue_h
1682                         surface_queue, int queue_size, int flush)
1683 {
1684         queue_node *node = NULL, *tmp;
1685
1686         _tbm_surf_queue_mutex_lock();
1687
1688         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1689                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1690         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1691                                         TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1692
1693         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1694
1695         if ((surface_queue->queue_size == queue_size) && !flush) {
1696                 _tbm_surf_queue_mutex_unlock();
1697                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1698         }
1699
1700         pthread_mutex_lock(&surface_queue->lock);
1701
1702         if (flush) {
1703                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1704                         /* Destroy surfaces in the free_queue; nodes still in use are marked delete_pending */
1705                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1706                                 _queue_delete_node(surface_queue, node);
1707
1708                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1709                                 node->delete_pending = 1;
1710                 } else {
1711                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1712                                 _queue_delete_node(surface_queue, node);
1713
1714                         _queue_init(&surface_queue->dirty_queue);
1715                         LIST_INITHEAD(&surface_queue->list);
1716                 }
1717
1718                 /* Reset queue */
1719                 _queue_init(&surface_queue->free_queue);
1720
1721                 surface_queue->num_attached = 0;
1722                 surface_queue->queue_size = queue_size;
1723
1724                 if (surface_queue->impl && surface_queue->impl->reset)
1725                         surface_queue->impl->reset(surface_queue);
1726
1727                 pthread_mutex_unlock(&surface_queue->lock);
1728                 pthread_cond_signal(&surface_queue->free_cond);
1729
1730                 _tbm_surf_queue_mutex_unlock();
1731
1732                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1733
1734                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1735         } else {
1736                 if (surface_queue->queue_size > queue_size) {
1737                         int need_del = surface_queue->queue_size - queue_size;
1738
1739                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1740                                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1741
1742                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1743                                         surface_queue->impl->need_detach(surface_queue, node);
1744                                 else
1745                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1746
1747                                 need_del--;
1748                                 if (need_del == 0)
1749                                         break;
1750                         }
1751                 }
1752
1753                 surface_queue->queue_size = queue_size;
1754
1755                 pthread_mutex_unlock(&surface_queue->lock);
1756
1757                 _tbm_surf_queue_mutex_unlock();
1758
1759                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1760         }
1761 }
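
/*
 * Illustrative sketch: shrinking or growing the queue. With flush == 0 only
 * surplus buffers sitting in the free queue are detached; with flush == 1
 * every attached buffer is dropped and the reset notification is emitted.
 * The sizes below are example values.
 *
 *   tbm_surface_queue_set_size(queue, 2, 0);   // trim to double buffering, keep in-flight buffers
 *   tbm_surface_queue_set_size(queue, 4, 1);   // grow and force a full flush/reset
 */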
1762
1763 tbm_surface_queue_error_e
1764 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1765 {
1766         queue_node *node = NULL;
1767
1768         _tbm_surf_queue_mutex_lock();
1769
1770         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1771                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1772
1773         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1774
1775         if (surface_queue->num_attached == 0) {
1776                 _tbm_surf_queue_mutex_unlock();
1777                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1778         }
1779
1780         pthread_mutex_lock(&surface_queue->lock);
1781
1782         /* Destroy surfaces in the free_queue */
1783         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1784                 if (surface_queue->impl && surface_queue->impl->need_detach)
1785                         surface_queue->impl->need_detach(surface_queue, node);
1786                 else
1787                         _tbm_surface_queue_detach(surface_queue, node->surface);
1788         }
1789
1790         /* Reset queue */
1791         _queue_init(&surface_queue->free_queue);
1792
1793         pthread_mutex_unlock(&surface_queue->lock);
1794         _tbm_surf_queue_mutex_unlock();
1795
1796         return TBM_SURFACE_QUEUE_ERROR_NONE;
1797 }
1798
1799 tbm_surface_queue_error_e
1800 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1801 {
1802         queue_node *node = NULL, *tmp;
1803
1804         _tbm_surf_queue_mutex_lock();
1805
1806         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1807                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1808
1809         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1810
1811         if (surface_queue->num_attached == 0) {
1812                 _tbm_surf_queue_mutex_unlock();
1813                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1814         }
1815
1816         pthread_mutex_lock(&surface_queue->lock);
1817
1818         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1819                 /* Destroy surfaces in the free_queue; nodes still in use are marked delete_pending */
1820                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1821                         _queue_delete_node(surface_queue, node);
1822
1823                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1824                         node->delete_pending = 1;
1825         } else {
1826                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1827                         _queue_delete_node(surface_queue, node);
1828
1829                 _queue_init(&surface_queue->dirty_queue);
1830                 LIST_INITHEAD(&surface_queue->list);
1831         }
1832
1833         /* Reset queue */
1834         _queue_init(&surface_queue->free_queue);
1835
1836         surface_queue->num_attached = 0;
1837
1838         if (surface_queue->impl && surface_queue->impl->reset)
1839                 surface_queue->impl->reset(surface_queue);
1840
1841         pthread_mutex_unlock(&surface_queue->lock);
1842         pthread_cond_signal(&surface_queue->free_cond);
1843
1844         _tbm_surf_queue_mutex_unlock();
1845
1846         _notify_emit(surface_queue, &surface_queue->reset_noti);
1847
1848         return TBM_SURFACE_QUEUE_ERROR_NONE;
1849 }
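
/*
 * Illustrative sketch: tbm_surface_queue_free_flush() only destroys buffers
 * sitting in the free queue (useful for trimming memory), while
 * tbm_surface_queue_flush() drops every attached buffer and emits the reset
 * notification so producers dequeue fresh surfaces.
 *
 *   tbm_surface_queue_free_flush(queue);   // release idle buffers only
 *   tbm_surface_queue_flush(queue);        // full flush; reset_noti is emitted
 */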
1850
1851 tbm_surface_queue_error_e
1852 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1853                         tbm_surface_h *surfaces, int *num)
1854 {
1855         queue_node *node = NULL;
1856
1857         _tbm_surf_queue_mutex_lock();
1858
1859         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1860                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1861         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1862                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1863
1864         *num = 0;
1865
1866         pthread_mutex_lock(&surface_queue->lock);
1867
1868         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1869                 if (surfaces)
1870                         surfaces[*num] = node->surface;
1871
1872                 *num = *num + 1;
1873         }
1874
1875         pthread_mutex_unlock(&surface_queue->lock);
1876
1877         _tbm_surf_queue_mutex_unlock();
1878
1879         return TBM_SURFACE_QUEUE_ERROR_NONE;
1880 }
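
/*
 * Illustrative sketch of the usual two-call pattern: query the number of
 * attached surfaces first, then fetch them. The array bound of 10 is an
 * assumption of this example, not a library limit.
 *
 *   tbm_surface_h surfaces[10];
 *   int num = 0;
 *
 *   tbm_surface_queue_get_surfaces(queue, NULL, &num);            // count only
 *   if (num > 0 && num <= 10)
 *           tbm_surface_queue_get_surfaces(queue, surfaces, &num); // fill the array
 */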
1881
1882 tbm_surface_queue_error_e
1883 tbm_surface_queue_get_trace_surface_num(
1884                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1885 {
1886         _tbm_surf_queue_mutex_lock();
1887
1888         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1889                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1890         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1891                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1892
1893         *num = 0;
1894
1895         pthread_mutex_lock(&surface_queue->lock);
1896
1897         switch (trace) {
1898         case TBM_SURFACE_QUEUE_TRACE_NONE:
1899                 *num = 0;
1900                 break;
1901         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1902                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1903                 break;
1904         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1905                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1906                 break;
1907         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1908                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1909                 break;
1910         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1911                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1912                 break;
1913         default:
1914                 break;
1915         }
1916
1917         pthread_mutex_unlock(&surface_queue->lock);
1918
1919         _tbm_surf_queue_mutex_unlock();
1920
1921         return TBM_SURFACE_QUEUE_ERROR_NONE;
1922 }
1923
1924 typedef struct {
1925         int flags;
1926 } tbm_queue_default;
1927
1928 static void
1929 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1930 {
1931         free(surface_queue->impl_data);
1932 }
1933
1934 static void
1935 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
1936 {
1937         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
1938         tbm_surface_h surface;
1939
1940         if (surface_queue->queue_size == surface_queue->num_attached)
1941                 return;
1942
1943         if (surface_queue->alloc_cb) {
1944                 pthread_mutex_unlock(&surface_queue->lock);
1945                 _tbm_surf_queue_mutex_unlock();
1946                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1947                 _tbm_surf_queue_mutex_lock();
1948                 pthread_mutex_lock(&surface_queue->lock);
1949
1950                 /* silent return */
1951                 if (!surface)
1952                         return;
1953
1954                 tbm_surface_internal_ref(surface);
1955         } else {
1956                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1957                                 surface_queue->height,
1958                                 surface_queue->format,
1959                                 data->flags);
1960                 TBM_RETURN_IF_FAIL(surface != NULL);
1961         }
1962
1963         _tbm_surface_queue_attach(surface_queue, surface);
1964         tbm_surface_internal_unref(surface);
1965 }
1966
1967 static const tbm_surface_queue_interface tbm_queue_default_impl = {
1968         NULL,                           /*__tbm_queue_default_init*/
1969         NULL,                           /*__tbm_queue_default_reset*/
1970         __tbm_queue_default_destroy,
1971         __tbm_queue_default_need_attach,
1972         NULL,                           /*__tbm_queue_default_enqueue*/
1973         NULL,                           /*__tbm_queue_default_release*/
1974         NULL,                           /*__tbm_queue_default_dequeue*/
1975         NULL,                           /*__tbm_queue_default_acquire*/
1976         NULL,                           /*__tbm_queue_default_need_detach*/
1977 };
1978
1979 tbm_surface_queue_h
1980 tbm_surface_queue_create(int queue_size, int width,
1981                          int height, int format, int flags)
1982 {
1983         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1984         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1985         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1986         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1987
1988         _tbm_surf_queue_mutex_lock();
1989
1990         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1991                                             sizeof(struct _tbm_surface_queue));
1992         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1993
1994         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1995
1996         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
1997                                   sizeof(tbm_queue_default));
1998         if (data == NULL) {
1999                 TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
2000                 free(surface_queue);
2001                 _tbm_surf_queue_mutex_unlock();
2002                 return NULL;
2003         }
2004
2005         data->flags = flags;
2006         _tbm_surface_queue_init(surface_queue,
2007                                 queue_size,
2008                                 width, height, format,
2009                                 &tbm_queue_default_impl, data);
2010
2011         _tbm_surf_queue_mutex_unlock();
2012
2013         return surface_queue;
2014 }
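
/*
 * Illustrative producer/consumer lifecycle sketch using the default queue
 * implementation above. TBM_BO_DEFAULT and TBM_FORMAT_ARGB8888 come from the
 * public tbm headers; error handling is omitted for brevity.
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_create(3, 720, 1280, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   tbm_surface_h surface = NULL;
 *
 *   if (tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           // ... render into the surface ...
 *           tbm_surface_queue_enqueue(queue, surface);   // make it acquirable by the consumer
 *   }
 *
 *   tbm_surface_queue_destroy(queue);
 */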
2015
2016 typedef struct {
2017         int flags;
2018         queue dequeue_list;
2019 } tbm_queue_sequence;
2020
2021 static void
2022 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2023 {
2024         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2025
2026         _queue_init(&data->dequeue_list);
2027 }
2028
2029 static void
2030 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2031 {
2032         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2033
2034         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2035                 return;
2036
2037         _queue_init(&data->dequeue_list);
2038 }
2039
2040 static void
2041 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2042 {
2043         free(surface_queue->impl_data);
2044 }
2045
2046 static void
2047 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
2048 {
2049         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2050         tbm_surface_h surface;
2051
2052         if (surface_queue->queue_size == surface_queue->num_attached)
2053                 return;
2054
2055         if (surface_queue->alloc_cb) {
2056                 pthread_mutex_unlock(&surface_queue->lock);
2057                 _tbm_surf_queue_mutex_unlock();
2058                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
2059                 _tbm_surf_queue_mutex_lock();
2060                 pthread_mutex_lock(&surface_queue->lock);
2061
2062                 /* silent return */
2063                 if (!surface)
2064                         return;
2065
2066                 tbm_surface_internal_ref(surface);
2067         } else {
2068                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
2069                                 surface_queue->height,
2070                                 surface_queue->format,
2071                                 data->flags);
2072                 TBM_RETURN_IF_FAIL(surface != NULL);
2073         }
2074
2075         _tbm_surface_queue_attach(surface_queue, surface);
2076         tbm_surface_internal_unref(surface);
2077 }
2078
2079 static void
2080 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2081                              queue_node *node)
2082 {
2083         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2084         queue_node *first = NULL;
2085
2086         first = container_of(data->dequeue_list.head.next, first, item_link);
2087         if (first != node) {
2088                 return;
2089         }
2090
2091         node->priv_flags = 0;
2092
2093         _queue_node_pop(&data->dequeue_list, node);
2094         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2095 }
2096
2097 static void
2098 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2099                                 queue_node *node)
2100 {
2101         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2102
2103         if (node->priv_flags) {
2104                 node->priv_flags = 0;
2105                 _queue_node_pop(&data->dequeue_list, node);
2106         }
2107
2108         _tbm_surface_queue_release(surface_queue, node, 1);
2109 }
2110
2111 static queue_node *
2112 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2113                              surface_queue)
2114 {
2115         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2116         queue_node *node;
2117
2118         node = _tbm_surface_queue_dequeue(surface_queue);
2119         if (node) {
2120                 _queue_node_push_back(&data->dequeue_list, node);
2121                 node->priv_flags = 1;
2122         }
2123
2124         return node;
2125 }
2126
2127 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2128         __tbm_queue_sequence_init,
2129         __tbm_queue_sequence_reset,
2130         __tbm_queue_sequence_destroy,
2131         __tbm_queue_sequence_need_attach,
2132         __tbm_queue_sequence_enqueue,
2133         __tbm_queue_sequence_release,
2134         __tbm_queue_sequence_dequeue,
2135         NULL,                                   /*__tbm_queue_sequence_acquire*/
2136         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2137 };
2138
2139 tbm_surface_queue_h
2140 tbm_surface_queue_sequence_create(int queue_size, int width,
2141                                   int height, int format, int flags)
2142 {
2143         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2144         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
2145         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
2146         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2147
2148         _tbm_surf_queue_mutex_lock();
2149
2150         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2151                                             sizeof(struct _tbm_surface_queue));
2152         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2153
2154         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
2155
2156         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2157                                    sizeof(tbm_queue_sequence));
2158         if (data == NULL) {
2159                 TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
2160                 free(surface_queue);
2161                 _tbm_surf_queue_mutex_unlock();
2162                 return NULL;
2163         }
2164
2165         data->flags = flags;
2166         _tbm_surface_queue_init(surface_queue,
2167                                 queue_size,
2168                                 width, height, format,
2169                                 &tbm_queue_sequence_impl, data);
2170
2171         _tbm_surf_queue_mutex_unlock();
2172
2173         return surface_queue;
2174 }
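
/*
 * Illustrative sketch: the sequence queue differs from the default queue only
 * in enqueue ordering. __tbm_queue_sequence_enqueue() above ignores an enqueue
 * unless the node is the oldest outstanding dequeue, so buffers become
 * acquirable in dequeue order. Handle names below are assumptions.
 *
 *   tbm_surface_queue_h queue =
 *           tbm_surface_queue_sequence_create(3, 720, 1280, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   tbm_surface_h a, b;
 *
 *   tbm_surface_queue_dequeue(queue, &a);
 *   tbm_surface_queue_dequeue(queue, &b);
 *   tbm_surface_queue_enqueue(queue, b);   // ignored: "a" is still the oldest outstanding dequeue
 *   tbm_surface_queue_enqueue(queue, a);   // accepted; buffers become acquirable in dequeue order
 */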
2175
2176 tbm_surface_queue_error_e
2177 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2178                                   int modes)
2179 {
2180         _tbm_surf_queue_mutex_lock();
2181
2182         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2183                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2184
2185         pthread_mutex_lock(&surface_queue->lock);
2186
2187         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2188                 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
2189         else
2190                 surface_queue->modes |= modes;
2191
2192         pthread_mutex_unlock(&surface_queue->lock);
2193
2194         _tbm_surf_queue_mutex_unlock();
2195
2196         return TBM_SURFACE_QUEUE_ERROR_NONE;
2197 }
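
/*
 * Illustrative sketch: modes are OR-ed into the queue, and passing
 * TBM_SURFACE_QUEUE_MODE_NONE clears them again. With GUARANTEE_CYCLE set,
 * flush and reset keep buffers that are still in flight and mark them
 * delete_pending instead of destroying them immediately (see the branches
 * above).
 *
 *   tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE);
 *   tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_NONE);   // back to default behaviour
 */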
2198
2199 tbm_surface_queue_error_e
2200 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2201                                   unsigned int sync_count)
2202 {
2203         int dequeue_num, enqueue_num;
2204
2205         _tbm_surf_queue_mutex_lock();
2206
2207         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2208                                    TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2209
2210         pthread_mutex_lock(&surface_queue->lock);
2211
2212         dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2213         enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2214
2215         if (dequeue_num + sync_count == 0)
2216                 surface_queue->acquire_sync_count = enqueue_num;
2217         else
2218                 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2219
2220         TBM_QUEUE_TRACE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2221                                 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2222
2223         pthread_mutex_unlock(&surface_queue->lock);
2224
2225         _tbm_surf_queue_mutex_unlock();
2226
2227         return TBM_SURFACE_QUEUE_ERROR_NONE;
2228 }