tbm_surface_queue: forbid registration of NULL in place of cb functions
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
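As the listing below shows, every tbm_surface_queue_add_*_cb / add_trace_cb entry point now validates its cb argument with TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL and fails with TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER when NULL is passed, and _notify_add()/_trace_add() guard against a NULL cb as well. A minimal caller-side sketch of the resulting behaviour (hypothetical example, not part of the commit; it assumes the public tbm_surface_queue.h header and a queue handle obtained elsewhere):

#include <tbm_surface_queue.h>

/* consumer-side callback: a buffer became acquirable */
static void
on_acquirable(tbm_surface_queue_h queue, void *data)
{
        (void)queue;
        (void)data;
}

static void
register_callbacks(tbm_surface_queue_h queue)
{
        tbm_surface_queue_error_e err;

        /* valid registration: returns TBM_SURFACE_QUEUE_ERROR_NONE */
        err = tbm_surface_queue_add_acquirable_cb(queue, on_acquirable, NULL);

        /* NULL in place of the callback: rejected up front with
         * TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER, nothing is registered */
        err = tbm_surface_queue_add_acquirable_cb(queue, NULL, NULL);
        (void)err;
}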
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 #define TBM_QUEUE_DEBUG 0
42
43 #ifdef TRACE
44 #define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
45 #else
46 #define TBM_QUEUE_TRACE(fmt, ...)
47 #endif /* TRACE */
48
49 #if TBM_QUEUE_DEBUG
50 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
51 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
52 #else
53 #define TBM_LOCK()
54 #define TBM_UNLOCK()
55 #endif
56
57 static tbm_bufmgr g_surf_queue_bufmgr;
58 static pthread_mutex_t tbm_surf_queue_lock;
59 void _tbm_surface_queue_mutex_unlock(void);
60
61 /* check condition */
62 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
63         if (!(cond)) {\
64                 TBM_LOG_E("'%s' failed.\n", #cond);\
65                 _tbm_surf_queue_mutex_unlock();\
66                 return;\
67         } \
68 }
69
70 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
71         if (!(cond)) {\
72                 TBM_LOG_E("'%s' failed.\n", #cond);\
73                 _tbm_surf_queue_mutex_unlock();\
74                 return val;\
75         } \
76 }
77
78 typedef enum _queue_node_type {
79         QUEUE_NODE_TYPE_NONE,
80         QUEUE_NODE_TYPE_DEQUEUE,
81         QUEUE_NODE_TYPE_ENQUEUE,
82         QUEUE_NODE_TYPE_ACQUIRE,
83         QUEUE_NODE_TYPE_RELEASE
84 } Queue_Node_Type;
85
86 typedef struct {
87         struct list_head head;
88         int count;
89 } queue;
90
91 typedef struct {
92         tbm_surface_h surface;
93
94         struct list_head item_link;
95         struct list_head link;
96
97         Queue_Node_Type type;
98
99         unsigned int priv_flags;        /*for each queue*/
100
101         int delete_pending;
102 } queue_node;
103
104 typedef struct {
105         struct list_head link;
106
107         tbm_surface_queue_notify_cb cb;
108         void *data;
109 } queue_notify;
110
111 typedef struct {
112         struct list_head link;
113
114         tbm_surface_queue_trace_cb cb;
115         void *data;
116 } queue_trace;
117
118 typedef struct _tbm_surface_queue_interface {
119         void (*init)(tbm_surface_queue_h queue);
120         void (*reset)(tbm_surface_queue_h queue);
121         void (*destroy)(tbm_surface_queue_h queue);
122         void (*need_attach)(tbm_surface_queue_h queue);
123
124         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
125         void (*release)(tbm_surface_queue_h queue, queue_node *node);
126         queue_node *(*dequeue)(tbm_surface_queue_h queue);
127         queue_node *(*acquire)(tbm_surface_queue_h queue);
128         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
129 } tbm_surface_queue_interface;
130
131 struct _tbm_surface_queue {
132         int width;
133         int height;
134         int format;
135         int queue_size;
136         int num_attached;
137
138         queue free_queue;
139         queue dirty_queue;
140         struct list_head list;
141
142         struct list_head destory_noti;
143         struct list_head dequeuable_noti;
144         struct list_head dequeue_noti;
145         struct list_head can_dequeue_noti;
146         struct list_head acquirable_noti;
147         struct list_head reset_noti;
148         struct list_head trace_noti;
149
150         pthread_mutex_t lock;
151         pthread_cond_t free_cond;
152         pthread_cond_t dirty_cond;
153
154         const tbm_surface_queue_interface *impl;
155         void *impl_data;
156
157         //For external buffer allocation
158         tbm_surface_alloc_cb alloc_cb;
159         tbm_surface_free_cb free_cb;
160         void *alloc_cb_data;
161
162         struct list_head item_link; /* link of surface queue */
163
164         int modes;
165         unsigned int enqueue_sync_count;
166         unsigned int acquire_sync_count;
167 };
168
169 static bool
170 _tbm_surf_queue_mutex_init(void)
171 {
172         static bool tbm_surf_queue_mutex_init = false;
173
174         if (tbm_surf_queue_mutex_init)
175                 return true;
176
177         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
178                 TBM_LOG_E("fail: pthread_mutex_init\n");
179                 return false;
180         }
181
182         tbm_surf_queue_mutex_init = true;
183
184         return true;
185 }
186
187 static void
188 _tbm_surf_queue_mutex_lock(void)
189 {
190         if (!_tbm_surf_queue_mutex_init()) {
191                 TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
192                 return;
193         }
194
195         pthread_mutex_lock(&tbm_surf_queue_lock);
196 }
197
198 static void
199 _tbm_surf_queue_mutex_unlock(void)
200 {
201         pthread_mutex_unlock(&tbm_surf_queue_lock);
202 }
203
204 static void
205 _init_tbm_surf_queue_bufmgr(void)
206 {
207         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
208 }
209
210 static void
211 _deinit_tbm_surf_queue_bufmgr(void)
212 {
213         if (!g_surf_queue_bufmgr)
214                 return;
215
216         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
217         g_surf_queue_bufmgr = NULL;
218 }
219
220 static int
221 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
222 {
223         tbm_surface_queue_h old_data = NULL;
224
225         if (surface_queue == NULL) {
226                 TBM_LOG_E("error: surface_queue is NULL.\n");
227                 return 0;
228         }
229
230         if (g_surf_queue_bufmgr == NULL) {
231                 TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
232                 return 0;
233         }
234
235         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
236                 TBM_LOG_E("error: surf_queue_list is empty\n");
237                 return 0;
238         }
239
240         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
241                                 item_link) {
242                 if (old_data == surface_queue) {
243                         TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
244                         return 1;
245                 }
246         }
247
248         TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
249
250         return 0;
251 }
252
253 static queue_node *
254 _queue_node_create(void)
255 {
256         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
257
258         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
259
260         return node;
261 }
262
263 static void
264 _queue_node_delete(queue_node *node)
265 {
266         LIST_DEL(&node->item_link);
267         LIST_DEL(&node->link);
268         free(node);
269 }
270
271 static int
272 _queue_is_empty(queue *queue)
273 {
274         if (LIST_IS_EMPTY(&queue->head))
275                 return 1;
276
277         return 0;
278 }
279
280 static void
281 _queue_node_push_back(queue *queue, queue_node *node)
282 {
283         LIST_ADDTAIL(&node->item_link, &queue->head);
284         queue->count++;
285 }
286
287 static void
288 _queue_node_push_front(queue *queue, queue_node *node)
289 {
290         LIST_ADD(&node->item_link, &queue->head);
291         queue->count++;
292 }
293
294 static queue_node *
295 _queue_node_pop_front(queue *queue)
296 {
297         queue_node *node;
298
299         if (!queue->head.next) return NULL;
300         if (!queue->count) return NULL;
301
302         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
303
304         LIST_DELINIT(&node->item_link);
305         queue->count--;
306
307         return node;
308 }
309
310 static queue_node *
311 _queue_node_pop(queue *queue, queue_node *node)
312 {
313         LIST_DELINIT(&node->item_link);
314         queue->count--;
315
316         return node;
317 }
318
319 static queue_node *
320 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
321                 tbm_surface_h surface, int *out_type)
322 {
323         queue_node *node = NULL;
324
325         if (type == 0)
326                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
327         if (out_type)
328                 *out_type = 0;
329
330         if (type & FREE_QUEUE) {
331                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
332                                          item_link) {
333                         if (node->surface == surface) {
334                                 if (out_type)
335                                         *out_type = FREE_QUEUE;
336
337                                 return node;
338                         }
339                 }
340         }
341
342         if (type & DIRTY_QUEUE) {
343                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
344                                          item_link) {
345                         if (node->surface == surface) {
346                                 if (out_type)
347                                         *out_type = DIRTY_QUEUE;
348
349                                 return node;
350                         }
351                 }
352         }
353
354         if (type & NODE_LIST) {
355                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
356                         if (node->surface == surface) {
357                                 if (out_type)
358                                         *out_type = NODE_LIST;
359
360                                 return node;
361                         }
362                 }
363         }
364
365         TBM_LOG_E("fail to get the queue_node.\n");
366
367         return NULL;
368 }
369
370 static void
371 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
372 {
373         if (node->surface) {
374                 if (surface_queue->free_cb) {
375                         surface_queue->free_cb(surface_queue,
376                                         surface_queue->alloc_cb_data,
377                                         node->surface);
378                 }
379
380                 tbm_surface_destroy(node->surface);
381         }
382
383         _queue_node_delete(node);
384 }
385
386 static void
387 _queue_init(queue *queue)
388 {
389         LIST_INITHEAD(&queue->head);
390
391         queue->count = 0;
392 }
393
394 static void
395 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
396             void *data)
397 {
398         TBM_RETURN_IF_FAIL(cb != NULL);
399
400         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
401
402         TBM_RETURN_IF_FAIL(item != NULL);
403
404         LIST_INITHEAD(&item->link);
405         item->cb = cb;
406         item->data = data;
407
408         LIST_ADDTAIL(&item->link, list);
409 }
410
411 static void
412 _notify_remove(struct list_head *list,
413                tbm_surface_queue_notify_cb cb, void *data)
414 {
415         queue_notify *item = NULL, *tmp;
416
417         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
418                 if (item->cb == cb && item->data == data) {
419                         LIST_DEL(&item->link);
420                         free(item);
421                         return;
422                 }
423         }
424
425         TBM_LOG_E("Cannot find notify\n");
426 }
427
428 static void
429 _notify_remove_all(struct list_head *list)
430 {
431         queue_notify *item = NULL, *tmp;
432
433         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
434                 LIST_DEL(&item->link);
435                 free(item);
436         }
437 }
438
439 static void
440 _notify_emit(tbm_surface_queue_h surface_queue,
441              struct list_head *list)
442 {
443         queue_notify *item = NULL, *tmp;
444
445         /*
446                 item->cb is a callback provided from outside libtbm.
447                 The tbm user may remove items from this list inside the
448                 callback, so we have to use LIST_FOR_EACH_ENTRY_SAFE.
449         */
450         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
451                 item->cb(surface_queue, item->data);
452 }
453
454 static void
455 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
456             void *data)
457 {
458         TBM_RETURN_IF_FAIL(cb != NULL);
459
460         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
461
462         TBM_RETURN_IF_FAIL(item != NULL);
463
464         LIST_INITHEAD(&item->link);
465         item->cb = cb;
466         item->data = data;
467
468         LIST_ADDTAIL(&item->link, list);
469 }
470
471 static void
472 _trace_remove(struct list_head *list,
473                tbm_surface_queue_trace_cb cb, void *data)
474 {
475         queue_trace *item = NULL, *tmp;
476
477         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
478                 if (item->cb == cb && item->data == data) {
479                         LIST_DEL(&item->link);
480                         free(item);
481                         return;
482                 }
483         }
484
485         TBM_LOG_E("Cannot find trace\n");
486 }
487
488 static void
489 _trace_remove_all(struct list_head *list)
490 {
491         queue_trace *item = NULL, *tmp;
492
493         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
494                 LIST_DEL(&item->link);
495                 free(item);
496         }
497 }
498
499 static void
500 _trace_emit(tbm_surface_queue_h surface_queue,
501              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
502 {
503         queue_trace *item = NULL, *tmp;
504
505         /*
506                 item->cb is a callback provided from outside libtbm.
507                 The tbm user may remove items from this list inside the
508                 callback, so we have to use LIST_FOR_EACH_ENTRY_SAFE.
509         */
510         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
511                 item->cb(surface_queue, surface, trace, item->data);
512 }
513
514 static int
515 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
516 {
517         queue_node *node = NULL;
518         int count = 0;
519
520         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
521                 if (node->type == type)
522                         count++;
523         }
524
525         return count;
526 }
527
528 static void
529 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
530                           tbm_surface_h surface)
531 {
532         queue_node *node;
533
534         node = _queue_node_create();
535         TBM_RETURN_IF_FAIL(node != NULL);
536
537         tbm_surface_internal_ref(surface);
538         node->surface = surface;
539
540         LIST_ADDTAIL(&node->link, &surface_queue->list);
541         surface_queue->num_attached++;
542         _queue_node_push_back(&surface_queue->free_queue, node);
543 }
544
545 static void
546 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
547                           tbm_surface_h surface)
548 {
549         queue_node *node;
550         int queue_type;
551
552         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
553         if (node) {
554                 _queue_delete_node(surface_queue, node);
555                 surface_queue->num_attached--;
556         }
557 }
558
559 static void
560 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
561                            queue_node *node, int push_back)
562 {
563         if (push_back)
564                 _queue_node_push_back(&surface_queue->dirty_queue, node);
565         else
566                 _queue_node_push_front(&surface_queue->dirty_queue, node);
567 }
568
569 static queue_node *
570 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
571 {
572         queue_node *node;
573
574         node = _queue_node_pop_front(&surface_queue->free_queue);
575
576         return node;
577 }
578
579 static queue_node *
580 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
581 {
582         queue_node *node;
583
584         if (_queue_is_empty(&surface_queue->dirty_queue))
585                 return NULL;
586
587         node = _queue_node_pop_front(&surface_queue->dirty_queue);
588
589         return node;
590 }
591
592 static void
593 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
594                            queue_node *node, int push_back)
595 {
596         if (push_back)
597                 _queue_node_push_back(&surface_queue->free_queue, node);
598         else
599                 _queue_node_push_front(&surface_queue->free_queue, node);
600 }
601
602 static void
603 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
604                         int queue_size,
605                         int width, int height, int format,
606                         const tbm_surface_queue_interface *impl, void *data)
607 {
608         TBM_RETURN_IF_FAIL(surface_queue != NULL);
609         TBM_RETURN_IF_FAIL(impl != NULL);
610
611         if (!g_surf_queue_bufmgr)
612                 _init_tbm_surf_queue_bufmgr();
613
614         pthread_mutex_init(&surface_queue->lock, NULL);
615         pthread_cond_init(&surface_queue->free_cond, NULL);
616         pthread_cond_init(&surface_queue->dirty_cond, NULL);
617
618         surface_queue->queue_size = queue_size;
619         surface_queue->width = width;
620         surface_queue->height = height;
621         surface_queue->format = format;
622         surface_queue->impl = impl;
623         surface_queue->impl_data = data;
624         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
625
626         _queue_init(&surface_queue->free_queue);
627         _queue_init(&surface_queue->dirty_queue);
628         LIST_INITHEAD(&surface_queue->list);
629
630         LIST_INITHEAD(&surface_queue->destory_noti);
631         LIST_INITHEAD(&surface_queue->dequeuable_noti);
632         LIST_INITHEAD(&surface_queue->dequeue_noti);
633         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
634         LIST_INITHEAD(&surface_queue->acquirable_noti);
635         LIST_INITHEAD(&surface_queue->reset_noti);
636         LIST_INITHEAD(&surface_queue->trace_noti);
637
638         if (surface_queue->impl && surface_queue->impl->init)
639                 surface_queue->impl->init(surface_queue);
640
641         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
642 }
643
644 tbm_surface_queue_error_e
645 tbm_surface_queue_add_destroy_cb(
646         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
647         void *data)
648 {
649         _tbm_surf_queue_mutex_lock();
650
651         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
652                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
653         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
654                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
655
656         pthread_mutex_lock(&surface_queue->lock);
657
658         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
659
660         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
661
662         pthread_mutex_unlock(&surface_queue->lock);
663
664         _tbm_surf_queue_mutex_unlock();
665
666         return TBM_SURFACE_QUEUE_ERROR_NONE;
667 }
668
669 tbm_surface_queue_error_e
670 tbm_surface_queue_remove_destroy_cb(
671         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
672         void *data)
673 {
674         _tbm_surf_queue_mutex_lock();
675
676         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
677                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
678
679         pthread_mutex_lock(&surface_queue->lock);
680
681         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
682
683         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
684
685         pthread_mutex_unlock(&surface_queue->lock);
686
687         _tbm_surf_queue_mutex_unlock();
688
689         return TBM_SURFACE_QUEUE_ERROR_NONE;
690 }
691
692 tbm_surface_queue_error_e
693 tbm_surface_queue_add_dequeuable_cb(
694         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
695         void *data)
696 {
697         _tbm_surf_queue_mutex_lock();
698
699         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
700                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
701         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
702                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
703
704         pthread_mutex_lock(&surface_queue->lock);
705
706         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
707
708         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
709
710         pthread_mutex_unlock(&surface_queue->lock);
711
712         _tbm_surf_queue_mutex_unlock();
713
714         return TBM_SURFACE_QUEUE_ERROR_NONE;
715 }
716
717 tbm_surface_queue_error_e
718 tbm_surface_queue_remove_dequeuable_cb(
719         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
720         void *data)
721 {
722         _tbm_surf_queue_mutex_lock();
723
724         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
725                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
726
727         pthread_mutex_lock(&surface_queue->lock);
728
729         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
730
731         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
732
733         pthread_mutex_unlock(&surface_queue->lock);
734
735         _tbm_surf_queue_mutex_unlock();
736
737         return TBM_SURFACE_QUEUE_ERROR_NONE;
738 }
739
740 tbm_surface_queue_error_e
741 tbm_surface_queue_add_dequeue_cb(
742         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
743         void *data)
744 {
745         _tbm_surf_queue_mutex_lock();
746
747         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
748                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
749         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
750                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
751
752         pthread_mutex_lock(&surface_queue->lock);
753
754         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
755
756         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
757
758         pthread_mutex_unlock(&surface_queue->lock);
759
760         _tbm_surf_queue_mutex_unlock();
761
762         return TBM_SURFACE_QUEUE_ERROR_NONE;
763 }
764
765 tbm_surface_queue_error_e
766 tbm_surface_queue_remove_dequeue_cb(
767         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
768         void *data)
769 {
770         _tbm_surf_queue_mutex_lock();
771
772         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
773                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
774
775         pthread_mutex_lock(&surface_queue->lock);
776
777         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
778
779         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
780
781         pthread_mutex_unlock(&surface_queue->lock);
782
783         _tbm_surf_queue_mutex_unlock();
784
785         return TBM_SURFACE_QUEUE_ERROR_NONE;
786 }
787
788 tbm_surface_queue_error_e
789 tbm_surface_queue_add_can_dequeue_cb(
790         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
791         void *data)
792 {
793         _tbm_surf_queue_mutex_lock();
794
795         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
796                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
797         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
798                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
799
800         pthread_mutex_lock(&surface_queue->lock);
801
802         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
803
804         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
805
806         pthread_mutex_unlock(&surface_queue->lock);
807
808         _tbm_surf_queue_mutex_unlock();
809
810         return TBM_SURFACE_QUEUE_ERROR_NONE;
811 }
812
813 tbm_surface_queue_error_e
814 tbm_surface_queue_remove_can_dequeue_cb(
815         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
816         void *data)
817 {
818         _tbm_surf_queue_mutex_lock();
819
820         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
821                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
822
823         pthread_mutex_lock(&surface_queue->lock);
824
825         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
826
827         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
828
829         pthread_mutex_unlock(&surface_queue->lock);
830
831         _tbm_surf_queue_mutex_unlock();
832
833         return TBM_SURFACE_QUEUE_ERROR_NONE;
834 }
835
836 tbm_surface_queue_error_e
837 tbm_surface_queue_add_acquirable_cb(
838         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
839         void *data)
840 {
841         _tbm_surf_queue_mutex_lock();
842
843         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
844                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
845         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
846                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
847
848         pthread_mutex_lock(&surface_queue->lock);
849
850         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
851
852         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
853
854         pthread_mutex_unlock(&surface_queue->lock);
855
856         _tbm_surf_queue_mutex_unlock();
857
858         return TBM_SURFACE_QUEUE_ERROR_NONE;
859 }
860
861 tbm_surface_queue_error_e
862 tbm_surface_queue_remove_acquirable_cb(
863         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
864         void *data)
865 {
866         _tbm_surf_queue_mutex_lock();
867
868         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
869                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
870
871         pthread_mutex_lock(&surface_queue->lock);
872
873         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
874
875         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
876
877         pthread_mutex_unlock(&surface_queue->lock);
878
879         _tbm_surf_queue_mutex_unlock();
880
881         return TBM_SURFACE_QUEUE_ERROR_NONE;
882 }
883
884 tbm_surface_queue_error_e
885 tbm_surface_queue_add_trace_cb(
886         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
887         void *data)
888 {
889         _tbm_surf_queue_mutex_lock();
890
891         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
892                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
893         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
894                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
895
896         pthread_mutex_lock(&surface_queue->lock);
897
898         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
899
900         _trace_add(&surface_queue->trace_noti, trace_cb, data);
901
902         pthread_mutex_unlock(&surface_queue->lock);
903
904         _tbm_surf_queue_mutex_unlock();
905
906         return TBM_SURFACE_QUEUE_ERROR_NONE;
907 }
908
909 tbm_surface_queue_error_e
910 tbm_surface_queue_remove_trace_cb(
911         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
912         void *data)
913 {
914         _tbm_surf_queue_mutex_lock();
915
916         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
917                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
918
919         pthread_mutex_lock(&surface_queue->lock);
920
921         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
922
923         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
924
925         pthread_mutex_unlock(&surface_queue->lock);
926
927         _tbm_surf_queue_mutex_unlock();
928
929         return TBM_SURFACE_QUEUE_ERROR_NONE;
930 }
931
932 tbm_surface_queue_error_e
933 tbm_surface_queue_set_alloc_cb(
934         tbm_surface_queue_h surface_queue,
935         tbm_surface_alloc_cb alloc_cb,
936         tbm_surface_free_cb free_cb,
937         void *data)
938 {
939         _tbm_surf_queue_mutex_lock();
940
941         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
942                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
943
944         pthread_mutex_lock(&surface_queue->lock);
945
946         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
947
948         surface_queue->alloc_cb = alloc_cb;
949         surface_queue->free_cb = free_cb;
950         surface_queue->alloc_cb_data = data;
951
952         pthread_mutex_unlock(&surface_queue->lock);
953
954         _tbm_surf_queue_mutex_unlock();
955
956         return TBM_SURFACE_QUEUE_ERROR_NONE;
957 }
958
959 int
960 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
961 {
962         int width;
963
964         _tbm_surf_queue_mutex_lock();
965
966         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
967
968         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
969
970         width = surface_queue->width;
971
972         _tbm_surf_queue_mutex_unlock();
973
974         return width;
975 }
976
977 int
978 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
979 {
980         int height;
981
982         _tbm_surf_queue_mutex_lock();
983
984         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
985
986         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
987
988         height = surface_queue->height;
989
990         _tbm_surf_queue_mutex_unlock();
991
992         return height;
993 }
994
995 int
996 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
997 {
998         int format;
999
1000         _tbm_surf_queue_mutex_lock();
1001
1002         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1003
1004         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1005
1006         format = surface_queue->format;
1007
1008         _tbm_surf_queue_mutex_unlock();
1009
1010         return format;
1011 }
1012
1013 int
1014 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1015 {
1016         int queue_size;
1017
1018         _tbm_surf_queue_mutex_lock();
1019
1020         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1021
1022         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1023
1024         queue_size = surface_queue->queue_size;
1025
1026         _tbm_surf_queue_mutex_unlock();
1027
1028         return queue_size;
1029 }
1030
1031 tbm_surface_queue_error_e
1032 tbm_surface_queue_add_reset_cb(
1033         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1034         void *data)
1035 {
1036         _tbm_surf_queue_mutex_lock();
1037
1038         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1039                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1040         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
1041                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1042
1043         pthread_mutex_lock(&surface_queue->lock);
1044
1045         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1046
1047         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1048
1049         pthread_mutex_unlock(&surface_queue->lock);
1050
1051         _tbm_surf_queue_mutex_unlock();
1052
1053         return TBM_SURFACE_QUEUE_ERROR_NONE;
1054 }
1055
1056 tbm_surface_queue_error_e
1057 tbm_surface_queue_remove_reset_cb(
1058         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1059         void *data)
1060 {
1061         _tbm_surf_queue_mutex_lock();
1062
1063         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1064                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1065
1066         pthread_mutex_lock(&surface_queue->lock);
1067
1068         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1069
1070         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1071
1072         pthread_mutex_unlock(&surface_queue->lock);
1073
1074         _tbm_surf_queue_mutex_unlock();
1075
1076         return TBM_SURFACE_QUEUE_ERROR_NONE;
1077 }
1078
1079 tbm_surface_queue_error_e
1080 tbm_surface_queue_enqueue(tbm_surface_queue_h
1081                           surface_queue, tbm_surface_h surface)
1082 {
1083         queue_node *node;
1084         int queue_type;
1085
1086         _tbm_surf_queue_mutex_lock();
1087
1088         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1089                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1090         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1091                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1092
1093         if (b_dump_queue)
1094                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1095
1096         pthread_mutex_lock(&surface_queue->lock);
1097
1098         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1099
1100         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1101         if (node == NULL || queue_type != NODE_LIST) {
1102                 TBM_LOG_E("tbm_surface_queue_enqueue::Surface already exists in free_queue or dirty_queue, node:%p, type:%d\n",
1103                         node, queue_type);
1104                 pthread_mutex_unlock(&surface_queue->lock);
1105
1106                 _tbm_surf_queue_mutex_unlock();
1107
1108                 if (!node)
1109                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1110                 else
1111                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1112         }
1113
1114         if (surface_queue->impl && surface_queue->impl->enqueue)
1115                 surface_queue->impl->enqueue(surface_queue, node);
1116         else
1117                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1118
1119         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1120                 TBM_LOG_E("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1121                 pthread_mutex_unlock(&surface_queue->lock);
1122
1123                 _tbm_surf_queue_mutex_unlock();
1124                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1125         }
1126
1127         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1128
1129         if (surface_queue->enqueue_sync_count == 1) {
1130                 tbm_surface_info_s info;
1131                 int ret;
1132
1133                 TBM_LOG_E("start map surface:%p", surface);
1134                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1135                 TBM_LOG_E("end map surface:%p", surface);
1136                 if (ret == TBM_SURFACE_ERROR_NONE)
1137                         tbm_surface_unmap(surface);
1138         }
1139
1140         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1141
1142         pthread_mutex_unlock(&surface_queue->lock);
1143         pthread_cond_signal(&surface_queue->dirty_cond);
1144
1145         _tbm_surf_queue_mutex_unlock();
1146
1147         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1148
1149         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1150
1151         return TBM_SURFACE_QUEUE_ERROR_NONE;
1152 }
1153
1154 tbm_surface_queue_error_e
1155 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1156                           surface_queue, tbm_surface_h surface)
1157 {
1158         queue_node *node;
1159         int queue_type;
1160
1161         _tbm_surf_queue_mutex_lock();
1162
1163         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1164                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1165         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1166                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1167
1168         pthread_mutex_lock(&surface_queue->lock);
1169
1170         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1171
1172         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1173         if (node == NULL || queue_type != NODE_LIST) {
1174                 TBM_LOG_E("tbm_surface_queue_cancel_dequeue::Surface already exists in free_queue or dirty_queue, node:%p, type:%d\n",
1175                         node, queue_type);
1176                 pthread_mutex_unlock(&surface_queue->lock);
1177
1178                 _tbm_surf_queue_mutex_unlock();
1179                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1180         }
1181
1182         if (node->delete_pending) {
1183                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1184
1185                 _queue_delete_node(surface_queue, node);
1186
1187                 pthread_mutex_unlock(&surface_queue->lock);
1188
1189                 _tbm_surf_queue_mutex_unlock();
1190
1191                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1192
1193                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1194         }
1195
1196         if (surface_queue->queue_size < surface_queue->num_attached) {
1197                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1198
1199                 if (surface_queue->impl && surface_queue->impl->need_detach)
1200                         surface_queue->impl->need_detach(surface_queue, node);
1201                 else
1202                         _tbm_surface_queue_detach(surface_queue, surface);
1203
1204                 pthread_mutex_unlock(&surface_queue->lock);
1205
1206                 _tbm_surf_queue_mutex_unlock();
1207
1208                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1209
1210                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1211         }
1212
1213         if (surface_queue->impl && surface_queue->impl->release)
1214                 surface_queue->impl->release(surface_queue, node);
1215         else
1216                 _tbm_surface_queue_release(surface_queue, node, 1);
1217
1218         if (_queue_is_empty(&surface_queue->free_queue)) {
1219                 pthread_mutex_unlock(&surface_queue->lock);
1220
1221                 TBM_LOG_E("surface_queue->free_queue is empty.\n");
1222                 _tbm_surf_queue_mutex_unlock();
1223                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1224         }
1225
1226         node->type = QUEUE_NODE_TYPE_RELEASE;
1227
1228         pthread_mutex_unlock(&surface_queue->lock);
1229         pthread_cond_signal(&surface_queue->free_cond);
1230
1231         _tbm_surf_queue_mutex_unlock();
1232
1233         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1234
1235         return TBM_SURFACE_QUEUE_ERROR_NONE;
1236 }
1237
1238 tbm_surface_queue_error_e
1239 tbm_surface_queue_dequeue(tbm_surface_queue_h
1240                           surface_queue, tbm_surface_h *surface)
1241 {
1242         queue_node *node;
1243
1244         _tbm_surf_queue_mutex_lock();
1245
1246         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1247                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1248         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1249                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1250
1251         *surface = NULL;
1252
1253         pthread_mutex_lock(&surface_queue->lock);
1254
1255         if (_queue_is_empty(&surface_queue->free_queue)) {
1256                 if (surface_queue->impl && surface_queue->impl->need_attach)
1257                         surface_queue->impl->need_attach(surface_queue);
1258
1259                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1260                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1261                         pthread_mutex_unlock(&surface_queue->lock);
1262                         _tbm_surf_queue_mutex_unlock();
1263                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1264                 }
1265         }
1266
1267         if (surface_queue->impl && surface_queue->impl->dequeue)
1268                 node = surface_queue->impl->dequeue(surface_queue);
1269         else
1270                 node = _tbm_surface_queue_dequeue(surface_queue);
1271
1272         if (node == NULL || node->surface == NULL) {
1273                 TBM_LOG_E("_queue_node_pop_front failed\n");
1274                 pthread_mutex_unlock(&surface_queue->lock);
1275
1276                 _tbm_surf_queue_mutex_unlock();
1277                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1278         }
1279
1280         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1281         *surface = node->surface;
1282
1283         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1284
1285         pthread_mutex_unlock(&surface_queue->lock);
1286
1287         _tbm_surf_queue_mutex_unlock();
1288
1289         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1290
1291         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1292
1293         return TBM_SURFACE_QUEUE_ERROR_NONE;
1294 }
1295
1296 int
1297 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1298 {
1299         _tbm_surf_queue_mutex_lock();
1300
1301         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1302
1303         _tbm_surf_queue_mutex_unlock();
1304
1305         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1306
1307         _tbm_surf_queue_mutex_lock();
1308
1309         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1310
1311         pthread_mutex_lock(&surface_queue->lock);
1312
1313         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1314
1315         if (_queue_is_empty(&surface_queue->free_queue)) {
1316                 if (surface_queue->impl && surface_queue->impl->need_attach)
1317                         surface_queue->impl->need_attach(surface_queue);
1318
1319                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1320                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1321                         pthread_mutex_unlock(&surface_queue->lock);
1322                         _tbm_surf_queue_mutex_unlock();
1323                         return 0;
1324                 }
1325         }
1326
1327         if (!_queue_is_empty(&surface_queue->free_queue)) {
1328                 pthread_mutex_unlock(&surface_queue->lock);
1329                 _tbm_surf_queue_mutex_unlock();
1330                 return 1;
1331         }
1332
1333         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1334                                                 QUEUE_NODE_TYPE_ACQUIRE)) {
1335                 _tbm_surf_queue_mutex_unlock();
1336                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1337                 pthread_mutex_unlock(&surface_queue->lock);
1338                 return 1;
1339         }
1340
1341         pthread_mutex_unlock(&surface_queue->lock);
1342         _tbm_surf_queue_mutex_unlock();
1343         return 0;
1344 }
1345
1346 tbm_surface_queue_error_e
1347 tbm_surface_queue_release(tbm_surface_queue_h
1348                           surface_queue, tbm_surface_h surface)
1349 {
1350         queue_node *node;
1351         int queue_type;
1352
1353         _tbm_surf_queue_mutex_lock();
1354
1355         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1356                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1357         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1358                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1359
1360         pthread_mutex_lock(&surface_queue->lock);
1361
1362         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1363
1364         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1365         if (node == NULL || queue_type != NODE_LIST) {
1366                 TBM_LOG_E("tbm_surface_queue_release::Surface already exists in free_queue or dirty_queue, node:%p, type:%d\n",
1367                         node, queue_type);
1368                 pthread_mutex_unlock(&surface_queue->lock);
1369
1370                 _tbm_surf_queue_mutex_unlock();
1371
1372                 if (!node)
1373                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1374                 else
1375                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1376         }
1377
1378         if (node->delete_pending) {
1379                 TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1380
1381                 _queue_delete_node(surface_queue, node);
1382
1383                 pthread_mutex_unlock(&surface_queue->lock);
1384
1385                 _tbm_surf_queue_mutex_unlock();
1386
1387                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1388
1389                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1390         }
1391
1392         if (surface_queue->queue_size < surface_queue->num_attached) {
1393                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1394
1395                 if (surface_queue->impl && surface_queue->impl->need_detach)
1396                         surface_queue->impl->need_detach(surface_queue, node);
1397                 else
1398                         _tbm_surface_queue_detach(surface_queue, surface);
1399
1400                 pthread_mutex_unlock(&surface_queue->lock);
1401
1402                 _tbm_surf_queue_mutex_unlock();
1403
1404                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1405
1406                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1407         }
1408
1409         if (surface_queue->impl && surface_queue->impl->release)
1410                 surface_queue->impl->release(surface_queue, node);
1411         else
1412                 _tbm_surface_queue_release(surface_queue, node, 1);
1413
1414         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1415                 TBM_LOG_E("release surface(%p) but surface isn't present in the free_queue\n", surface);
1416                 pthread_mutex_unlock(&surface_queue->lock);
1417
1418                 _tbm_surf_queue_mutex_unlock();
1419                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1420         }
1421
1422         node->type = QUEUE_NODE_TYPE_RELEASE;
1423
1424         pthread_mutex_unlock(&surface_queue->lock);
1425         pthread_cond_signal(&surface_queue->free_cond);
1426
1427         _tbm_surf_queue_mutex_unlock();
1428
1429         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1430
1431         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1432
1433         return TBM_SURFACE_QUEUE_ERROR_NONE;
1434 }
1435
1436 tbm_surface_queue_error_e
1437 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1438                         surface_queue, tbm_surface_h surface)
1439 {
1440         queue_node *node;
1441         int queue_type;
1442
1443         _tbm_surf_queue_mutex_lock();
1444
1445         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1446                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1447         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1448                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1449
1450         pthread_mutex_lock(&surface_queue->lock);
1451
1452         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1453
1454         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1455         if (node == NULL || queue_type != NODE_LIST) {
1456                 TBM_LOG_E("tbm_surface_queue_cancel_acquire::Surface already exists in free_queue or dirty_queue, node:%p, type:%d\n",
1457                         node, queue_type);
1458                 pthread_mutex_unlock(&surface_queue->lock);
1459
1460                 _tbm_surf_queue_mutex_unlock();
1461                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1462         }
1463
1464         if (surface_queue->impl && surface_queue->impl->enqueue)
1465                 surface_queue->impl->enqueue(surface_queue, node);
1466         else
1467                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1468
1469         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1470                 TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
1471                 pthread_mutex_unlock(&surface_queue->lock);
1472
1473                 _tbm_surf_queue_mutex_unlock();
1474                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1475         }
1476
1477         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1478
1479         pthread_mutex_unlock(&surface_queue->lock);
1480         pthread_cond_signal(&surface_queue->dirty_cond);
1481
1482         _tbm_surf_queue_mutex_unlock();
1483
1484         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1485
1486         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1487
1488         return TBM_SURFACE_QUEUE_ERROR_NONE;
1489 }
1490
1491 tbm_surface_queue_error_e
1492 tbm_surface_queue_acquire(tbm_surface_queue_h
1493                           surface_queue, tbm_surface_h *surface)
1494 {
1495         queue_node *node;
1496
1497         _tbm_surf_queue_mutex_lock();
1498
1499         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1500                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1501         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1502                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1503
1504         *surface = NULL;
1505
1506         pthread_mutex_lock(&surface_queue->lock);
1507
1508         if (surface_queue->impl && surface_queue->impl->acquire)
1509                 node = surface_queue->impl->acquire(surface_queue);
1510         else
1511                 node = _tbm_surface_queue_acquire(surface_queue);
1512
1513         if (node == NULL || node->surface == NULL) {
1514                 TBM_LOG_E("_queue_node_pop_front failed\n");
1515                 pthread_mutex_unlock(&surface_queue->lock);
1516
1517                 _tbm_surf_queue_mutex_unlock();
1518                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1519         }
1520
1521         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1522
1523         *surface = node->surface;
1524
1525         if (surface_queue->acquire_sync_count == 1) {
1526                 tbm_surface_info_s info;
1527                 int ret;
1528
1529                 TBM_LOG_E("start map surface:%p", *surface);
1530                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1531                 TBM_LOG_E("end map surface:%p", *surface);
1532                 if (ret == TBM_SURFACE_ERROR_NONE)
1533                         tbm_surface_unmap(*surface);
1534         }
1535
1536         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1537
1538         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1539
1540         pthread_mutex_unlock(&surface_queue->lock);
1541
1542         _tbm_surf_queue_mutex_unlock();
1543
1544         if (b_dump_queue)
1545                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1546
1547         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1548
1549         return TBM_SURFACE_QUEUE_ERROR_NONE;
1550 }
1551
1552 int
1553 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1554 {
1555         _tbm_surf_queue_mutex_lock();
1556
1557         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1558
1559         pthread_mutex_lock(&surface_queue->lock);
1560
1561         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1562
1563         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1564                 pthread_mutex_unlock(&surface_queue->lock);
1565                 _tbm_surf_queue_mutex_unlock();
1566                 return 1;
1567         }
1568
1569         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1570                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1571                 _tbm_surf_queue_mutex_unlock();
1572                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1573                 pthread_mutex_unlock(&surface_queue->lock);
1574                 return 1;
1575         }
1576
1577         pthread_mutex_unlock(&surface_queue->lock);
1578         _tbm_surf_queue_mutex_unlock();
1579         return 0;
1580 }
1581
void
tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	LIST_DEL(&surface_queue->item_link);

	LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
		_queue_delete_node(surface_queue, node);

	if (surface_queue->impl && surface_queue->impl->destroy)
		surface_queue->impl->destroy(surface_queue);

	_notify_emit(surface_queue, &surface_queue->destory_noti);

	_notify_remove_all(&surface_queue->destory_noti);
	_notify_remove_all(&surface_queue->dequeuable_noti);
	_notify_remove_all(&surface_queue->dequeue_noti);
	_notify_remove_all(&surface_queue->can_dequeue_noti);
	_notify_remove_all(&surface_queue->acquirable_noti);
	_notify_remove_all(&surface_queue->reset_noti);
	_trace_remove_all(&surface_queue->trace_noti);

	pthread_mutex_destroy(&surface_queue->lock);

	free(surface_queue);

	if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
		_deinit_tbm_surf_queue_bufmgr();

	_tbm_surf_queue_mutex_unlock();
}

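/**
 * Reset the queue to a new width/height/format; a call with unchanged
 * geometry is a no-op. With TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE the
 * buffers still held by the client are only marked delete_pending,
 * otherwise every node is deleted immediately. The reset notification is
 * emitted at the end.
 */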
tbm_surface_queue_error_e
tbm_surface_queue_reset(tbm_surface_queue_h surface_queue,
			int width, int height, int format)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (width == surface_queue->width && height == surface_queue->height &&
		format == surface_queue->format) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	surface_queue->width = width;
	surface_queue->height = height;
	surface_queue->format = format;

	if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
		/* Destroy the surfaces in the free_queue and defer deletion of the ones in use */
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
			_queue_delete_node(surface_queue, node);

		LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
			node->delete_pending = 1;
	} else {
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
			_queue_delete_node(surface_queue, node);

		_queue_init(&surface_queue->dirty_queue);
		LIST_INITHEAD(&surface_queue->list);
	}

	/* Reset queue */
	_queue_init(&surface_queue->free_queue);

	surface_queue->num_attached = 0;

	if (surface_queue->impl && surface_queue->impl->reset)
		surface_queue->impl->reset(surface_queue);

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

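/**
 * Emit the reset notification without touching the queue itself, e.g. to
 * make listeners re-examine the queue state.
 */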
tbm_surface_queue_error_e
tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

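/**
 * Change the number of buffers managed by the queue. With 'flush' set the
 * queue is reset much like tbm_surface_queue_flush(); without it, shrinking
 * only detaches surplus surfaces from the free_queue. For example, shrinking
 * a queue from 3 to 2 buffers without flushing:
 *
 *   tbm_surface_queue_set_size(queue, 2, 0);
 */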
tbm_surface_queue_error_e
tbm_surface_queue_set_size(tbm_surface_queue_h surface_queue,
			int queue_size, int flush)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
					TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
					TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if ((surface_queue->queue_size == queue_size) && !flush) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	if (flush) {
		if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
			/* Destroy the surfaces in the free_queue and defer deletion of the ones in use */
			LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
				_queue_delete_node(surface_queue, node);

			LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
				node->delete_pending = 1;
		} else {
			LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
				_queue_delete_node(surface_queue, node);

			_queue_init(&surface_queue->dirty_queue);
			LIST_INITHEAD(&surface_queue->list);
		}

		/* Reset queue */
		_queue_init(&surface_queue->free_queue);

		surface_queue->num_attached = 0;
		surface_queue->queue_size = queue_size;

		if (surface_queue->impl && surface_queue->impl->reset)
			surface_queue->impl->reset(surface_queue);

		pthread_mutex_unlock(&surface_queue->lock);
		pthread_cond_signal(&surface_queue->free_cond);

		_tbm_surf_queue_mutex_unlock();

		_notify_emit(surface_queue, &surface_queue->reset_noti);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	} else {
		if (surface_queue->queue_size > queue_size) {
			int need_del = surface_queue->queue_size - queue_size;

			LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
				TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

				if (surface_queue->impl && surface_queue->impl->need_detach)
					surface_queue->impl->need_detach(surface_queue, node);
				else
					_tbm_surface_queue_detach(surface_queue, node->surface);

				need_del--;
				if (need_del == 0)
					break;
			}
		}

		surface_queue->queue_size = queue_size;

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}
}

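/**
 * Detach and destroy only the surfaces sitting in the free_queue, leaving
 * dequeued, enqueued and acquired buffers untouched; typically used to give
 * memory back while the queue stays usable.
 */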
tbm_surface_queue_error_e
tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (surface_queue->num_attached == 0) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	/* Destroy the surfaces in the free_queue */
	while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
		if (surface_queue->impl && surface_queue->impl->need_detach)
			surface_queue->impl->need_detach(surface_queue, node);
		else
			_tbm_surface_queue_detach(surface_queue, node->surface);
	}

	/* Reset queue */
	_queue_init(&surface_queue->free_queue);

	pthread_mutex_unlock(&surface_queue->lock);
	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

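/**
 * Flush the whole queue and emit the reset notification. In
 * TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE mode the buffers owned by the
 * client are marked delete_pending and reclaimed when they come back;
 * otherwise every node is deleted right away.
 */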
tbm_surface_queue_error_e
tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (surface_queue->num_attached == 0) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
		/* Destroy the surfaces in the free_queue and defer deletion of the ones in use */
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
			_queue_delete_node(surface_queue, node);

		LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
			node->delete_pending = 1;
	} else {
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
			_queue_delete_node(surface_queue, node);

		_queue_init(&surface_queue->dirty_queue);
		LIST_INITHEAD(&surface_queue->list);
	}

	/* Reset queue */
	_queue_init(&surface_queue->free_queue);

	surface_queue->num_attached = 0;

	if (surface_queue->impl && surface_queue->impl->reset)
		surface_queue->impl->reset(surface_queue);

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

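/**
 * Store the surfaces attached to this queue in 'surfaces' (which may be
 * NULL to only count them) and the number of them in 'num'. A common
 * pattern is to query the count first and then fill a caller-allocated
 * array:
 *
 *   int num = 0;
 *   tbm_surface_queue_get_surfaces(queue, NULL, &num);
 *   tbm_surface_h *surfs = calloc(num, sizeof(tbm_surface_h));
 *   tbm_surface_queue_get_surfaces(queue, surfs, &num);
 */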
tbm_surface_queue_error_e
tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
			tbm_surface_h *surfaces, int *num)
{
	queue_node *node = NULL;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	*num = 0;

	pthread_mutex_lock(&surface_queue->lock);

	LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
		if (surfaces)
			surfaces[*num] = node->surface;

		*num = *num + 1;
	}

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

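/**
 * Count the buffers currently in the given trace state (dequeued, enqueued,
 * acquired or released); mainly useful for debugging and state dumps.
 */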
tbm_surface_queue_error_e
tbm_surface_queue_get_trace_surface_num(tbm_surface_queue_h surface_queue,
			tbm_surface_queue_trace trace, int *num)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	*num = 0;

	pthread_mutex_lock(&surface_queue->lock);

	switch (trace) {
	case TBM_SURFACE_QUEUE_TRACE_NONE:
		*num = 0;
		break;
	case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
		*num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
		break;
	case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
		*num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
		break;
	case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
		*num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
		break;
	case TBM_SURFACE_QUEUE_TRACE_RELEASE:
		*num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
		break;
	default:
		break;
	}

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

typedef struct {
	int flags;
} tbm_queue_default;

static void
__tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
{
	free(surface_queue->impl_data);
}

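/*
 * Attach one more surface while the queue is not fully populated. If the
 * user installed an alloc_cb, both locks are dropped around the callback so
 * it may call back into the queue; otherwise a surface is created with the
 * flags given at queue creation time.
 */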
static void
__tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
	tbm_surface_h surface;

	if (surface_queue->queue_size == surface_queue->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		_tbm_surf_queue_mutex_lock();
		pthread_mutex_lock(&surface_queue->lock);

		/* silent return */
		if (!surface)
			return;

		tbm_surface_internal_ref(surface);
	} else {
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
}

static const tbm_surface_queue_interface tbm_queue_default_impl = {
	NULL,				/*__tbm_queue_default_init*/
	NULL,				/*__tbm_queue_default_reset*/
	__tbm_queue_default_destroy,
	__tbm_queue_default_need_attach,
	NULL,				/*__tbm_queue_default_enqueue*/
	NULL,				/*__tbm_queue_default_release*/
	NULL,				/*__tbm_queue_default_dequeue*/
	NULL,				/*__tbm_queue_default_acquire*/
	NULL,				/*__tbm_queue_default_need_detach*/
};

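/**
 * Create a queue whose buffers are allocated on demand by
 * __tbm_queue_default_need_attach(). A minimal producer/consumer sketch
 * (error handling omitted; format and flags are only examples):
 *
 *   tbm_surface_queue_h queue =
 *       tbm_surface_queue_create(3, 720, 1280, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   tbm_surface_h surface;
 *
 *   if (tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       // render into surface, then hand it to the consumer side
 *       tbm_surface_queue_enqueue(queue, surface);
 *   }
 *   if (tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       // display/compose surface, then give it back
 *       tbm_surface_queue_release(queue, surface);
 *   }
 *   tbm_surface_queue_destroy(queue);
 */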
tbm_surface_queue_h
tbm_surface_queue_create(int queue_size, int width,
			 int height, int format, int flags)
{
	TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);

	_tbm_surf_queue_mutex_lock();

	tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
					    sizeof(struct _tbm_surface_queue));
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	tbm_queue_default *data = (tbm_queue_default *) calloc(1,
				  sizeof(tbm_queue_default));
	if (data == NULL) {
		TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
		free(surface_queue);
		_tbm_surf_queue_mutex_unlock();
		return NULL;
	}

	data->flags = flags;
	_tbm_surface_queue_init(surface_queue,
				queue_size,
				width, height, format,
				&tbm_queue_default_impl, data);

	_tbm_surf_queue_mutex_unlock();

	return surface_queue;
}

typedef struct {
	int flags;
	queue dequeue_list;
} tbm_queue_sequence;

static void
__tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;

	_queue_init(&data->dequeue_list);
}

static void
__tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;

	if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
		return;

	_queue_init(&data->dequeue_list);
}

static void
__tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
{
	free(surface_queue->impl_data);
}

static void
__tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
	tbm_surface_h surface;

	if (surface_queue->queue_size == surface_queue->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		_tbm_surf_queue_mutex_lock();
		pthread_mutex_lock(&surface_queue->lock);

		/* silent return */
		if (!surface)
			return;

		tbm_surface_internal_ref(surface);
	} else {
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
}

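/*
 * The sequence queue only lets the oldest outstanding dequeued buffer (the
 * head of dequeue_list) move on to the dirty queue; an enqueue that arrives
 * out of order is ignored here so that buffers become acquirable in dequeue
 * order.
 */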
static void
__tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
			     queue_node *node)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
	queue_node *first = NULL;

	first = container_of(data->dequeue_list.head.next, first, item_link);
	if (first != node) {
		return;
	}

	node->priv_flags = 0;

	_queue_node_pop(&data->dequeue_list, node);
	_tbm_surface_queue_enqueue(surface_queue, node, 1);
}

static void
__tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
				queue_node *node)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;

	if (node->priv_flags) {
		node->priv_flags = 0;
		_queue_node_pop(&data->dequeue_list, node);
	}

	_tbm_surface_queue_release(surface_queue, node, 1);
}

static queue_node *
__tbm_queue_sequence_dequeue(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
	queue_node *node;

	node = _tbm_surface_queue_dequeue(surface_queue);
	if (node) {
		_queue_node_push_back(&data->dequeue_list, node);
		node->priv_flags = 1;
	}

	return node;
}

static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
	__tbm_queue_sequence_init,
	__tbm_queue_sequence_reset,
	__tbm_queue_sequence_destroy,
	__tbm_queue_sequence_need_attach,
	__tbm_queue_sequence_enqueue,
	__tbm_queue_sequence_release,
	__tbm_queue_sequence_dequeue,
	NULL,					/*__tbm_queue_sequence_acquire*/
	NULL,					/*__tbm_queue_sequence_need_detach*/
};

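/**
 * Create a queue backed by the sequence implementation above, which keeps
 * the enqueue order aligned with the dequeue order. Creation mirrors
 * tbm_surface_queue_create(), e.g.:
 *
 *   tbm_surface_queue_h queue =
 *       tbm_surface_queue_sequence_create(3, 720, 1280, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 */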
tbm_surface_queue_h
tbm_surface_queue_sequence_create(int queue_size, int width,
				  int height, int format, int flags)
{
	TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);

	_tbm_surf_queue_mutex_lock();

	tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
					    sizeof(struct _tbm_surface_queue));
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
				   sizeof(tbm_queue_sequence));
	if (data == NULL) {
		TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
		free(surface_queue);
		_tbm_surf_queue_mutex_unlock();
		return NULL;
	}

	data->flags = flags;
	_tbm_surface_queue_init(surface_queue,
				queue_size,
				width, height, format,
				&tbm_queue_sequence_impl, data);

	_tbm_surf_queue_mutex_unlock();

	return surface_queue;
}

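/**
 * Set queue mode flags: TBM_SURFACE_QUEUE_MODE_NONE clears the stored modes,
 * any other value is OR-ed into the current mask, e.g.:
 *
 *   tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE);
 */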
tbm_surface_queue_error_e
tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
				  int modes)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
		surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
	else
		surface_queue->modes |= modes;

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}

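/*
 * Configure the forced-synchronization counters: a dequeued-plus-sync_count
 * total of zero arms acquire_sync_count for the buffers that are already
 * enqueued; otherwise enqueue_sync_count is set to that total.
 */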
tbm_surface_queue_error_e
tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
				  unsigned int sync_count)
{
	int dequeue_num, enqueue_num;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
				   TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
	enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);

	if (dequeue_num + sync_count == 0)
		surface_queue->acquire_sync_count = enqueue_num;
	else
		surface_queue->enqueue_sync_count = dequeue_num + sync_count;

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
				surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}