ab2fd2bed0d747c218c51bc8e0094d2c3e6656c7
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 #define TBM_QUEUE_DEBUG 0
42
43 #ifdef TRACE
44 #define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
45 #else
46 #define TBM_QUEUE_TRACE(fmt, ...)
47 #endif /* TRACE */
48
49 #if TBM_QUEUE_DEBUG
50 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
51 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
52 #else
53 #define TBM_LOCK()
54 #define TBM_UNLOCK()
55 #endif
56
57 static tbm_bufmgr g_surf_queue_bufmgr;
58 static pthread_mutex_t tbm_surf_queue_lock;
59 void _tbm_surface_queue_mutex_unlock(void);
60
61 /* check condition */
62 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
63         if (!(cond)) {\
64                 TBM_LOG_E("'%s' failed.\n", #cond);\
65                 _tbm_surf_queue_mutex_unlock();\
66                 return;\
67         } \
68 }
69
70 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
71         if (!(cond)) {\
72                 TBM_LOG_E("'%s' failed.\n", #cond);\
73                 _tbm_surf_queue_mutex_unlock();\
74                 return val;\
75         } \
76 }
77
78 typedef enum _queue_node_type {
79         QUEUE_NODE_TYPE_NONE,
80         QUEUE_NODE_TYPE_DEQUEUE,
81         QUEUE_NODE_TYPE_ENQUEUE,
82         QUEUE_NODE_TYPE_ACQUIRE,
83         QUEUE_NODE_TYPE_RELEASE
84 } Queue_Node_Type;
85
86 typedef struct {
87         struct list_head head;
88         int count;
89 } queue;
90
91 typedef struct {
92         tbm_surface_h surface;
93
94         struct list_head item_link;
95         struct list_head link;
96
97         Queue_Node_Type type;
98
99         unsigned int priv_flags;        /*for each queue*/
100
101         int delete_pending;
102 } queue_node;
103
104 typedef struct {
105         struct list_head link;
106
107         tbm_surface_queue_notify_cb cb;
108         void *data;
109 } queue_notify;
110
111 typedef struct {
112         struct list_head link;
113
114         tbm_surface_queue_trace_cb cb;
115         void *data;
116 } queue_trace;
117
118 typedef struct _tbm_surface_queue_interface {
119         void (*init)(tbm_surface_queue_h queue);
120         void (*reset)(tbm_surface_queue_h queue);
121         void (*destroy)(tbm_surface_queue_h queue);
122         void (*need_attach)(tbm_surface_queue_h queue);
123
124         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
125         void (*release)(tbm_surface_queue_h queue, queue_node *node);
126         queue_node *(*dequeue)(tbm_surface_queue_h queue);
127         queue_node *(*acquire)(tbm_surface_queue_h queue);
128         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
129 } tbm_surface_queue_interface;
130
131 struct _tbm_surface_queue {
132         int width;
133         int height;
134         int format;
135         int queue_size;
136         int num_attached;
137
138         queue free_queue;
139         queue dirty_queue;
140         struct list_head list;
141
142         struct list_head destory_noti;
143         struct list_head dequeuable_noti;
144         struct list_head dequeue_noti;
145         struct list_head can_dequeue_noti;
146         struct list_head acquirable_noti;
147         struct list_head reset_noti;
148         struct list_head trace_noti;
149
150         pthread_mutex_t lock;
151         pthread_cond_t free_cond;
152         pthread_cond_t dirty_cond;
153
154         const tbm_surface_queue_interface *impl;
155         void *impl_data;
156
157         //For external buffer allocation
158         tbm_surface_alloc_cb alloc_cb;
159         tbm_surface_free_cb free_cb;
160         void *alloc_cb_data;
161
162         struct list_head item_link; /* link of surface queue */
163
164         int modes;
165         unsigned int enqueue_sync_count;
166         unsigned int acquire_sync_count;
167 };
168
169 static bool
170 _tbm_surf_queue_mutex_init(void)
171 {
172         static bool tbm_surf_queue_mutex_init = false;
173
174         if (tbm_surf_queue_mutex_init)
175                 return true;
176
177         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
178                 TBM_LOG_E("fail: pthread_mutex_init\n");
179                 return false;
180         }
181
182         tbm_surf_queue_mutex_init = true;
183
184         return true;
185 }
186
/* Take the global surface-queue mutex, creating it on first use.
 *
 * NOTE(review): if initialization fails we log and return WITHOUT
 * locking, so the caller proceeds unsynchronized — confirm this
 * best-effort behavior is intended.
 */
static void
_tbm_surf_queue_mutex_lock(void)
{
	if (!_tbm_surf_queue_mutex_init()) {
		TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
		return;
	}

	pthread_mutex_lock(&tbm_surf_queue_lock);
}
197
/* Release the global surface-queue mutex taken by
 * _tbm_surf_queue_mutex_lock(). */
static void
_tbm_surf_queue_mutex_unlock(void)
{
	pthread_mutex_unlock(&tbm_surf_queue_lock);
}
203
/* Create the module-wide bufmgr used by all surface queues.
 * -1 lets tbm_bufmgr_init pick the default device fd. */
static void
_init_tbm_surf_queue_bufmgr(void)
{
	g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
}
209
210 static void
211 _deinit_tbm_surf_queue_bufmgr(void)
212 {
213         if (!g_surf_queue_bufmgr)
214                 return;
215
216         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
217         g_surf_queue_bufmgr = NULL;
218 }
219
220 static int
221 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
222 {
223         tbm_surface_queue_h old_data = NULL;
224
225         if (surface_queue == NULL) {
226                 TBM_LOG_E("error: surface_queue is NULL.\n");
227                 return 0;
228         }
229
230         if (g_surf_queue_bufmgr == NULL) {
231                 TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
232                 return 0;
233         }
234
235         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
236                 TBM_LOG_E("error: surf_queue_list is empty\n");
237                 return 0;
238         }
239
240         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
241                                 item_link) {
242                 if (old_data == surface_queue) {
243                         TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
244                         return 1;
245                 }
246         }
247
248         TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
249
250         return 0;
251 }
252
253 static queue_node *
254 _queue_node_create(void)
255 {
256         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
257
258         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
259
260         return node;
261 }
262
/* Unlink a node from both its per-queue list (item_link) and the
 * global attached-node list (link), then free it. The node must not
 * be referenced afterwards. */
static void
_queue_node_delete(queue_node *node)
{
	LIST_DEL(&node->item_link);
	LIST_DEL(&node->link);
	free(node);
}
270
271 static int
272 _queue_is_empty(queue *queue)
273 {
274         if (LIST_IS_EMPTY(&queue->head))
275                 return 1;
276
277         return 0;
278 }
279
280 static void
281 _queue_node_push_back(queue *queue, queue_node *node)
282 {
283         LIST_ADDTAIL(&node->item_link, &queue->head);
284         queue->count++;
285 }
286
287 static void
288 _queue_node_push_front(queue *queue, queue_node *node)
289 {
290         LIST_ADD(&node->item_link, &queue->head);
291         queue->count++;
292 }
293
294 static queue_node *
295 _queue_node_pop_front(queue *queue)
296 {
297         queue_node *node;
298
299         if (!queue->head.next) return NULL;
300         if (!queue->count) return NULL;
301
302         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
303
304         LIST_DELINIT(&node->item_link);
305         queue->count--;
306
307         return node;
308 }
309
/* Detach the given node from its queue and return it.
 *
 * NOTE(review): the count is decremented unconditionally — this
 * assumes callers only pass nodes currently linked on @queue; confirm
 * at call sites. */
static queue_node *
_queue_node_pop(queue *queue, queue_node *node)
{
	LIST_DELINIT(&node->item_link);
	queue->count--;

	return node;
}
318
/* Find the node wrapping @surface.
 *
 * @type is a bitmask of FREE_QUEUE / DIRTY_QUEUE / NODE_LIST selecting
 * which containers to search; 0 means "search all three". The
 * containers are checked in that fixed order, and the container where
 * the node was found is reported through @out_type (when non-NULL).
 * Returns NULL — and logs an error — when no requested container holds
 * the surface.
 */
static queue_node *
_queue_get_node(tbm_surface_queue_h surface_queue, int type,
		tbm_surface_h surface, int *out_type)
{
	queue_node *node = NULL;

	if (type == 0)
		type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
	if (out_type)
		*out_type = 0;

	if (type & FREE_QUEUE) {
		LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
					 item_link) {
			if (node->surface == surface) {
				if (out_type)
					*out_type = FREE_QUEUE;

				return node;
			}
		}
	}

	if (type & DIRTY_QUEUE) {
		LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
					 item_link) {
			if (node->surface == surface) {
				if (out_type)
					*out_type = DIRTY_QUEUE;

				return node;
			}
		}
	}

	if (type & NODE_LIST) {
		LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
			if (node->surface == surface) {
				if (out_type)
					*out_type = NODE_LIST;

				return node;
			}
		}
	}

	TBM_LOG_E("fail to get the queue_node.\n");

	return NULL;
}
369
370 static void
371 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
372 {
373         if (node->surface) {
374                 if (surface_queue->free_cb) {
375                         surface_queue->free_cb(surface_queue,
376                                         surface_queue->alloc_cb_data,
377                                         node->surface);
378                 }
379
380                 tbm_surface_destroy(node->surface);
381         }
382
383         _queue_node_delete(node);
384 }
385
386 static void
387 _queue_init(queue *queue)
388 {
389         LIST_INITHEAD(&queue->head);
390
391         queue->count = 0;
392 }
393
394 static void
395 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
396             void *data)
397 {
398         TBM_RETURN_IF_FAIL(cb != NULL);
399
400         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
401
402         TBM_RETURN_IF_FAIL(item != NULL);
403
404         LIST_INITHEAD(&item->link);
405         item->cb = cb;
406         item->data = data;
407
408         LIST_ADDTAIL(&item->link, list);
409 }
410
411 static void
412 _notify_remove(struct list_head *list,
413                tbm_surface_queue_notify_cb cb, void *data)
414 {
415         queue_notify *item = NULL, *tmp;
416
417         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
418                 if (item->cb == cb && item->data == data) {
419                         LIST_DEL(&item->link);
420                         free(item);
421                         return;
422                 }
423         }
424
425         TBM_LOG_E("Cannot find notifiy\n");
426 }
427
/* Unlink and free every entry on a notify list, leaving it empty.
 * The _SAFE walker is required because entries are freed mid-walk. */
static void
_notify_remove_all(struct list_head *list)
{
	queue_notify *item = NULL, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
		LIST_DEL(&item->link);
		free(item);
	}
}
438
439 static void
440 _notify_emit(tbm_surface_queue_h surface_queue,
441              struct list_head *list)
442 {
443         queue_notify *item = NULL, *tmp;;
444
445         /*
446                 The item->cb is the outside function of the libtbm.
447                 The tbm user may/can remove the item of the list,
448                 so we have to use the LIST_FOR_EACH_ENTRY_SAFE.
449         */
450         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
451                 item->cb(surface_queue, item->data);
452 }
453
454 static void
455 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
456             void *data)
457 {
458         TBM_RETURN_IF_FAIL(cb != NULL);
459
460         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
461
462         TBM_RETURN_IF_FAIL(item != NULL);
463
464         LIST_INITHEAD(&item->link);
465         item->cb = cb;
466         item->data = data;
467
468         LIST_ADDTAIL(&item->link, list);
469 }
470
471 static void
472 _trace_remove(struct list_head *list,
473                tbm_surface_queue_trace_cb cb, void *data)
474 {
475         queue_trace *item = NULL, *tmp;
476
477         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
478                 if (item->cb == cb && item->data == data) {
479                         LIST_DEL(&item->link);
480                         free(item);
481                         return;
482                 }
483         }
484
485         TBM_LOG_E("Cannot find notifiy\n");
486 }
487
/* Unlink and free every entry on a trace list, leaving it empty.
 * The _SAFE walker is required because entries are freed mid-walk. */
static void
_trace_remove_all(struct list_head *list)
{
	queue_trace *item = NULL, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
		LIST_DEL(&item->link);
		free(item);
	}
}
498
499 static void
500 _trace_emit(tbm_surface_queue_h surface_queue,
501              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
502 {
503         queue_trace *item = NULL, *tmp;;
504
505         /*
506                 The item->cb is the outside function of the libtbm.
507                 The tbm user may/can remove the item of the list,
508                 so we have to use the LIST_FOR_EACH_ENTRY_SAFE.
509         */
510         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
511                 item->cb(surface_queue, surface, trace, item->data);
512 }
513
514 static int
515 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
516 {
517         queue_node *node = NULL;
518         int count = 0;
519
520         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
521                 if (node->type == type)
522                         count++;
523         }
524
525         return count;
526 }
527
/* Wrap @surface in a new node, register it on the queue's attached
 * list, and make it immediately dequeuable via the free queue.
 * Silently returns if node allocation fails (the macro logs). */
static void
_tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
			  tbm_surface_h surface)
{
	queue_node *node;

	node = _queue_node_create();
	TBM_RETURN_IF_FAIL(node != NULL);

	/* The node holds a reference on the surface until it is deleted
	 * in _queue_delete_node(). */
	tbm_surface_internal_ref(surface);
	node->surface = surface;

	LIST_ADDTAIL(&node->link, &surface_queue->list);
	surface_queue->num_attached++;
	_queue_node_push_back(&surface_queue->free_queue, node);
}
544
545 static void
546 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
547                           tbm_surface_h surface)
548 {
549         queue_node *node;
550         int queue_type;
551
552         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
553         if (node) {
554                 _queue_delete_node(surface_queue, node);
555                 surface_queue->num_attached--;
556         }
557 }
558
559 static void
560 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
561                            queue_node *node, int push_back)
562 {
563         if (push_back)
564                 _queue_node_push_back(&surface_queue->dirty_queue, node);
565         else
566                 _queue_node_push_front(&surface_queue->dirty_queue, node);
567 }
568
569 static queue_node *
570 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
571 {
572         queue_node *node;
573
574         node = _queue_node_pop_front(&surface_queue->free_queue);
575
576         return node;
577 }
578
579 static queue_node *
580 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
581 {
582         queue_node *node;
583
584         if (_queue_is_empty(&surface_queue->dirty_queue))
585                 return NULL;
586
587         node = _queue_node_pop_front(&surface_queue->dirty_queue);
588
589         return node;
590 }
591
592 static void
593 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
594                            queue_node *node, int push_back)
595 {
596         if (push_back)
597                 _queue_node_push_back(&surface_queue->free_queue, node);
598         else
599                 _queue_node_push_front(&surface_queue->free_queue, node);
600 }
601
/* One-time setup of a queue object: synchronization primitives,
 * geometry/format, backend implementation hooks, all internal lists,
 * and finally registration on the global surf_queue_list that
 * _tbm_surface_queue_is_valid() checks handles against.
 *
 * NOTE(review): the pthread_mutex_init/pthread_cond_init return values
 * are not checked — confirm failure here is treated as impossible.
 */
static void
_tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
			int queue_size,
			int width, int height, int format,
			const tbm_surface_queue_interface *impl, void *data)
{
	TBM_RETURN_IF_FAIL(surface_queue != NULL);
	TBM_RETURN_IF_FAIL(impl != NULL);

	/* Lazily create the module-wide bufmgr on first queue creation. */
	if (!g_surf_queue_bufmgr)
		_init_tbm_surf_queue_bufmgr();

	pthread_mutex_init(&surface_queue->lock, NULL);
	pthread_cond_init(&surface_queue->free_cond, NULL);
	pthread_cond_init(&surface_queue->dirty_cond, NULL);

	surface_queue->queue_size = queue_size;
	surface_queue->width = width;
	surface_queue->height = height;
	surface_queue->format = format;
	surface_queue->impl = impl;
	surface_queue->impl_data = data;
	surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;

	_queue_init(&surface_queue->free_queue);
	_queue_init(&surface_queue->dirty_queue);
	LIST_INITHEAD(&surface_queue->list);

	LIST_INITHEAD(&surface_queue->destory_noti);
	LIST_INITHEAD(&surface_queue->dequeuable_noti);
	LIST_INITHEAD(&surface_queue->dequeue_noti);
	LIST_INITHEAD(&surface_queue->can_dequeue_noti);
	LIST_INITHEAD(&surface_queue->acquirable_noti);
	LIST_INITHEAD(&surface_queue->reset_noti);
	LIST_INITHEAD(&surface_queue->trace_noti);

	/* Give the backend a chance to initialize its private state. */
	if (surface_queue->impl && surface_queue->impl->init)
		surface_queue->impl->init(surface_queue);

	LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
}
643
/**
 * Register (destroy_cb, data) on the queue's destroy-notification list.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         ..._INVALID_QUEUE for an unregistered handle,
 *         ..._INVALID_PARAMETER when destroy_cb is NULL.
 */
tbm_surface_queue_error_e
tbm_surface_queue_add_destroy_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->destory_noti, destroy_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
668
/**
 * Unregister a previously added (destroy_cb, data) pair. A missing
 * pair is only logged by _notify_remove; NONE is still returned.
 */
tbm_surface_queue_error_e
tbm_surface_queue_remove_destroy_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->destory_noti, destroy_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
691
/**
 * Register (dequeuable_cb, data) on the dequeuable-notification list.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         ..._INVALID_QUEUE for an unregistered handle,
 *         ..._INVALID_PARAMETER when dequeuable_cb is NULL.
 */
tbm_surface_queue_error_e
tbm_surface_queue_add_dequeuable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
716
/**
 * Unregister a previously added (dequeuable_cb, data) pair. A missing
 * pair is only logged by _notify_remove; NONE is still returned.
 */
tbm_surface_queue_error_e
tbm_surface_queue_remove_dequeuable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
739
/**
 * Register (dequeue_cb, data) on the dequeue-notification list.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         ..._INVALID_QUEUE for an unregistered handle,
 *         ..._INVALID_PARAMETER when dequeue_cb is NULL.
 */
tbm_surface_queue_error_e
tbm_surface_queue_add_dequeue_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
764
/**
 * Unregister a previously added (dequeue_cb, data) pair. A missing
 * pair is only logged by _notify_remove; NONE is still returned.
 */
tbm_surface_queue_error_e
tbm_surface_queue_remove_dequeue_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
787
/**
 * Register (can_dequeue_cb, data) on the can-dequeue-notification list.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         ..._INVALID_QUEUE for an unregistered handle,
 *         ..._INVALID_PARAMETER when can_dequeue_cb is NULL.
 */
tbm_surface_queue_error_e
tbm_surface_queue_add_can_dequeue_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
812
/**
 * Unregister a previously added (can_dequeue_cb, data) pair. A missing
 * pair is only logged by _notify_remove; NONE is still returned.
 */
tbm_surface_queue_error_e
tbm_surface_queue_remove_can_dequeue_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
835
/**
 * Register (acquirable_cb, data) on the acquirable-notification list.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         ..._INVALID_QUEUE for an unregistered handle,
 *         ..._INVALID_PARAMETER when acquirable_cb is NULL.
 */
tbm_surface_queue_error_e
tbm_surface_queue_add_acquirable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
860
/**
 * Unregister a previously added (acquirable_cb, data) pair. A missing
 * pair is only logged by _notify_remove; NONE is still returned.
 */
tbm_surface_queue_error_e
tbm_surface_queue_remove_acquirable_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
883
/**
 * Register (trace_cb, data) on the trace-notification list.
 *
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         ..._INVALID_QUEUE for an unregistered handle,
 *         ..._INVALID_PARAMETER when trace_cb is NULL.
 */
tbm_surface_queue_error_e
tbm_surface_queue_add_trace_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_trace_add(&surface_queue->trace_noti, trace_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
908
/**
 * Unregister a previously added (trace_cb, data) pair. A missing pair
 * is only logged by _trace_remove; NONE is still returned.
 */
tbm_surface_queue_error_e
tbm_surface_queue_remove_trace_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	_trace_remove(&surface_queue->trace_noti, trace_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
931
/**
 * Install user hooks for external buffer allocation and free, plus the
 * context pointer passed to both. Any previously set hooks are
 * replaced; NULL values are stored as-is (hooks are optional and
 * checked before each call, e.g. free_cb in _queue_delete_node).
 */
tbm_surface_queue_error_e
tbm_surface_queue_set_alloc_cb(
	tbm_surface_queue_h surface_queue,
	tbm_surface_alloc_cb alloc_cb,
	tbm_surface_free_cb free_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	surface_queue->alloc_cb = alloc_cb;
	surface_queue->free_cb = free_cb;
	surface_queue->alloc_cb_data = data;

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
958
959 int
960 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
961 {
962         int width;
963
964         _tbm_surf_queue_mutex_lock();
965
966         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
967
968         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
969
970         width = surface_queue->width;
971
972         _tbm_surf_queue_mutex_unlock();
973
974         return width;
975 }
976
977 int
978 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
979 {
980         int height;
981
982         _tbm_surf_queue_mutex_lock();
983
984         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
985
986         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
987
988         height = surface_queue->height;
989
990         _tbm_surf_queue_mutex_unlock();
991
992         return height;
993 }
994
995 int
996 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
997 {
998         int format;
999
1000         _tbm_surf_queue_mutex_lock();
1001
1002         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1003
1004         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1005
1006         format = surface_queue->format;
1007
1008         _tbm_surf_queue_mutex_unlock();
1009
1010         return format;
1011 }
1012
1013 int
1014 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1015 {
1016         int queue_size;
1017
1018         _tbm_surf_queue_mutex_lock();
1019
1020         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1021
1022         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1023
1024         queue_size = surface_queue->queue_size;
1025
1026         _tbm_surf_queue_mutex_unlock();
1027
1028         return queue_size;
1029 }
1030
/**
 * tbm_surface_queue_add_reset_cb - register a callback that fires after the
 * queue has been reset (see tbm_surface_queue_reset and set_size with flush).
 *
 * @surface_queue: queue handle (validated against the global queue list)
 * @reset_cb:      non-NULL notification callback
 * @data:          opaque pointer passed back to @reset_cb
 *
 * Returns TBM_SURFACE_QUEUE_ERROR_NONE on success,
 * TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE for a bad handle, or
 * TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER when @reset_cb is NULL.
 */
tbm_surface_queue_error_e
tbm_surface_queue_add_reset_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* append to the reset notification list under the per-queue lock */
	_notify_add(&surface_queue->reset_noti, reset_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1055
/**
 * tbm_surface_queue_remove_reset_cb - unregister a reset callback previously
 * added with tbm_surface_queue_add_reset_cb.
 *
 * @surface_queue: queue handle
 * @reset_cb:      callback to remove (matched together with @data)
 * @data:          user data the callback was registered with
 *
 * Returns TBM_SURFACE_QUEUE_ERROR_NONE (also when no matching entry exists)
 * or TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE for a bad handle.
 */
tbm_surface_queue_error_e
tbm_surface_queue_remove_reset_cb(
	tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
	void *data)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* remove the (callback, data) pair from the reset notification list */
	_notify_remove(&surface_queue->reset_noti, reset_cb, data);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1078
/**
 * tbm_surface_queue_enqueue - hand a dequeued surface back to the queue as
 * new content, moving it to the dirty queue so a consumer can acquire it.
 *
 * @surface_queue: queue handle
 * @surface:       surface previously obtained via tbm_surface_queue_dequeue
 *
 * On success the node is marked QUEUE_NODE_TYPE_ENQUEUE, dirty_cond is
 * signalled, and the trace/acquirable notifications are emitted after all
 * locks have been dropped.
 */
tbm_surface_queue_error_e
tbm_surface_queue_enqueue(tbm_surface_queue_h
			  surface_queue, tbm_surface_h surface)
{
	queue_node *node;
	int queue_type;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	if (b_dump_queue)
		tbm_surface_internal_dump_buffer(surface, "enqueue");

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	/* queue_type == NODE_LIST means the surface is currently owned by the
	 * client (dequeued); anything else means it already sits in the free
	 * or dirty queue and must not be enqueued a second time. */
	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_enqueue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
			node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		if (!node)
			return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
		else
			return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
	}

	if (surface_queue->impl && surface_queue->impl->enqueue)
		surface_queue->impl->enqueue(surface_queue, node);
	else
		_tbm_surface_queue_enqueue(surface_queue, node, 1);

	/* the impl enqueue hook reports no status; verify the surface really
	 * landed in the dirty queue before claiming success */
	if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
		TBM_LOG_E("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		/* NOTE(review): this returns a tbm_surface_error_e value from a
		 * function declared to return tbm_surface_queue_error_e —
		 * confirm callers expect this constant. */
		return TBM_SURFACE_ERROR_INVALID_OPERATION;
	}

	node->type = QUEUE_NODE_TYPE_ENQUEUE;

	/* enqueue_sync_count == 1: one-shot map/unmap for reading —
	 * presumably to block until rendering into the buffer completes;
	 * TODO confirm intended semantics. */
	if (surface_queue->enqueue_sync_count == 1) {
		tbm_surface_info_s info;
		int ret;

		ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
		if (ret == TBM_SURFACE_ERROR_NONE)
			tbm_surface_unmap(surface);
	}

	if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->dirty_cond);

	_tbm_surf_queue_mutex_unlock();

	/* notifications run without any queue lock held */
	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);

	_notify_emit(surface_queue, &surface_queue->acquirable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1151
/**
 * tbm_surface_queue_cancel_dequeue - return a dequeued surface to the queue
 * unchanged (no new content), releasing it back to the free queue.
 *
 * @surface_queue: queue handle
 * @surface:       surface previously obtained via tbm_surface_queue_dequeue
 *
 * Special cases handled before the normal release path:
 *  - nodes flagged delete_pending (queue was reset/resized while the client
 *    held the surface) are destroyed immediately;
 *  - when more surfaces are attached than the current queue_size allows,
 *    the surface is detached instead of being recycled.
 */
tbm_surface_queue_error_e
tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
			  surface_queue, tbm_surface_h surface)
{
	queue_node *node;
	int queue_type;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	/* the surface must currently be dequeued (in the node list only) */
	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_cancel_dequeue::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
			node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
	}

	/* deferred deletion requested while the client held the surface */
	if (node->delete_pending) {
		TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

		_queue_delete_node(surface_queue, node);

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	/* queue was shrunk: drop the surplus surface instead of recycling */
	if (surface_queue->queue_size < surface_queue->num_attached) {
		TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

		if (surface_queue->impl && surface_queue->impl->need_detach)
			surface_queue->impl->need_detach(surface_queue, node);
		else
			_tbm_surface_queue_detach(surface_queue, surface);

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	/* normal path: push the surface back onto the free queue */
	if (surface_queue->impl && surface_queue->impl->release)
		surface_queue->impl->release(surface_queue, node);
	else
		_tbm_surface_queue_release(surface_queue, node, 1);

	/* sanity check that the release hook actually freed a slot */
	if (_queue_is_empty(&surface_queue->free_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);

		TBM_LOG_E("surface_queue->free_queue is empty.\n");
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
	}

	node->type = QUEUE_NODE_TYPE_RELEASE;

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1235
/**
 * tbm_surface_queue_dequeue - obtain a free surface for the producer to
 * render into.
 *
 * @surface_queue: queue handle
 * @surface:       out parameter; set to NULL on entry, to the dequeued
 *                 surface on success
 *
 * When the free queue is empty the impl's need_attach hook gets a chance to
 * attach (allocate) a new surface first. Returns
 * TBM_SURFACE_QUEUE_ERROR_EMPTY when no surface could be produced.
 */
tbm_surface_queue_error_e
tbm_surface_queue_dequeue(tbm_surface_queue_h
			  surface_queue, tbm_surface_h *surface)
{
	queue_node *node;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	*surface = NULL;

	pthread_mutex_lock(&surface_queue->lock);

	if (_queue_is_empty(&surface_queue->free_queue)) {
		if (surface_queue->impl && surface_queue->impl->need_attach)
			surface_queue->impl->need_attach(surface_queue);

		/* the need_attach callback may have destroyed the queue;
		 * re-validate before touching it again */
		if (!_tbm_surface_queue_is_valid(surface_queue)) {
			TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
			pthread_mutex_unlock(&surface_queue->lock);
			_tbm_surf_queue_mutex_unlock();
			return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
		}
	}

	if (surface_queue->impl && surface_queue->impl->dequeue)
		node = surface_queue->impl->dequeue(surface_queue);
	else
		node = _tbm_surface_queue_dequeue(surface_queue);

	if (node == NULL || node->surface == NULL) {
		TBM_LOG_E("_queue_node_pop_front failed\n");
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_EMPTY;
	}

	node->type = QUEUE_NODE_TYPE_DEQUEUE;
	*surface = node->surface;

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	/* notifications run without any queue lock held */
	_trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);

	_notify_emit(surface_queue, &surface_queue->dequeue_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1293
/**
 * tbm_surface_queue_can_dequeue - check (optionally wait) whether a surface
 * can currently be dequeued.
 *
 * @surface_queue: queue handle
 * @wait:          non-zero to block on free_cond when nothing is free but a
 *                 surface is out in ACQUIRE state (i.e. one should return)
 *
 * Returns 1 when a dequeue should succeed, 0 otherwise.
 */
int
tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	_tbm_surf_queue_mutex_unlock();

	/* emitted lock-free so the callback may itself call queue APIs */
	_notify_emit(surface_queue, &surface_queue->can_dequeue_noti);

	/* the callback may have altered or destroyed the queue; re-validate */
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (_queue_is_empty(&surface_queue->free_queue)) {
		if (surface_queue->impl && surface_queue->impl->need_attach)
			surface_queue->impl->need_attach(surface_queue);

		if (!_tbm_surface_queue_is_valid(surface_queue)) {
			TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
			pthread_mutex_unlock(&surface_queue->lock);
			_tbm_surf_queue_mutex_unlock();
			return 0;
		}
	}

	if (!_queue_is_empty(&surface_queue->free_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		return 1;
	}

	/* NOTE(review): after pthread_cond_wait wakes up, 1 is returned
	 * without re-checking the free queue — a spurious wakeup would make
	 * the answer optimistic; also the wait is unbounded. Confirm callers
	 * tolerate this. */
	if (wait && _tbm_surface_queue_get_node_count(surface_queue,
						QUEUE_NODE_TYPE_ACQUIRE)) {
		_tbm_surf_queue_mutex_unlock();
		pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
		pthread_mutex_unlock(&surface_queue->lock);
		return 1;
	}

	pthread_mutex_unlock(&surface_queue->lock);
	_tbm_surf_queue_mutex_unlock();
	return 0;
}
1343
/**
 * tbm_surface_queue_release - give an acquired surface back to the queue so
 * it can be dequeued again by the producer.
 *
 * @surface_queue: queue handle
 * @surface:       surface previously obtained via tbm_surface_queue_acquire
 *
 * Like cancel_dequeue, this first handles deferred deletion
 * (delete_pending) and queue-shrink detachment before the normal path that
 * pushes the node onto the free queue, signals free_cond, and emits the
 * trace/dequeuable notifications.
 */
tbm_surface_queue_error_e
tbm_surface_queue_release(tbm_surface_queue_h
			  surface_queue, tbm_surface_h surface)
{
	queue_node *node;
	int queue_type;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	/* the surface must currently be held by the consumer (node list only) */
	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_release::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
			node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		if (!node)
			return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
		else
			return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
	}

	/* deferred deletion requested while the consumer held the surface */
	if (node->delete_pending) {
		TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

		_queue_delete_node(surface_queue, node);

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	/* queue was shrunk: drop the surplus surface instead of recycling */
	if (surface_queue->queue_size < surface_queue->num_attached) {
		TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

		if (surface_queue->impl && surface_queue->impl->need_detach)
			surface_queue->impl->need_detach(surface_queue, node);
		else
			_tbm_surface_queue_detach(surface_queue, surface);

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	if (surface_queue->impl && surface_queue->impl->release)
		surface_queue->impl->release(surface_queue, node);
	else
		_tbm_surface_queue_release(surface_queue, node, 1);

	/* verify the release hook actually moved the surface to free_queue */
	if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
		TBM_LOG_E("release surface(%p) but surface isn't present in the free_queue\n", surface);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		/* NOTE(review): returns a tbm_surface_error_e value from a
		 * function declared tbm_surface_queue_error_e — confirm
		 * callers expect this constant. */
		return TBM_SURFACE_ERROR_INVALID_OPERATION;
	}

	node->type = QUEUE_NODE_TYPE_RELEASE;

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	/* notifications run without any queue lock held */
	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);

	_notify_emit(surface_queue, &surface_queue->dequeuable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1433
/**
 * tbm_surface_queue_cancel_acquire - undo an acquire: put the surface back
 * on the dirty queue so it can be acquired again later.
 *
 * @surface_queue: queue handle
 * @surface:       surface previously obtained via tbm_surface_queue_acquire
 *
 * On success the node is re-marked QUEUE_NODE_TYPE_ENQUEUE, dirty_cond is
 * signalled, and the trace/acquirable notifications are emitted lock-free.
 */
tbm_surface_queue_error_e
tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
			surface_queue, tbm_surface_h surface)
{
	queue_node *node;
	int queue_type;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
			       TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);

	/* the surface must currently be acquired (node list only) */
	node = _queue_get_node(surface_queue, 0, surface, &queue_type);
	if (node == NULL || queue_type != NODE_LIST) {
		TBM_LOG_E("tbm_surface_queue_cancel_acquire::Surface is existed in free_queue or dirty_queue node:%p, type:%d\n",
			node, queue_type);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
	}

	/* re-enqueue the node onto the dirty queue */
	if (surface_queue->impl && surface_queue->impl->enqueue)
		surface_queue->impl->enqueue(surface_queue, node);
	else
		_tbm_surface_queue_enqueue(surface_queue, node, 1);

	/* verify the enqueue hook actually placed it in the dirty queue */
	if (_queue_is_empty(&surface_queue->dirty_queue)) {
		TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
	}

	node->type = QUEUE_NODE_TYPE_ENQUEUE;

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->dirty_cond);

	_tbm_surf_queue_mutex_unlock();

	/* notifications run without any queue lock held */
	_trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);

	_notify_emit(surface_queue, &surface_queue->acquirable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1488
1489 tbm_surface_queue_error_e
1490 tbm_surface_queue_acquire(tbm_surface_queue_h
1491                           surface_queue, tbm_surface_h *surface)
1492 {
1493         queue_node *node;
1494
1495         _tbm_surf_queue_mutex_lock();
1496
1497         *surface = NULL;
1498
1499         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1500                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1501         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1502                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1503
1504         pthread_mutex_lock(&surface_queue->lock);
1505
1506         if (surface_queue->impl && surface_queue->impl->acquire)
1507                 node = surface_queue->impl->acquire(surface_queue);
1508         else
1509                 node = _tbm_surface_queue_acquire(surface_queue);
1510
1511         if (node == NULL || node->surface == NULL) {
1512                 TBM_LOG_E("_queue_node_pop_front failed\n");
1513                 pthread_mutex_unlock(&surface_queue->lock);
1514
1515                 _tbm_surf_queue_mutex_unlock();
1516                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1517         }
1518
1519         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1520
1521         *surface = node->surface;
1522
1523         if (surface_queue->acquire_sync_count == 1) {
1524                 tbm_surface_info_s info;
1525                 int ret;
1526
1527                 TBM_LOG_E("start map surface:%p", *surface);
1528                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1529                 TBM_LOG_E("end map surface:%p", *surface);
1530                 if (ret == TBM_SURFACE_ERROR_NONE)
1531                         tbm_surface_unmap(*surface);
1532         }
1533
1534         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1535
1536         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1537
1538         pthread_mutex_unlock(&surface_queue->lock);
1539
1540         _tbm_surf_queue_mutex_unlock();
1541
1542         if (b_dump_queue)
1543                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1544
1545         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1546
1547         return TBM_SURFACE_QUEUE_ERROR_NONE;
1548 }
1549
/**
 * tbm_surface_queue_can_acquire - check (optionally wait) whether a surface
 * can currently be acquired.
 *
 * @surface_queue: queue handle
 * @wait:          non-zero to block on dirty_cond when nothing is dirty but
 *                 a surface is out in DEQUEUE state (i.e. one may arrive)
 *
 * Returns 1 when an acquire should succeed, 0 otherwise.
 */
int
tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);

	pthread_mutex_lock(&surface_queue->lock);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	if (!_queue_is_empty(&surface_queue->dirty_queue)) {
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		return 1;
	}

	/* NOTE(review): after pthread_cond_wait wakes up, 1 is returned
	 * without re-checking the dirty queue — a spurious wakeup would make
	 * the answer optimistic; the wait is also unbounded. Confirm callers
	 * tolerate this. */
	if (wait && _tbm_surface_queue_get_node_count(surface_queue,
						QUEUE_NODE_TYPE_DEQUEUE)) {
		_tbm_surf_queue_mutex_unlock();
		pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
		pthread_mutex_unlock(&surface_queue->lock);
		return 1;
	}

	pthread_mutex_unlock(&surface_queue->lock);
	_tbm_surf_queue_mutex_unlock();
	return 0;
}
1579
/**
 * tbm_surface_queue_destroy - tear down a queue: delete every node, run the
 * impl destroy hook, fire the destroy notification, clear all notification
 * lists, and free the queue object.
 *
 * When the last queue is destroyed the global surf-queue bufmgr is
 * de-initialized as well.
 */
void
tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* unlink from the global queue list first so the handle stops
	 * validating for concurrent callers */
	LIST_DEL(&surface_queue->item_link);

	LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
		_queue_delete_node(surface_queue, node);

	if (surface_queue->impl && surface_queue->impl->destroy)
		surface_queue->impl->destroy(surface_queue);

	/* "destory" is the field's actual (misspelled) name in the struct */
	_notify_emit(surface_queue, &surface_queue->destory_noti);

	_notify_remove_all(&surface_queue->destory_noti);
	_notify_remove_all(&surface_queue->dequeuable_noti);
	_notify_remove_all(&surface_queue->dequeue_noti);
	_notify_remove_all(&surface_queue->can_dequeue_noti);
	_notify_remove_all(&surface_queue->acquirable_noti);
	_notify_remove_all(&surface_queue->reset_noti);
	_trace_remove_all(&surface_queue->trace_noti);

	pthread_mutex_destroy(&surface_queue->lock);

	free(surface_queue);

	/* last queue gone: release the global bufmgr as well */
	if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
		_deinit_tbm_surf_queue_bufmgr();

	_tbm_surf_queue_mutex_unlock();
}
1618
/**
 * tbm_surface_queue_reset - change the queue's width/height/format and
 * recycle its buffers.
 *
 * @surface_queue: queue handle
 * @width/@height/@format: new buffer geometry; a no-op (ERROR_NONE) when all
 *                         three already match
 *
 * In GUARANTEE_CYCLE mode, surfaces currently held by clients are only
 * flagged delete_pending and get destroyed when they come back
 * (release/cancel_dequeue); otherwise every node is destroyed immediately.
 * Fires the reset notification after all locks are dropped.
 */
tbm_surface_queue_error_e
tbm_surface_queue_reset(tbm_surface_queue_h
			surface_queue, int width, int height, int format)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* NOTE(review): this comparison reads the fields before taking
	 * surface_queue->lock (only the global mutex is held) — confirm that
	 * is sufficient against concurrent writers. */
	if (width == surface_queue->width && height == surface_queue->height &&
		format == surface_queue->format) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	surface_queue->width = width;
	surface_queue->height = height;
	surface_queue->format = format;

	if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
		/* Destory surface and Push to free_queue */
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
			_queue_delete_node(surface_queue, node);

		/* surfaces still held by clients die when they come back */
		LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
			node->delete_pending = 1;
	} else {
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
			_queue_delete_node(surface_queue, node);

		_queue_init(&surface_queue->dirty_queue);
		LIST_INITHEAD(&surface_queue->list);
	}

	/* Reset queue */
	_queue_init(&surface_queue->free_queue);

	surface_queue->num_attached = 0;

	if (surface_queue->impl && surface_queue->impl->reset)
		surface_queue->impl->reset(surface_queue);

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1676
/**
 * tbm_surface_queue_notify_reset - manually fire the queue's reset
 * notification callbacks without actually resetting anything.
 *
 * Returns TBM_SURFACE_QUEUE_ERROR_NONE, or
 * TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE for a bad handle.
 */
tbm_surface_queue_error_e
tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	_tbm_surf_queue_mutex_unlock();

	/* emitted lock-free so callbacks may call queue APIs */
	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1691
/**
 * tbm_surface_queue_notify_dequeuable - manually fire the queue's
 * dequeuable notification callbacks.
 *
 * Returns TBM_SURFACE_QUEUE_ERROR_NONE, or
 * TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE for a bad handle.
 */
tbm_surface_queue_error_e
tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
{
	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	_tbm_surf_queue_mutex_unlock();

	/* emitted lock-free so callbacks may call queue APIs */
	_notify_emit(surface_queue, &surface_queue->dequeuable_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1706
/**
 * tbm_surface_queue_set_size - change the queue depth.
 *
 * @surface_queue: queue handle
 * @queue_size:    new depth, must be > 0
 * @flush:         non-zero to destroy/recycle all current buffers (same
 *                 semantics as tbm_surface_queue_reset, including the
 *                 GUARANTEE_CYCLE delete_pending behaviour) and fire the
 *                 reset notification
 *
 * Without @flush, shrinking detaches up to (old - new) surfaces from the
 * free queue immediately; surfaces held by clients are handled later via
 * the queue_size < num_attached checks in release/cancel_dequeue.
 */
tbm_surface_queue_error_e
tbm_surface_queue_set_size(tbm_surface_queue_h
			surface_queue, int queue_size, int flush)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
					TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
					TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* no size change and no flush requested: nothing to do */
	if ((surface_queue->queue_size == queue_size) && !flush) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	if (flush) {
		if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
			/* Destory surface and Push to free_queue */
			LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
				_queue_delete_node(surface_queue, node);

			/* surfaces held by clients die when they come back */
			LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
				node->delete_pending = 1;
		} else {
			LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
				_queue_delete_node(surface_queue, node);

			_queue_init(&surface_queue->dirty_queue);
			LIST_INITHEAD(&surface_queue->list);
		}

		/* Reset queue */
		_queue_init(&surface_queue->free_queue);

		surface_queue->num_attached = 0;
		surface_queue->queue_size = queue_size;

		if (surface_queue->impl && surface_queue->impl->reset)
			surface_queue->impl->reset(surface_queue);

		pthread_mutex_unlock(&surface_queue->lock);
		pthread_cond_signal(&surface_queue->free_cond);

		_tbm_surf_queue_mutex_unlock();

		_notify_emit(surface_queue, &surface_queue->reset_noti);

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	} else {
		/* shrink without flush: detach surplus surfaces that are
		 * currently sitting in the free queue */
		if (surface_queue->queue_size > queue_size) {
			int need_del = surface_queue->queue_size - queue_size;

			LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
				TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);

				if (surface_queue->impl && surface_queue->impl->need_detach)
					surface_queue->impl->need_detach(surface_queue, node);
				else
					_tbm_surface_queue_detach(surface_queue, node->surface);

				need_del--;
				if (need_del == 0)
					break;
			}
		}

		surface_queue->queue_size = queue_size;

		pthread_mutex_unlock(&surface_queue->lock);

		_tbm_surf_queue_mutex_unlock();

		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}
}
1789
1790 tbm_surface_queue_error_e
1791 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1792 {
1793         queue_node *node = NULL;
1794
1795         _tbm_surf_queue_mutex_lock();
1796
1797         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1798                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1799
1800         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1801
1802         if (surface_queue->num_attached == 0) {
1803                 _tbm_surf_queue_mutex_unlock();
1804                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1805         }
1806
1807         pthread_mutex_lock(&surface_queue->lock);
1808
1809         /* Destory surface in free_queue */
1810         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1811                 if (surface_queue->impl && surface_queue->impl->need_detach)
1812                         surface_queue->impl->need_detach(surface_queue, node);
1813                 else
1814                         _tbm_surface_queue_detach(surface_queue, node->surface);
1815         }
1816
1817         /* Reset queue */
1818         _queue_init(&surface_queue->free_queue);
1819
1820         pthread_mutex_unlock(&surface_queue->lock);
1821         _tbm_surf_queue_mutex_unlock();
1822
1823         return TBM_SURFACE_QUEUE_ERROR_NONE;
1824 }
1825
/**
 * Flush the whole queue back to its initial empty state.
 *
 * In GUARANTEE_CYCLE mode only idle surfaces (free queue) are destroyed;
 * nodes still held elsewhere are flagged delete_pending (deletion is
 * presumably deferred until they return — confirm against the release
 * path).  Otherwise every node is destroyed outright.  Emits the reset
 * notification after all locks are released.
 *
 * @param surface_queue  queue to flush; must be a valid queue handle
 * @return TBM_SURFACE_QUEUE_ERROR_NONE on success,
 *         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE on a bad handle
 */
tbm_surface_queue_error_e
tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
{
	queue_node *node = NULL, *tmp;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
			       TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	/* Nothing attached: the queue is already in its flushed state. */
	if (surface_queue->num_attached == 0) {
		_tbm_surf_queue_mutex_unlock();
		return TBM_SURFACE_QUEUE_ERROR_NONE;
	}

	pthread_mutex_lock(&surface_queue->lock);

	if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
		/* Destroy only the idle surfaces; in-flight nodes are merely
		 * flagged for deferred deletion. */
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
			_queue_delete_node(surface_queue, node);

		LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
			node->delete_pending = 1;
	} else {
		/* Destroy every node regardless of who currently holds it. */
		LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
			_queue_delete_node(surface_queue, node);

		_queue_init(&surface_queue->dirty_queue);
		LIST_INITHEAD(&surface_queue->list);
	}

	/* Reset the free queue */
	_queue_init(&surface_queue->free_queue);

	surface_queue->num_attached = 0;

	if (surface_queue->impl && surface_queue->impl->reset)
		surface_queue->impl->reset(surface_queue);

	pthread_mutex_unlock(&surface_queue->lock);
	pthread_cond_signal(&surface_queue->free_cond);

	_tbm_surf_queue_mutex_unlock();

	/* Notification is emitted with no locks held. */
	_notify_emit(surface_queue, &surface_queue->reset_noti);

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}
1877
1878 tbm_surface_queue_error_e
1879 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1880                         tbm_surface_h *surfaces, int *num)
1881 {
1882         queue_node *node = NULL;
1883
1884         _tbm_surf_queue_mutex_lock();
1885
1886         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1887                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1888         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1889                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1890
1891         *num = 0;
1892
1893         pthread_mutex_lock(&surface_queue->lock);
1894
1895         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1896                 if (surfaces)
1897                         surfaces[*num] = node->surface;
1898
1899                 *num = *num + 1;
1900         }
1901
1902         pthread_mutex_unlock(&surface_queue->lock);
1903
1904         _tbm_surf_queue_mutex_unlock();
1905
1906         return TBM_SURFACE_QUEUE_ERROR_NONE;
1907 }
1908
1909 tbm_surface_queue_error_e
1910 tbm_surface_queue_get_trace_surface_num(
1911                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1912 {
1913         _tbm_surf_queue_mutex_lock();
1914
1915         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1916                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1917         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1918                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1919
1920         *num = 0;
1921
1922         pthread_mutex_lock(&surface_queue->lock);
1923
1924         switch (trace) {
1925         case TBM_SURFACE_QUEUE_TRACE_NONE:
1926                 *num = 0;
1927                 break;
1928         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1929                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1930                 break;
1931         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1932                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1933                 break;
1934         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1935                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1936                 break;
1937         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1938                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1939                 break;
1940         default:
1941                 break;
1942         }
1943
1944         pthread_mutex_unlock(&surface_queue->lock);
1945
1946         _tbm_surf_queue_mutex_unlock();
1947
1948         return TBM_SURFACE_QUEUE_ERROR_NONE;
1949 }
1950
/* Private data of the default queue implementation. */
typedef struct {
	int flags;	/* surface allocation flags, forwarded to
			 * tbm_surface_internal_create_with_flags() */
} tbm_queue_default;
1954
/* impl->destroy hook: release the tbm_queue_default private data
 * allocated in tbm_surface_queue_create(). */
static void
__tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
{
	free(surface_queue->impl_data);
}
1960
/* impl->need_attach hook: allocate one surface and attach it, unless the
 * queue is already fully populated (num_attached == queue_size).
 *
 * Must be entered with both the global surf-queue mutex and
 * surface_queue->lock held: the alloc_cb path unlocks and relocks them
 * in that order. */
static void
__tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
	tbm_surface_h surface;

	/* Already at capacity: nothing to allocate. */
	if (surface_queue->queue_size == surface_queue->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		/* Drop both locks around the user callback so it may call
		 * back into the queue API without deadlocking. */
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		_tbm_surf_queue_mutex_lock();
		pthread_mutex_lock(&surface_queue->lock);

		/* silent return */
		if (!surface)
			return;

		/* Take a ref so the unref below is balanced on both branches. */
		tbm_surface_internal_ref(surface);
	} else {
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
}
1993
/* Vtable of the default queue: only destroy and need_attach are
 * specialized; NULL slots fall back to the generic queue behavior. */
static const tbm_surface_queue_interface tbm_queue_default_impl = {
	NULL,				/*__tbm_queue_default_init*/
	NULL,				/*__tbm_queue_default_reset*/
	__tbm_queue_default_destroy,
	__tbm_queue_default_need_attach,
	NULL,				/*__tbm_queue_default_enqueue*/
	NULL,				/*__tbm_queue_default_release*/
	NULL,				/*__tbm_queue_default_dequeue*/
	NULL,				/*__tbm_queue_default_acquire*/
	NULL,				/*__tbm_queue_default_need_detach*/
};
2005
/**
 * Create a surface queue backed by the default implementation:
 * surfaces are allocated lazily (need_attach) with the given
 * width/height/format; @flags is forwarded to the surface allocator.
 *
 * @return the new queue, or NULL on invalid arguments or allocation
 *         failure.  NOTE: on calloc failure the RETURN_VAL_IF_FAIL
 *         macro is relied upon to release the surf-queue mutex —
 *         confirm the macro definition does unlock.
 */
tbm_surface_queue_h
tbm_surface_queue_create(int queue_size, int width,
			 int height, int format, int flags)
{
	TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
	TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);

	_tbm_surf_queue_mutex_lock();

	tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
					    sizeof(struct _tbm_surface_queue));
	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);

	TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);

	tbm_queue_default *data = (tbm_queue_default *) calloc(1,
				  sizeof(tbm_queue_default));
	if (data == NULL) {
		TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
		free(surface_queue);
		_tbm_surf_queue_mutex_unlock();
		return NULL;
	}

	data->flags = flags;
	_tbm_surface_queue_init(surface_queue,
				queue_size,
				width, height, format,
				&tbm_queue_default_impl, data);

	_tbm_surf_queue_mutex_unlock();

	return surface_queue;
}
2042
/* Private data of the "sequence" queue implementation, which enforces
 * that surfaces are enqueued in the order they were dequeued. */
typedef struct {
	int flags;		/* surface allocation flags */
	queue dequeue_list;	/* outstanding dequeued nodes, oldest first */
} tbm_queue_sequence;
2047
2048 static void
2049 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2050 {
2051         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2052
2053         _queue_init(&data->dequeue_list);
2054 }
2055
2056 static void
2057 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2058 {
2059         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2060
2061         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2062                 return;
2063
2064         _queue_init(&data->dequeue_list);
2065 }
2066
/* impl->destroy hook: release the tbm_queue_sequence private data
 * allocated in tbm_surface_queue_sequence_create(). */
static void
__tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
{
	free(surface_queue->impl_data);
}
2072
/* impl->need_attach hook: allocate one surface and attach it, unless the
 * queue is already fully populated (num_attached == queue_size).
 *
 * Must be entered with both the global surf-queue mutex and
 * surface_queue->lock held: the alloc_cb path unlocks and relocks them
 * in that order. */
static void
__tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
	tbm_surface_h surface;

	/* Already at capacity: nothing to allocate. */
	if (surface_queue->queue_size == surface_queue->num_attached)
		return;

	if (surface_queue->alloc_cb) {
		/* Drop both locks around the user callback so it may call
		 * back into the queue API without deadlocking. */
		pthread_mutex_unlock(&surface_queue->lock);
		_tbm_surf_queue_mutex_unlock();
		surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
		_tbm_surf_queue_mutex_lock();
		pthread_mutex_lock(&surface_queue->lock);

		/* silent return */
		if (!surface)
			return;

		/* Take a ref so the unref below is balanced on both branches. */
		tbm_surface_internal_ref(surface);
	} else {
		surface = tbm_surface_internal_create_with_flags(surface_queue->width,
				surface_queue->height,
				surface_queue->format,
				data->flags);
		TBM_RETURN_IF_FAIL(surface != NULL);
	}

	_tbm_surface_queue_attach(surface_queue, surface);
	tbm_surface_internal_unref(surface);
}
2105
/* impl->enqueue hook: only accept the enqueue when @node is the oldest
 * outstanding dequeue, enforcing FIFO enqueue order.  An out-of-order
 * enqueue is silently ignored — the node stays on dequeue_list (and out
 * of the dirty queue) until its turn comes or it is released. */
static void
__tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
			     queue_node *node)
{
	tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
	queue_node *first = NULL;

	/* Oldest dequeued node.  NOTE(review): when dequeue_list is empty
	 * this computes a pointer from the list head itself; it is only
	 * compared, never dereferenced, so the enqueue is just ignored. */
	first = container_of(data->dequeue_list.head.next, first, item_link);
	if (first != node) {
		return;
	}

	node->priv_flags = 0;	/* no longer tracked as dequeued */

	_queue_node_pop(&data->dequeue_list, node);
	_tbm_surface_queue_enqueue(surface_queue, node, 1);
}
2123
2124 static void
2125 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2126                                 queue_node *node)
2127 {
2128         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2129
2130         if (node->priv_flags) {
2131                 node->priv_flags = 0;
2132                 _queue_node_pop(&data->dequeue_list, node);
2133         }
2134
2135         _tbm_surface_queue_release(surface_queue, node, 1);
2136 }
2137
2138 static queue_node *
2139 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2140                              surface_queue)
2141 {
2142         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2143         queue_node *node;
2144
2145         node = _tbm_surface_queue_dequeue(surface_queue);
2146         if (node) {
2147                 _queue_node_push_back(&data->dequeue_list, node);
2148                 node->priv_flags = 1;
2149         }
2150
2151         return node;
2152 }
2153
/* Vtable of the sequence queue: specializes the dequeue/enqueue/release
 * path to enforce FIFO enqueue order; NULL slots fall back to the
 * generic queue behavior. */
static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
	__tbm_queue_sequence_init,
	__tbm_queue_sequence_reset,
	__tbm_queue_sequence_destroy,
	__tbm_queue_sequence_need_attach,
	__tbm_queue_sequence_enqueue,
	__tbm_queue_sequence_release,
	__tbm_queue_sequence_dequeue,
	NULL,					/*__tbm_queue_sequence_acquire*/
	NULL,					/*__tbm_queue_sequence_need_dettach*/
};
2165
2166 tbm_surface_queue_h
2167 tbm_surface_queue_sequence_create(int queue_size, int width,
2168                                   int height, int format, int flags)
2169 {
2170         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2171         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
2172         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
2173         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2174
2175         _tbm_surf_queue_mutex_lock();
2176
2177         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2178                                             sizeof(struct _tbm_surface_queue));
2179         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2180
2181         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
2182
2183         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2184                                    sizeof(tbm_queue_sequence));
2185         if (data == NULL) {
2186                 TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
2187                 free(surface_queue);
2188                 _tbm_surf_queue_mutex_unlock();
2189                 return NULL;
2190         }
2191
2192         data->flags = flags;
2193         _tbm_surface_queue_init(surface_queue,
2194                                 queue_size,
2195                                 width, height, format,
2196                                 &tbm_queue_sequence_impl, data);
2197
2198         _tbm_surf_queue_mutex_unlock();
2199
2200         return surface_queue;
2201 }
2202
2203 tbm_surface_queue_error_e
2204 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2205                                   int modes)
2206 {
2207         _tbm_surf_queue_mutex_lock();
2208
2209         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2210                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2211
2212         pthread_mutex_lock(&surface_queue->lock);
2213
2214         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2215                 modes = TBM_SURFACE_QUEUE_MODE_NONE;
2216         else
2217                 surface_queue->modes |= modes;
2218
2219         pthread_mutex_unlock(&surface_queue->lock);
2220
2221         _tbm_surf_queue_mutex_unlock();
2222
2223         return TBM_SURFACE_QUEUE_ERROR_NONE;
2224 }
2225
/**
 * Adjust the enqueue/acquire synchronization counts from the current
 * number of dequeued and enqueued buffers.
 *
 * If dequeue_num + sync_count == 0 the acquire side is capped at the
 * buffers already enqueued; otherwise enqueue_sync_count becomes
 * dequeue_num + sync_count.
 *
 * NOTE(review): dequeue_num (int) + sync_count (unsigned) is evaluated
 * in unsigned arithmetic, so the "== 0" test relies on modular
 * wrap-around — presumably intentional as a cancellation check; confirm
 * against callers.
 */
tbm_surface_queue_error_e
tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
				  unsigned int sync_count)
{
	int dequeue_num, enqueue_num;

	_tbm_surf_queue_mutex_lock();

	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
				   TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);

	pthread_mutex_lock(&surface_queue->lock);

	dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
	enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);

	if (dequeue_num + sync_count == 0)
		surface_queue->acquire_sync_count = enqueue_num;
	else
		surface_queue->enqueue_sync_count = dequeue_num + sync_count;

	TBM_QUEUE_TRACE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
				surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);

	pthread_mutex_unlock(&surface_queue->lock);

	_tbm_surf_queue_mutex_unlock();

	return TBM_SURFACE_QUEUE_ERROR_NONE;
}