surface_queue: added queue cancel acquire/dequeue
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 #define TBM_QUEUE_DEBUG 0
42
43 #ifdef TRACE
44 #define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
45 #else
46 #define TBM_QUEUE_TRACE(fmt, ...)
47 #endif /* TRACE */
48
49 #if TBM_QUEUE_DEBUG
50 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
51 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
52 #else
53 #define TBM_LOCK()
54 #define TBM_UNLOCK()
55 #endif
56
57 static tbm_bufmgr g_surf_queue_bufmgr;
58 static pthread_mutex_t tbm_surf_queue_lock;
59 void _tbm_surface_queue_mutex_unlock(void);
60
61 /* check condition */
62 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
63         if (!(cond)) {\
64                 TBM_LOG_E("'%s' failed.\n", #cond);\
65                 _tbm_surf_queue_mutex_unlock();\
66                 return;\
67         } \
68 }
69
70 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
71         if (!(cond)) {\
72                 TBM_LOG_E("'%s' failed.\n", #cond);\
73                 _tbm_surf_queue_mutex_unlock();\
74                 return val;\
75         } \
76 }
77
78 typedef enum _queue_node_type {
79         QUEUE_NODE_TYPE_NONE,
80         QUEUE_NODE_TYPE_DEQUEUE,
81         QUEUE_NODE_TYPE_ENQUEUE,
82         QUEUE_NODE_TYPE_ACQUIRE,
83         QUEUE_NODE_TYPE_RELEASE
84 } Queue_Node_Type;
85
86 typedef struct {
87         struct list_head head;
88         int count;
89 } queue;
90
91 typedef struct {
92         tbm_surface_h surface;
93
94         struct list_head item_link;
95         struct list_head link;
96
97         Queue_Node_Type type;
98
99         unsigned int priv_flags;        /*for each queue*/
100 } queue_node;
101
102 typedef struct {
103         struct list_head link;
104
105         tbm_surface_queue_notify_cb cb;
106         void *data;
107 } queue_notify;
108
109 typedef struct {
110         struct list_head link;
111
112         tbm_surface_queue_trace_cb cb;
113         void *data;
114 } queue_trace;
115
116 typedef struct _tbm_surface_queue_interface {
117         void (*init)(tbm_surface_queue_h queue);
118         void (*reset)(tbm_surface_queue_h queue);
119         void (*destroy)(tbm_surface_queue_h queue);
120         void (*need_attach)(tbm_surface_queue_h queue);
121
122         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
123         void (*release)(tbm_surface_queue_h queue, queue_node *node);
124         queue_node *(*dequeue)(tbm_surface_queue_h queue);
125         queue_node *(*acquire)(tbm_surface_queue_h queue);
126         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
127 } tbm_surface_queue_interface;
128
129 struct _tbm_surface_queue {
130         int width;
131         int height;
132         int format;
133         int queue_size;
134         int num_attached;
135
136         queue free_queue;
137         queue dirty_queue;
138         struct list_head list;
139
140         struct list_head destory_noti;
141         struct list_head dequeuable_noti;
142         struct list_head dequeue_noti;
143         struct list_head can_dequeue_noti;
144         struct list_head acquirable_noti;
145         struct list_head reset_noti;
146         struct list_head trace_noti;
147
148         pthread_mutex_t lock;
149         pthread_cond_t free_cond;
150         pthread_cond_t dirty_cond;
151
152         const tbm_surface_queue_interface *impl;
153         void *impl_data;
154
155         //For external buffer allocation
156         tbm_surface_alloc_cb alloc_cb;
157         tbm_surface_free_cb free_cb;
158         void *alloc_cb_data;
159
160         struct list_head item_link; /* link of surface queue */
161 };
162
163 /* LCOV_EXCL_START */
164
165 static bool
166 _tbm_surf_queue_mutex_init(void)
167 {
168         static bool tbm_surf_queue_mutex_init = false;
169
170         if (tbm_surf_queue_mutex_init)
171                 return true;
172
173         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
174                 TBM_LOG_E("fail: pthread_mutex_init\n");
175                 return false;
176         }
177
178         tbm_surf_queue_mutex_init = true;
179
180         return true;
181 }
182
183 static void
184 _tbm_surf_queue_mutex_lock(void)
185 {
186         if (!_tbm_surf_queue_mutex_init()) {
187                 TBM_LOG_E("fail: _tbm_surf_queue_mutex_init\n");
188                 return;
189         }
190
191         pthread_mutex_lock(&tbm_surf_queue_lock);
192 }
193
194 static void
195 _tbm_surf_queue_mutex_unlock(void)
196 {
197         pthread_mutex_unlock(&tbm_surf_queue_lock);
198 }
199
200 static void
201 _init_tbm_surf_queue_bufmgr(void)
202 {
203         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
204 }
205
206 static void
207 _deinit_tbm_surf_queue_bufmgr(void)
208 {
209         if (!g_surf_queue_bufmgr)
210                 return;
211
212         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
213         g_surf_queue_bufmgr = NULL;
214 }
215
216 static int
217 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
218 {
219         tbm_surface_queue_h old_data = NULL;
220
221         if (surface_queue == NULL) {
222                 TBM_LOG_E("error: surface_queue is NULL.\n");
223                 return 0;
224         }
225
226         if (g_surf_queue_bufmgr == NULL) {
227                 TBM_LOG_E("error: g_surf_queue_bufmgr is NULL.\n");
228                 return 0;
229         }
230
231         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
232                 TBM_LOG_E("error: surf_queue_list is empty\n");
233                 return 0;
234         }
235
236         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
237                                 item_link) {
238                 if (old_data == surface_queue) {
239                         TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
240                         return 1;
241                 }
242         }
243
244         TBM_LOG_E("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
245
246         return 0;
247 }
248
249 static queue_node *
250 _queue_node_create(void)
251 {
252         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
253
254         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
255
256         return node;
257 }
258
259 static void
260 _queue_node_delete(queue_node *node)
261 {
262         LIST_DEL(&node->item_link);
263         LIST_DEL(&node->link);
264         free(node);
265 }
266
267 static int
268 _queue_is_empty(queue *queue)
269 {
270         if (LIST_IS_EMPTY(&queue->head))
271                 return 1;
272
273         return 0;
274 }
275
276 static void
277 _queue_node_push_back(queue *queue, queue_node *node)
278 {
279         LIST_ADDTAIL(&node->item_link, &queue->head);
280         queue->count++;
281 }
282
283 static void
284 _queue_node_push_front(queue *queue, queue_node *node)
285 {
286         LIST_ADD(&node->item_link, &queue->head);
287         queue->count++;
288 }
289
290 static queue_node *
291 _queue_node_pop_front(queue *queue)
292 {
293         queue_node *node;
294
295         if (!queue->head.next) return NULL;
296         if (!queue->count) return NULL;
297
298         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
299
300         LIST_DELINIT(&node->item_link);
301         queue->count--;
302
303         return node;
304 }
305
306 static queue_node *
307 _queue_node_pop(queue *queue, queue_node *node)
308 {
309         LIST_DELINIT(&node->item_link);
310         queue->count--;
311
312         return node;
313 }
314
315 static queue_node *
316 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
317                 tbm_surface_h surface, int *out_type)
318 {
319         queue_node *node = NULL;
320
321         if (type == 0)
322                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
323         if (out_type)
324                 *out_type = 0;
325
326         if (type & FREE_QUEUE) {
327                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
328                                          item_link) {
329                         if (node->surface == surface) {
330                                 if (out_type)
331                                         *out_type = FREE_QUEUE;
332
333                                 return node;
334                         }
335                 }
336         }
337
338         if (type & DIRTY_QUEUE) {
339                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
340                                          item_link) {
341                         if (node->surface == surface) {
342                                 if (out_type)
343                                         *out_type = DIRTY_QUEUE;
344
345                                 return node;
346                         }
347                 }
348         }
349
350         if (type & NODE_LIST) {
351                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
352                         if (node->surface == surface) {
353                                 if (out_type)
354                                         *out_type = NODE_LIST;
355
356                                 return node;
357                         }
358                 }
359         }
360
361         TBM_LOG_E("failed to get the queue_node.\n");
362
363         return NULL;
364 }
365
366 static void
367 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
368 {
369         if (node->surface) {
370                 if (surface_queue->free_cb) {
371                         surface_queue->free_cb(surface_queue,
372                                         surface_queue->alloc_cb_data,
373                                         node->surface);
374                 }
375
376                 tbm_surface_destroy(node->surface);
377         }
378
379         _queue_node_delete(node);
380 }
381
382 static void
383 _queue_init(queue *queue)
384 {
385         LIST_INITHEAD(&queue->head);
386
387         queue->count = 0;
388 }
389
390 static void
391 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
392             void *data)
393 {
394         TBM_RETURN_IF_FAIL(cb != NULL);
395
396         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
397
398         TBM_RETURN_IF_FAIL(item != NULL);
399
400         LIST_INITHEAD(&item->link);
401         item->cb = cb;
402         item->data = data;
403
404         LIST_ADDTAIL(&item->link, list);
405 }
406
407 static void
408 _notify_remove(struct list_head *list,
409                tbm_surface_queue_notify_cb cb, void *data)
410 {
411         queue_notify *item = NULL, *tmp;
412
413         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
414                 if (item->cb == cb && item->data == data) {
415                         LIST_DEL(&item->link);
416                         free(item);
417                         return;
418                 }
419         }
420
421         TBM_LOG_E("Cannot find notify\n");
422 }
423
424 static void
425 _notify_remove_all(struct list_head *list)
426 {
427         queue_notify *item = NULL, *tmp;
428
429         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
430                 LIST_DEL(&item->link);
431                 free(item);
432         }
433 }
434
435 static void
436 _notify_emit(tbm_surface_queue_h surface_queue,
437              struct list_head *list)
438 {
439         queue_notify *item = NULL, *tmp;
440
441         /*
442                 item->cb is a callback registered from outside libtbm.
443                 The tbm user may remove an item from the list inside the callback,
444                 so we have to use LIST_FOR_EACH_ENTRY_SAFE here.
445         */
446         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
447                 item->cb(surface_queue, item->data);
448 }
449
450 static void
451 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
452             void *data)
453 {
454         TBM_RETURN_IF_FAIL(cb != NULL);
455
456         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
457
458         TBM_RETURN_IF_FAIL(item != NULL);
459
460         LIST_INITHEAD(&item->link);
461         item->cb = cb;
462         item->data = data;
463
464         LIST_ADDTAIL(&item->link, list);
465 }
466
467 static void
468 _trace_remove(struct list_head *list,
469                tbm_surface_queue_trace_cb cb, void *data)
470 {
471         queue_trace *item = NULL, *tmp;
472
473         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
474                 if (item->cb == cb && item->data == data) {
475                         LIST_DEL(&item->link);
476                         free(item);
477                         return;
478                 }
479         }
480
481         TBM_LOG_E("Cannot find trace\n");
482 }
483
484 static void
485 _trace_remove_all(struct list_head *list)
486 {
487         queue_trace *item = NULL, *tmp;
488
489         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
490                 LIST_DEL(&item->link);
491                 free(item);
492         }
493 }
494
495 static void
496 _trace_emit(tbm_surface_queue_h surface_queue,
497              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
498 {
499         queue_trace *item = NULL, *tmp;
500
501         /*
502                 item->cb is a callback registered from outside libtbm.
503                 The tbm user may remove an item from the list inside the callback,
504                 so we have to use LIST_FOR_EACH_ENTRY_SAFE here.
505         */
506         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
507                 item->cb(surface_queue, surface, trace, item->data);
508 }
509
510 static int
511 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
512 {
513         queue_node *node = NULL;
514         int count = 0;
515
516         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
517                 if (node->type == type)
518                         count++;
519         }
520
521         return count;
522 }
523
524 static void
525 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
526                           tbm_surface_h surface)
527 {
528         queue_node *node;
529
530         node = _queue_node_create();
531         TBM_RETURN_IF_FAIL(node != NULL);
532
533         tbm_surface_internal_ref(surface);
534         node->surface = surface;
535
536         LIST_ADDTAIL(&node->link, &surface_queue->list);
537         surface_queue->num_attached++;
538         _queue_node_push_back(&surface_queue->free_queue, node);
539 }
540
541 static void
542 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
543                           tbm_surface_h surface)
544 {
545         queue_node *node;
546         int queue_type;
547
548         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
549         if (node) {
550                 _queue_delete_node(surface_queue, node);
551                 surface_queue->num_attached--;
552         }
553 }
554
555 static void
556 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
557                            queue_node *node, int push_back)
558 {
559         if (push_back)
560                 _queue_node_push_back(&surface_queue->dirty_queue, node);
561         else
562                 _queue_node_push_front(&surface_queue->dirty_queue, node);
563 }
564
565 static queue_node *
566 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
567 {
568         queue_node *node;
569
570         node = _queue_node_pop_front(&surface_queue->free_queue);
571
572         return node;
573 }
574
575 static queue_node *
576 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
577 {
578         queue_node *node;
579
580         if (_queue_is_empty(&surface_queue->dirty_queue))
581                 return NULL;
582
583         node = _queue_node_pop_front(&surface_queue->dirty_queue);
584
585         return node;
586 }
587
588 static void
589 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
590                            queue_node *node, int push_back)
591 {
592         if (push_back)
593                 _queue_node_push_back(&surface_queue->free_queue, node);
594         else
595                 _queue_node_push_front(&surface_queue->free_queue, node);
596 }
597
598 static void
599 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
600                         int queue_size,
601                         int width, int height, int format,
602                         const tbm_surface_queue_interface *impl, void *data)
603 {
604         TBM_RETURN_IF_FAIL(surface_queue != NULL);
605         TBM_RETURN_IF_FAIL(impl != NULL);
606
607         if (!g_surf_queue_bufmgr)
608                 _init_tbm_surf_queue_bufmgr();
609
610         pthread_mutex_init(&surface_queue->lock, NULL);
611         pthread_cond_init(&surface_queue->free_cond, NULL);
612         pthread_cond_init(&surface_queue->dirty_cond, NULL);
613
614         surface_queue->queue_size = queue_size;
615         surface_queue->width = width;
616         surface_queue->height = height;
617         surface_queue->format = format;
618         surface_queue->impl = impl;
619         surface_queue->impl_data = data;
620
621         _queue_init(&surface_queue->free_queue);
622         _queue_init(&surface_queue->dirty_queue);
623         LIST_INITHEAD(&surface_queue->list);
624
625         LIST_INITHEAD(&surface_queue->destory_noti);
626         LIST_INITHEAD(&surface_queue->dequeuable_noti);
627         LIST_INITHEAD(&surface_queue->dequeue_noti);
628         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
629         LIST_INITHEAD(&surface_queue->acquirable_noti);
630         LIST_INITHEAD(&surface_queue->reset_noti);
631         LIST_INITHEAD(&surface_queue->trace_noti);
632
633         if (surface_queue->impl && surface_queue->impl->init)
634                 surface_queue->impl->init(surface_queue);
635
636         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
637 }
638
639 tbm_surface_queue_error_e
640 tbm_surface_queue_add_destroy_cb(
641         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
642         void *data)
643 {
644         _tbm_surf_queue_mutex_lock();
645
646         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
647                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
648
649         pthread_mutex_lock(&surface_queue->lock);
650
651         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
652
653         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
654
655         pthread_mutex_unlock(&surface_queue->lock);
656
657         _tbm_surf_queue_mutex_unlock();
658
659         return TBM_SURFACE_QUEUE_ERROR_NONE;
660 }
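
A minimal usage sketch (editor's addition, not part of the file above): registering and later removing a destroy notification. The callback signature follows _notify_emit() above; the queue handle is assumed to come from a creator function outside this excerpt, and on_queue_destroy()/watch_queue() are hypothetical names.

#include <tbm_surface_queue.h>

/* Hypothetical listener: called from tbm_surface_queue_destroy() after the
 * backend's destroy hook has run. */
static void
on_queue_destroy(tbm_surface_queue_h surface_queue, void *data)
{
        /* drop any reference to 'surface_queue' cached in 'data' */
}

static void
watch_queue(tbm_surface_queue_h queue, void *ctx)
{
        tbm_surface_queue_add_destroy_cb(queue, on_queue_destroy, ctx);
        /* ... and if the watcher goes away before the queue does: */
        /* tbm_surface_queue_remove_destroy_cb(queue, on_queue_destroy, ctx); */
}
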
661
662 tbm_surface_queue_error_e
663 tbm_surface_queue_remove_destroy_cb(
664         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
665         void *data)
666 {
667         _tbm_surf_queue_mutex_lock();
668
669         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
670                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
671
672         pthread_mutex_lock(&surface_queue->lock);
673
674         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
675
676         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
677
678         pthread_mutex_unlock(&surface_queue->lock);
679
680         _tbm_surf_queue_mutex_unlock();
681
682         return TBM_SURFACE_QUEUE_ERROR_NONE;
683 }
684
685 tbm_surface_queue_error_e
686 tbm_surface_queue_add_dequeuable_cb(
687         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
688         void *data)
689 {
690         _tbm_surf_queue_mutex_lock();
691
692         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
693                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
694
695         pthread_mutex_lock(&surface_queue->lock);
696
697         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
698
699         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
700
701         pthread_mutex_unlock(&surface_queue->lock);
702
703         _tbm_surf_queue_mutex_unlock();
704
705         return TBM_SURFACE_QUEUE_ERROR_NONE;
706 }
707
708 tbm_surface_queue_error_e
709 tbm_surface_queue_remove_dequeuable_cb(
710         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
711         void *data)
712 {
713         _tbm_surf_queue_mutex_lock();
714
715         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
716                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
717
718         pthread_mutex_lock(&surface_queue->lock);
719
720         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
721
722         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
723
724         pthread_mutex_unlock(&surface_queue->lock);
725
726         _tbm_surf_queue_mutex_unlock();
727
728         return TBM_SURFACE_QUEUE_ERROR_NONE;
729 }
730
731 tbm_surface_queue_error_e
732 tbm_surface_queue_add_dequeue_cb(
733         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
734         void *data)
735 {
736         _tbm_surf_queue_mutex_lock();
737
738         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
739                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
740
741         pthread_mutex_lock(&surface_queue->lock);
742
743         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
744
745         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
746
747         pthread_mutex_unlock(&surface_queue->lock);
748
749         _tbm_surf_queue_mutex_unlock();
750
751         return TBM_SURFACE_QUEUE_ERROR_NONE;
752 }
753
754 tbm_surface_queue_error_e
755 tbm_surface_queue_remove_dequeue_cb(
756         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
757         void *data)
758 {
759         _tbm_surf_queue_mutex_lock();
760
761         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
762                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
763
764         pthread_mutex_lock(&surface_queue->lock);
765
766         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
767
768         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
769
770         pthread_mutex_unlock(&surface_queue->lock);
771
772         _tbm_surf_queue_mutex_unlock();
773
774         return TBM_SURFACE_QUEUE_ERROR_NONE;
775 }
776
777 tbm_surface_queue_error_e
778 tbm_surface_queue_add_can_dequeue_cb(
779         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
780         void *data)
781 {
782         _tbm_surf_queue_mutex_lock();
783
784         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
785                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
786
787         pthread_mutex_lock(&surface_queue->lock);
788
789         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
790
791         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
792
793         pthread_mutex_unlock(&surface_queue->lock);
794
795         _tbm_surf_queue_mutex_unlock();
796
797         return TBM_SURFACE_QUEUE_ERROR_NONE;
798 }
799
800 tbm_surface_queue_error_e
801 tbm_surface_queue_remove_can_dequeue_cb(
802         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
803         void *data)
804 {
805         _tbm_surf_queue_mutex_lock();
806
807         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
808                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
809
810         pthread_mutex_lock(&surface_queue->lock);
811
812         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
813
814         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
815
816         pthread_mutex_unlock(&surface_queue->lock);
817
818         _tbm_surf_queue_mutex_unlock();
819
820         return TBM_SURFACE_QUEUE_ERROR_NONE;
821 }
822
823 tbm_surface_queue_error_e
824 tbm_surface_queue_add_acquirable_cb(
825         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
826         void *data)
827 {
828         _tbm_surf_queue_mutex_lock();
829
830         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
831                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
832
833         pthread_mutex_lock(&surface_queue->lock);
834
835         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
836
837         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
838
839         pthread_mutex_unlock(&surface_queue->lock);
840
841         _tbm_surf_queue_mutex_unlock();
842
843         return TBM_SURFACE_QUEUE_ERROR_NONE;
844 }
845
846 tbm_surface_queue_error_e
847 tbm_surface_queue_remove_acquirable_cb(
848         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
849         void *data)
850 {
851         _tbm_surf_queue_mutex_lock();
852
853         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
854                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
855
856         pthread_mutex_lock(&surface_queue->lock);
857
858         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
859
860         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
861
862         pthread_mutex_unlock(&surface_queue->lock);
863
864         _tbm_surf_queue_mutex_unlock();
865
866         return TBM_SURFACE_QUEUE_ERROR_NONE;
867 }
868
869 tbm_surface_queue_error_e
870 tbm_surface_queue_add_trace_cb(
871         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
872         void *data)
873 {
874         _tbm_surf_queue_mutex_lock();
875
876         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
877                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
878
879         pthread_mutex_lock(&surface_queue->lock);
880
881         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
882
883         _trace_add(&surface_queue->trace_noti, trace_cb, data);
884
885         pthread_mutex_unlock(&surface_queue->lock);
886
887         _tbm_surf_queue_mutex_unlock();
888
889         return TBM_SURFACE_QUEUE_ERROR_NONE;
890 }
891
892 tbm_surface_queue_error_e
893 tbm_surface_queue_remove_trace_cb(
894         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
895         void *data)
896 {
897         _tbm_surf_queue_mutex_lock();
898
899         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
900                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
901
902         pthread_mutex_lock(&surface_queue->lock);
903
904         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
905
906         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
907
908         pthread_mutex_unlock(&surface_queue->lock);
909
910         _tbm_surf_queue_mutex_unlock();
911
912         return TBM_SURFACE_QUEUE_ERROR_NONE;
913 }
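
Another hedged sketch: a trace callback, with the signature implied by _trace_emit() above, observes every buffer transition the queue reports, including the new cancel-dequeue/cancel-acquire events. The on_queue_trace() name is hypothetical.

#include <stdio.h>
#include <tbm_surface_queue.h>

/* 'trace' is one of the TBM_SURFACE_QUEUE_TRACE_* values emitted in this file. */
static void
on_queue_trace(tbm_surface_queue_h surface_queue, tbm_surface_h surface,
               tbm_surface_queue_trace trace, void *data)
{
        fprintf(stderr, "queue %p: surface %p transition %d\n",
                (void *)surface_queue, (void *)surface, (int)trace);
}

/* tbm_surface_queue_add_trace_cb(queue, on_queue_trace, NULL); */
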
914
915 tbm_surface_queue_error_e
916 tbm_surface_queue_set_alloc_cb(
917         tbm_surface_queue_h surface_queue,
918         tbm_surface_alloc_cb alloc_cb,
919         tbm_surface_free_cb free_cb,
920         void *data)
921 {
922         _tbm_surf_queue_mutex_lock();
923
924         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
925                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
926
927         pthread_mutex_lock(&surface_queue->lock);
928
929         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
930
931         surface_queue->alloc_cb = alloc_cb;
932         surface_queue->free_cb = free_cb;
933         surface_queue->alloc_cb_data = data;
934
935         pthread_mutex_unlock(&surface_queue->lock);
936
937         _tbm_surf_queue_mutex_unlock();
938
939         return TBM_SURFACE_QUEUE_ERROR_NONE;
940 }
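
A hedged sketch of external allocation. The free callback's argument order can be read off _queue_delete_node() above; the alloc callback is assumed to return the tbm_surface_h to attach (its call site is not in this excerpt, so treat that signature as an assumption). All names below are hypothetical.

#include <tbm_surface.h>
#include <tbm_surface_queue.h>

struct alloc_ctx {
        int width, height, format;
};

/* Assumed allocator signature: return the surface the queue should attach. */
static tbm_surface_h
my_alloc(tbm_surface_queue_h surface_queue, void *data)
{
        struct alloc_ctx *ctx = data;

        return tbm_surface_create(ctx->width, ctx->height, ctx->format);
}

/* Argument order matches the call in _queue_delete_node(); the queue calls
 * tbm_surface_destroy() on the surface right after this returns. */
static void
my_free(tbm_surface_queue_h surface_queue, void *data, tbm_surface_h surface)
{
        /* release any per-surface bookkeeping kept in 'data' */
}

/* tbm_surface_queue_set_alloc_cb(queue, my_alloc, my_free, &ctx); */
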
941
942 int
943 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
944 {
945         int width;
946
947         _tbm_surf_queue_mutex_lock();
948
949         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
950
951         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
952
953         width = surface_queue->width;
954
955         _tbm_surf_queue_mutex_unlock();
956
957         return width;
958 }
959
960 int
961 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
962 {
963         int height;
964
965         _tbm_surf_queue_mutex_lock();
966
967         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
968
969         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
970
971         height = surface_queue->height;
972
973         _tbm_surf_queue_mutex_unlock();
974
975         return height;
976 }
977
978 int
979 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
980 {
981         int format;
982
983         _tbm_surf_queue_mutex_lock();
984
985         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
986
987         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
988
989         format = surface_queue->format;
990
991         _tbm_surf_queue_mutex_unlock();
992
993         return format;
994 }
995
996 int
997 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
998 {
999         int queue_size;
1000
1001         _tbm_surf_queue_mutex_lock();
1002
1003         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1004
1005         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1006
1007         queue_size = surface_queue->queue_size;
1008
1009         _tbm_surf_queue_mutex_unlock();
1010
1011         return queue_size;
1012 }
1013
1014 tbm_surface_queue_error_e
1015 tbm_surface_queue_add_reset_cb(
1016         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1017         void *data)
1018 {
1019         _tbm_surf_queue_mutex_lock();
1020
1021         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1022                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1023
1024         pthread_mutex_lock(&surface_queue->lock);
1025
1026         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1027
1028         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1029
1030         pthread_mutex_unlock(&surface_queue->lock);
1031
1032         _tbm_surf_queue_mutex_unlock();
1033
1034         return TBM_SURFACE_QUEUE_ERROR_NONE;
1035 }
1036
1037 tbm_surface_queue_error_e
1038 tbm_surface_queue_remove_reset_cb(
1039         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1040         void *data)
1041 {
1042         _tbm_surf_queue_mutex_lock();
1043
1044         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1045                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1046
1047         pthread_mutex_lock(&surface_queue->lock);
1048
1049         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1050
1051         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1052
1053         pthread_mutex_unlock(&surface_queue->lock);
1054
1055         _tbm_surf_queue_mutex_unlock();
1056
1057         return TBM_SURFACE_QUEUE_ERROR_NONE;
1058 }
1059
1060 tbm_surface_queue_error_e
1061 tbm_surface_queue_enqueue(tbm_surface_queue_h
1062                           surface_queue, tbm_surface_h surface)
1063 {
1064         queue_node *node;
1065         int queue_type;
1066
1067         _tbm_surf_queue_mutex_lock();
1068
1069         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1070                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1071         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1072                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1073
1074         if (b_dump_queue)
1075                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1076
1077         pthread_mutex_lock(&surface_queue->lock);
1078
1079         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1080
1081         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1082         if (node == NULL || queue_type != NODE_LIST) {
1083                 TBM_LOG_E("tbm_surface_queue_enqueue::surface is unknown or already in the free_queue or dirty_queue. node:%p, type:%d\n",
1084                         node, queue_type);
1085                 pthread_mutex_unlock(&surface_queue->lock);
1086
1087                 _tbm_surf_queue_mutex_unlock();
1088
1089                 if (!node)
1090                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1091                 else
1092                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1093         }
1094
1095         if (surface_queue->impl && surface_queue->impl->enqueue)
1096                 surface_queue->impl->enqueue(surface_queue, node);
1097         else
1098                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1099
1100         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1101                 TBM_LOG_E("enqueued the surface, but the dirty_queue is empty. node:%p\n", node);
1102                 pthread_mutex_unlock(&surface_queue->lock);
1103
1104                 _tbm_surf_queue_mutex_unlock();
1105                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1106         }
1107
1108         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1109
1110         pthread_mutex_unlock(&surface_queue->lock);
1111         pthread_cond_signal(&surface_queue->dirty_cond);
1112
1113         _tbm_surf_queue_mutex_unlock();
1114
1115         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1116
1117         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1118
1119         return TBM_SURFACE_QUEUE_ERROR_NONE;
1120 }
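
A hedged producer-side sketch built only on functions defined in this file plus tbm_surface_map()/tbm_surface_unmap() from tbm_surface.h: dequeue a free buffer, write into it, then enqueue it for a consumer. The queue handle is assumed to come from a creator function outside this excerpt; produce_one_frame() is a hypothetical name.

#include <tbm_surface.h>
#include <tbm_surface_queue.h>

static int
produce_one_frame(tbm_surface_queue_h queue)
{
        tbm_surface_h surface = NULL;
        tbm_surface_info_s info;

        if (tbm_surface_queue_dequeue(queue, &surface)
            != TBM_SURFACE_QUEUE_ERROR_NONE)
                return 0;

        if (tbm_surface_map(surface, TBM_SURF_OPTION_WRITE, &info)
            == TBM_SURFACE_ERROR_NONE) {
                /* ... draw into info.planes[0].ptr ... */
                tbm_surface_unmap(surface);
        }

        return tbm_surface_queue_enqueue(queue, surface)
               == TBM_SURFACE_QUEUE_ERROR_NONE;
}
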
1121
1122 tbm_surface_queue_error_e
1123 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1124                           surface_queue, tbm_surface_h surface)
1125 {
1126         queue_node *node;
1127         int queue_type;
1128
1129         _tbm_surf_queue_mutex_lock();
1130
1131         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1132                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1133         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1134                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1135
1136         pthread_mutex_lock(&surface_queue->lock);
1137
1138         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1139
1140         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1141         if (node == NULL || queue_type != NODE_LIST) {
1142                 TBM_LOG_E("tbm_surface_queue_cancel_dequeue::surface is unknown or already in the free_queue or dirty_queue. node:%p, type:%d\n",
1143                         node, queue_type);
1144                 pthread_mutex_unlock(&surface_queue->lock);
1145
1146                 _tbm_surf_queue_mutex_unlock();
1147                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1148         }
1149
1150         if (surface_queue->queue_size < surface_queue->num_attached) {
1151                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1152
1153                 if (surface_queue->impl && surface_queue->impl->need_detach)
1154                         surface_queue->impl->need_detach(surface_queue, node);
1155                 else
1156                         _tbm_surface_queue_detach(surface_queue, surface);
1157
1158                 pthread_mutex_unlock(&surface_queue->lock);
1159
1160                 _tbm_surf_queue_mutex_unlock();
1161
1162                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1163
1164                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1165         }
1166
1167         if (surface_queue->impl && surface_queue->impl->release)
1168                 surface_queue->impl->release(surface_queue, node);
1169         else
1170                 _tbm_surface_queue_release(surface_queue, node, 1);
1171
1172         if (_queue_is_empty(&surface_queue->free_queue)) {
1173                 pthread_mutex_unlock(&surface_queue->lock);
1174
1175                 TBM_LOG_E("surface_queue->free_queue is empty.\n");
1176                 _tbm_surf_queue_mutex_unlock();
1177                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1178         }
1179
1180         node->type = QUEUE_NODE_TYPE_RELEASE;
1181
1182         pthread_mutex_unlock(&surface_queue->lock);
1183         pthread_cond_signal(&surface_queue->free_cond);
1184
1185         _tbm_surf_queue_mutex_unlock();
1186
1187         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1188
1189         return TBM_SURFACE_QUEUE_ERROR_NONE;
1190 }
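
The producer-side cancel path added by this commit, sketched under the same assumptions as above: when a dequeued buffer ends up unused (a dropped frame, for example), it is handed back to the free queue, or detached if the queue has shrunk, instead of being enqueued. drop_frame() is a hypothetical name.

static int
drop_frame(tbm_surface_queue_h queue, tbm_surface_h unused_surface)
{
        /* TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE is returned when the surface
         * was not dequeued from this queue in the first place. */
        return tbm_surface_queue_cancel_dequeue(queue, unused_surface)
               == TBM_SURFACE_QUEUE_ERROR_NONE;
}
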
1191
1192 tbm_surface_queue_error_e
1193 tbm_surface_queue_dequeue(tbm_surface_queue_h
1194                           surface_queue, tbm_surface_h *surface)
1195 {
1196         queue_node *node;
1197
1198         _tbm_surf_queue_mutex_lock();
1199
1200         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1201                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1202         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1203                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1204
1205         *surface = NULL;
1206
1207         pthread_mutex_lock(&surface_queue->lock);
1208
1209         if (_queue_is_empty(&surface_queue->free_queue)) {
1210                 if (surface_queue->impl && surface_queue->impl->need_attach)
1211                         surface_queue->impl->need_attach(surface_queue);
1212
1213                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1214                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1215                         pthread_mutex_unlock(&surface_queue->lock);
1216                         _tbm_surf_queue_mutex_unlock();
1217                         return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1218                 }
1219         }
1220
1221         if (surface_queue->impl && surface_queue->impl->dequeue)
1222                 node = surface_queue->impl->dequeue(surface_queue);
1223         else
1224                 node = _tbm_surface_queue_dequeue(surface_queue);
1225
1226         if (node == NULL || node->surface == NULL) {
1227                 TBM_LOG_E("_queue_node_pop_front failed\n");
1228                 pthread_mutex_unlock(&surface_queue->lock);
1229
1230                 _tbm_surf_queue_mutex_unlock();
1231                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1232         }
1233
1234         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1235         *surface = node->surface;
1236
1237         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1238
1239         pthread_mutex_unlock(&surface_queue->lock);
1240
1241         _tbm_surf_queue_mutex_unlock();
1242
1243         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1244
1245         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1246
1247         return TBM_SURFACE_QUEUE_ERROR_NONE;
1248 }
1249
1250 int
1251 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1252 {
1253         _tbm_surf_queue_mutex_lock();
1254
1255         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1256
1257         _tbm_surf_queue_mutex_unlock();
1258
1259         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1260
1261         _tbm_surf_queue_mutex_lock();
1262
1263         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1264
1265         pthread_mutex_lock(&surface_queue->lock);
1266
1267         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1268
1269         if (_queue_is_empty(&surface_queue->free_queue)) {
1270                 if (surface_queue->impl && surface_queue->impl->need_attach)
1271                         surface_queue->impl->need_attach(surface_queue);
1272
1273                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1274                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1275                         pthread_mutex_unlock(&surface_queue->lock);
1276                         _tbm_surf_queue_mutex_unlock();
1277                         return 0;
1278                 }
1279         }
1280
1281         if (!_queue_is_empty(&surface_queue->free_queue)) {
1282                 pthread_mutex_unlock(&surface_queue->lock);
1283                 _tbm_surf_queue_mutex_unlock();
1284                 return 1;
1285         }
1286
1287         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1288                                                 QUEUE_NODE_TYPE_ACQUIRE)) {
1289                 _tbm_surf_queue_mutex_unlock();
1290                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1291                 _tbm_surf_queue_mutex_lock();
1292
1293                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1294                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1295                         pthread_mutex_unlock(&surface_queue->lock);
1296                         _tbm_surf_queue_mutex_unlock();
1297                         return 0;
1298                 }
1299
1300                 pthread_mutex_unlock(&surface_queue->lock);
1301                 _tbm_surf_queue_mutex_unlock();
1302                 return 1;
1303         }
1304
1305         pthread_mutex_unlock(&surface_queue->lock);
1306         _tbm_surf_queue_mutex_unlock();
1307         return 0;
1308 }
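
A hedged sketch of the blocking pattern this function enables: with wait non-zero the call sleeps on free_cond until a buffer is released, but only while at least one buffer is currently in the ACQUIRE state; otherwise it returns 0 immediately. dequeue_blocking() is a hypothetical name.

static int
dequeue_blocking(tbm_surface_queue_h queue, tbm_surface_h *out)
{
        if (!tbm_surface_queue_can_dequeue(queue, 1 /* wait */))
                return 0;

        return tbm_surface_queue_dequeue(queue, out)
               == TBM_SURFACE_QUEUE_ERROR_NONE;
}
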
1309
1310 tbm_surface_queue_error_e
1311 tbm_surface_queue_release(tbm_surface_queue_h
1312                           surface_queue, tbm_surface_h surface)
1313 {
1314         queue_node *node;
1315         int queue_type;
1316
1317         _tbm_surf_queue_mutex_lock();
1318
1319         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1320                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1321         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1322                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1323
1324         pthread_mutex_lock(&surface_queue->lock);
1325
1326         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1327
1328         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1329         if (node == NULL || queue_type != NODE_LIST) {
1330                 TBM_LOG_E("tbm_surface_queue_release::surface is unknown or already in the free_queue or dirty_queue. node:%p, type:%d\n",
1331                         node, queue_type);
1332                 pthread_mutex_unlock(&surface_queue->lock);
1333
1334                 _tbm_surf_queue_mutex_unlock();
1335
1336                 if (!node)
1337                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1338                 else
1339                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1340         }
1341
1342         if (surface_queue->queue_size < surface_queue->num_attached) {
1343                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1344
1345                 if (surface_queue->impl && surface_queue->impl->need_detach)
1346                         surface_queue->impl->need_detach(surface_queue, node);
1347                 else
1348                         _tbm_surface_queue_detach(surface_queue, surface);
1349
1350                 pthread_mutex_unlock(&surface_queue->lock);
1351
1352                 _tbm_surf_queue_mutex_unlock();
1353                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1354         }
1355
1356         if (surface_queue->impl && surface_queue->impl->release)
1357                 surface_queue->impl->release(surface_queue, node);
1358         else
1359                 _tbm_surface_queue_release(surface_queue, node, 1);
1360
1361         if (_queue_is_empty(&surface_queue->free_queue)) {
1362                 pthread_mutex_unlock(&surface_queue->lock);
1363
1364                 TBM_LOG_E("surface_queue->free_queue is empty.\n");
1365                 _tbm_surf_queue_mutex_unlock();
1366                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1367         }
1368
1369         node->type = QUEUE_NODE_TYPE_RELEASE;
1370
1371         pthread_mutex_unlock(&surface_queue->lock);
1372         pthread_cond_signal(&surface_queue->free_cond);
1373
1374         _tbm_surf_queue_mutex_unlock();
1375
1376         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1377
1378         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1379
1380         return TBM_SURFACE_QUEUE_ERROR_NONE;
1381 }
1382
1383 tbm_surface_queue_error_e
1384 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1385                         surface_queue, tbm_surface_h surface)
1386 {
1387         queue_node *node;
1388         int queue_type;
1389
1390         _tbm_surf_queue_mutex_lock();
1391
1392         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1393                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1394         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1395                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1396
1397         pthread_mutex_lock(&surface_queue->lock);
1398
1399         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1400
1401         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1402         if (node == NULL || queue_type != NODE_LIST) {
1403                 TBM_LOG_E("tbm_surface_queue_cancel_acquire::surface is unknown or already in the free_queue or dirty_queue. node:%p, type:%d\n",
1404                         node, queue_type);
1405                 pthread_mutex_unlock(&surface_queue->lock);
1406
1407                 _tbm_surf_queue_mutex_unlock();
1408                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1409         }
1410
1411         if (surface_queue->impl && surface_queue->impl->enqueue)
1412                 surface_queue->impl->enqueue(surface_queue, node);
1413         else
1414                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1415
1416         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1417                 TBM_LOG_E("enqueued the surface, but the dirty_queue is empty. node:%p\n", node);
1418                 pthread_mutex_unlock(&surface_queue->lock);
1419
1420                 _tbm_surf_queue_mutex_unlock();
1421                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1422         }
1423
1424         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1425
1426         pthread_mutex_unlock(&surface_queue->lock);
1427         pthread_cond_signal(&surface_queue->dirty_cond);
1428
1429         _tbm_surf_queue_mutex_unlock();
1430
1431         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1432
1433         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1434
1435         return TBM_SURFACE_QUEUE_ERROR_NONE;
1436 }
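
The consumer-side cancel path added by this commit, sketched here: a buffer that was acquired but cannot be presented is put back at the tail of the dirty queue, so a later acquire will see it again instead of it being released to the producer. unshow_frame() is a hypothetical name.

static int
unshow_frame(tbm_surface_queue_h queue, tbm_surface_h acquired_surface)
{
        return tbm_surface_queue_cancel_acquire(queue, acquired_surface)
               == TBM_SURFACE_QUEUE_ERROR_NONE;
}
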
1437
1438 tbm_surface_queue_error_e
1439 tbm_surface_queue_acquire(tbm_surface_queue_h
1440                           surface_queue, tbm_surface_h *surface)
1441 {
1442         queue_node *node;
1443
1444         _tbm_surf_queue_mutex_lock();
1445
1446         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1447                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1448         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1449                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1450
1451         *surface = NULL;
1452
1453         pthread_mutex_lock(&surface_queue->lock);
1454
1455         if (surface_queue->impl && surface_queue->impl->acquire)
1456                 node = surface_queue->impl->acquire(surface_queue);
1457         else
1458                 node = _tbm_surface_queue_acquire(surface_queue);
1459
1460         if (node == NULL || node->surface == NULL) {
1461                 TBM_LOG_E("_queue_node_pop_front failed\n");
1462                 pthread_mutex_unlock(&surface_queue->lock);
1463
1464                 _tbm_surf_queue_mutex_unlock();
1465                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1466         }
1467
1468         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1469
1470         *surface = node->surface;
1471
1472         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1473
1474         pthread_mutex_unlock(&surface_queue->lock);
1475
1476         _tbm_surf_queue_mutex_unlock();
1477
1478         if (b_dump_queue)
1479                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1480
1481         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1482
1483         return TBM_SURFACE_QUEUE_ERROR_NONE;
1484 }
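
A hedged consumer-side sketch: check with can_acquire, take the oldest enqueued buffer, hand it to the display path (outside libtbm), then release it so the producer can dequeue it again. consume_one_frame() is a hypothetical name.

#include <tbm_surface_queue.h>

static void
consume_one_frame(tbm_surface_queue_h queue)
{
        tbm_surface_h surface = NULL;

        if (!tbm_surface_queue_can_acquire(queue, 0 /* do not wait */))
                return;

        if (tbm_surface_queue_acquire(queue, &surface)
            != TBM_SURFACE_QUEUE_ERROR_NONE)
                return;

        /* ... scan out or composite 'surface' here ... */

        tbm_surface_queue_release(queue, surface);
}
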
1485
1486 int
1487 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1488 {
1489         _tbm_surf_queue_mutex_lock();
1490
1491         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1492
1493         pthread_mutex_lock(&surface_queue->lock);
1494
1495         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1496
1497         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1498                 pthread_mutex_unlock(&surface_queue->lock);
1499                 _tbm_surf_queue_mutex_unlock();
1500                 return 1;
1501         }
1502
1503         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1504                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1505                 _tbm_surf_queue_mutex_unlock();
1506                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1507                 _tbm_surf_queue_mutex_lock();
1508
1509                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1510                         TBM_LOG_E("surface_queue:%p is invalid", surface_queue);
1511                         pthread_mutex_unlock(&surface_queue->lock);
1512                         _tbm_surf_queue_mutex_unlock();
1513                         return 0;
1514                 }
1515
1516                 pthread_mutex_unlock(&surface_queue->lock);
1517                 _tbm_surf_queue_mutex_unlock();
1518                 return 1;
1519         }
1520
1521         pthread_mutex_unlock(&surface_queue->lock);
1522         _tbm_surf_queue_mutex_unlock();
1523         return 0;
1524 }
1525
1526 void
1527 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1528 {
1529         queue_node *node = NULL, *tmp;
1530
1531         _tbm_surf_queue_mutex_lock();
1532
1533         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1534
1535         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1536
1537         LIST_DEL(&surface_queue->item_link);
1538
1539         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1540                 _queue_delete_node(surface_queue, node);
1541
1542         if (surface_queue->impl && surface_queue->impl->destroy)
1543                 surface_queue->impl->destroy(surface_queue);
1544
1545         _notify_emit(surface_queue, &surface_queue->destory_noti);
1546
1547         _notify_remove_all(&surface_queue->destory_noti);
1548         _notify_remove_all(&surface_queue->dequeuable_noti);
1549         _notify_remove_all(&surface_queue->dequeue_noti);
1550         _notify_remove_all(&surface_queue->can_dequeue_noti);
1551         _notify_remove_all(&surface_queue->acquirable_noti);
1552         _notify_remove_all(&surface_queue->reset_noti);
1553         _trace_remove_all(&surface_queue->trace_noti);
1554
1555         pthread_mutex_destroy(&surface_queue->lock);
1556
1557         free(surface_queue);
1558
1559         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1560                 _deinit_tbm_surf_queue_bufmgr();
1561
1562         _tbm_surf_queue_mutex_unlock();
1563 }
1564
1565 tbm_surface_queue_error_e
1566 tbm_surface_queue_reset(tbm_surface_queue_h
1567                         surface_queue, int width, int height, int format)
1568 {
1569         queue_node *node = NULL, *tmp;
1570
1571         _tbm_surf_queue_mutex_lock();
1572
1573         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1574                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1575
1576         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1577
1578         if (width == surface_queue->width && height == surface_queue->height &&
1579                 format == surface_queue->format) {
1580                 _tbm_surf_queue_mutex_unlock();
1581                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1582         }
1583
1584         pthread_mutex_lock(&surface_queue->lock);
1585
1586         surface_queue->width = width;
1587         surface_queue->height = height;
1588         surface_queue->format = format;
1589
1590         /* Destroy the attached surfaces and remove their nodes */
1591         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1592                 _queue_delete_node(surface_queue, node);
1593
1594         /* Reset queue */
1595         _queue_init(&surface_queue->free_queue);
1596         _queue_init(&surface_queue->dirty_queue);
1597         LIST_INITHEAD(&surface_queue->list);
1598
1599         surface_queue->num_attached = 0;
1600
1601         if (surface_queue->impl && surface_queue->impl->reset)
1602                 surface_queue->impl->reset(surface_queue);
1603
1604         pthread_mutex_unlock(&surface_queue->lock);
1605         pthread_cond_signal(&surface_queue->free_cond);
1606
1607         _tbm_surf_queue_mutex_unlock();
1608
1609         _notify_emit(surface_queue, &surface_queue->reset_noti);
1610
1611         return TBM_SURFACE_QUEUE_ERROR_NONE;
1612 }
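
A hedged sketch of resizing through reset: the call destroys every attached surface, re-initializes both queues and then fires the reset notification, so listeners should treat any cached tbm_surface_h from this queue as stale. The on_queue_reset()/resize_queue() names are hypothetical.

/* Registered once with tbm_surface_queue_add_reset_cb(queue, on_queue_reset, NULL). */
static void
on_queue_reset(tbm_surface_queue_h surface_queue, void *data)
{
        /* invalidate any cached surfaces referenced through 'data' */
}

static void
resize_queue(tbm_surface_queue_h queue, int new_width, int new_height)
{
        int format = tbm_surface_queue_get_format(queue);

        tbm_surface_queue_reset(queue, new_width, new_height, format);
}
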
1613
1614 tbm_surface_queue_error_e
1615 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1616 {
1617         _tbm_surf_queue_mutex_lock();
1618
1619         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1620                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1621
1622         _tbm_surf_queue_mutex_unlock();
1623
1624         _notify_emit(surface_queue, &surface_queue->reset_noti);
1625
1626         return TBM_SURFACE_QUEUE_ERROR_NONE;
1627 }
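
/*
 * Usage sketch (illustrative only): tbm_surface_queue_notify_reset() only
 * emits the reset notification and leaves the buffers untouched, so a server
 * might use it to ask clients to drop and re-dequeue their surfaces.
 *
 *   if (tbm_surface_queue_notify_reset(queue) != TBM_SURFACE_QUEUE_ERROR_NONE)
 *       fprintf(stderr, "notify_reset failed\n");
 */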
1628
1629 tbm_surface_queue_error_e
1630 tbm_surface_queue_set_size(tbm_surface_queue_h
1631                         surface_queue, int queue_size, int flush)
1632 {
1633         queue_node *node = NULL, *tmp;
1634
1635         _tbm_surf_queue_mutex_lock();
1636
1637         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1638                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1639         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1640                                         TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1641
1642         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1643
1644         if ((surface_queue->queue_size == queue_size) && !flush) {
1645                 _tbm_surf_queue_mutex_unlock();
1646                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1647         }
1648
1649         pthread_mutex_lock(&surface_queue->lock);
1650
1651         if (flush) {
1652                 /* Destroy attached surfaces and delete their queue nodes */
1653                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1654                         _queue_delete_node(surface_queue, node);
1655
1656                 /* Reset queue */
1657                 _queue_init(&surface_queue->free_queue);
1658                 _queue_init(&surface_queue->dirty_queue);
1659                 LIST_INITHEAD(&surface_queue->list);
1660
1661                 surface_queue->num_attached = 0;
1662                 surface_queue->queue_size = queue_size;
1663
1664                 if (surface_queue->impl && surface_queue->impl->reset)
1665                         surface_queue->impl->reset(surface_queue);
1666
1667                 pthread_mutex_unlock(&surface_queue->lock);
1668                 pthread_cond_signal(&surface_queue->free_cond);
1669
1670                 _tbm_surf_queue_mutex_unlock();
1671
1672                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1673
1674                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1675         } else {
1676                 if (surface_queue->queue_size > queue_size) {
1677                         int need_del = surface_queue->queue_size - queue_size;
1678
1679                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1680                                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1681
1682                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1683                                         surface_queue->impl->need_detach(surface_queue, node);
1684                                 else
1685                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1686
1687                                 need_del--;
1688                                 if (need_del == 0)
1689                                         break;
1690                         }
1691                 }
1692
1693                 surface_queue->queue_size = queue_size;
1694
1695                 pthread_mutex_unlock(&surface_queue->lock);
1696
1697                 _tbm_surf_queue_mutex_unlock();
1698
1699                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1700         }
1701 }
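
/*
 * Usage sketch (illustrative only; 'queue' is assumed to come from
 * tbm_surface_queue_create()): shrinking without a flush only detaches
 * surplus surfaces from the free queue, while a non-zero flush argument
 * destroys every attached surface and resets the queue.
 *
 *   // lower the limit but keep buffers that are currently in flight
 *   tbm_surface_queue_set_size(queue, 2, 0);
 *
 *   // force a full restart with freshly allocated buffers
 *   tbm_surface_queue_set_size(queue, 4, 1);
 */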
1702
1703 tbm_surface_queue_error_e
1704 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1705 {
1706         queue_node *node = NULL;
1707
1708         _tbm_surf_queue_mutex_lock();
1709
1710         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1711                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1712
1713         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1714
1715         if (surface_queue->num_attached == 0) {
1716                 _tbm_surf_queue_mutex_unlock();
1717                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1718         }
1719
1720         pthread_mutex_lock(&surface_queue->lock);
1721
1722         /* Destroy surfaces remaining in the free_queue */
1723         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1724                 if (surface_queue->impl && surface_queue->impl->need_detach)
1725                         surface_queue->impl->need_detach(surface_queue, node);
1726                 else
1727                         _tbm_surface_queue_detach(surface_queue, node->surface);
1728         }
1729
1730         /* Reset queue */
1731         _queue_init(&surface_queue->free_queue);
1732
1733         pthread_mutex_unlock(&surface_queue->lock);
1734         _tbm_surf_queue_mutex_unlock();
1735
1736         return TBM_SURFACE_QUEUE_ERROR_NONE;
1737 }
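
/*
 * Usage sketch (illustrative only): tbm_surface_queue_free_flush() releases
 * only the buffers sitting unused in the free queue, so it is a reasonable
 * response to memory pressure while rendering continues.
 *
 *   if (tbm_surface_queue_free_flush(queue) != TBM_SURFACE_QUEUE_ERROR_NONE)
 *       fprintf(stderr, "free_flush failed\n");
 */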
1738
1739 tbm_surface_queue_error_e
1740 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1741 {
1742         queue_node *node = NULL, *tmp;
1743
1744         _tbm_surf_queue_mutex_lock();
1745
1746         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1747                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1748
1749         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1750
1751         if (surface_queue->num_attached == 0) {
1752                 _tbm_surf_queue_mutex_unlock();
1753                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1754         }
1755
1756         pthread_mutex_lock(&surface_queue->lock);
1757
1758         /* Destroy attached surfaces and delete their queue nodes */
1759         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1760                 _queue_delete_node(surface_queue, node);
1761
1762         /* Reset queue */
1763         _queue_init(&surface_queue->free_queue);
1764         _queue_init(&surface_queue->dirty_queue);
1765         LIST_INITHEAD(&surface_queue->list);
1766
1767         surface_queue->num_attached = 0;
1768
1769         if (surface_queue->impl && surface_queue->impl->reset)
1770                 surface_queue->impl->reset(surface_queue);
1771
1772         pthread_mutex_unlock(&surface_queue->lock);
1773         pthread_cond_signal(&surface_queue->free_cond);
1774
1775         _tbm_surf_queue_mutex_unlock();
1776
1777         _notify_emit(surface_queue, &surface_queue->reset_noti);
1778
1779         return TBM_SURFACE_QUEUE_ERROR_NONE;
1780 }
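
/*
 * Usage sketch (illustrative only): a full flush destroys every surface
 * attached to the queue and emits the reset notification, so callers usually
 * treat it like a reset without a size change.
 *
 *   tbm_surface_queue_flush(queue);   // drop all attached buffers
 */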
1781
1782 tbm_surface_queue_error_e
1783 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1784                         tbm_surface_h *surfaces, int *num)
1785 {
1786         queue_node *node = NULL;
1787
1788         _tbm_surf_queue_mutex_lock();
1789
1790         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1791                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1792         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1793                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1794
1795         *num = 0;
1796
1797         pthread_mutex_lock(&surface_queue->lock);
1798
1799         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1800                 if (surfaces)
1801                         surfaces[*num] = node->surface;
1802
1803                 *num = *num + 1;
1804         }
1805
1806         pthread_mutex_unlock(&surface_queue->lock);
1807
1808         _tbm_surf_queue_mutex_unlock();
1809
1810         return TBM_SURFACE_QUEUE_ERROR_NONE;
1811 }
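
/*
 * Usage sketch (illustrative only): because the surfaces argument may be
 * NULL, the attached-surface count can be queried first and the array sized
 * accordingly; the count is only a snapshot and never exceeds the queue size.
 *
 *   int num = 0;
 *   tbm_surface_h *surfaces;
 *
 *   tbm_surface_queue_get_surfaces(queue, NULL, &num);          // count only
 *   surfaces = calloc(num, sizeof(tbm_surface_h));
 *   if (surfaces) {
 *       tbm_surface_queue_get_surfaces(queue, surfaces, &num);  // fill array
 *       // inspect surfaces[0 .. num-1] here
 *       free(surfaces);
 *   }
 */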
1812
1813 tbm_surface_queue_error_e
1814 tbm_surface_queue_get_trace_surface_num(
1815                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
1816 {
1817         _tbm_surf_queue_mutex_lock();
1818
1819         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1820                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1821         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1822                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1823
1824         *num = 0;
1825
1826         pthread_mutex_lock(&surface_queue->lock);
1827
1828         switch (trace) {
1829         case TBM_SURFACE_QUEUE_TRACE_NONE:
1830                 *num = 0;
1831                 break;
1832         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
1833                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
1834                 break;
1835         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
1836                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
1837                 break;
1838         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
1839                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
1840                 break;
1841         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
1842                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
1843                 break;
1844         default:
1845                 break;
1846         }
1847
1848         pthread_mutex_unlock(&surface_queue->lock);
1849
1850         _tbm_surf_queue_mutex_unlock();
1851
1852         return TBM_SURFACE_QUEUE_ERROR_NONE;
1853 }
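
/*
 * Usage sketch (illustrative only): the trace counters report how many
 * buffers currently sit in each state, which helps when debugging a stalled
 * producer/consumer pair.
 *
 *   int dequeued = 0, acquired = 0;
 *
 *   tbm_surface_queue_get_trace_surface_num(queue,
 *                   TBM_SURFACE_QUEUE_TRACE_DEQUEUE, &dequeued);
 *   tbm_surface_queue_get_trace_surface_num(queue,
 *                   TBM_SURFACE_QUEUE_TRACE_ACQUIRE, &acquired);
 *   fprintf(stderr, "dequeued:%d acquired:%d\n", dequeued, acquired);
 */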
1854
1855 typedef struct {
1856         int flags;
1857 } tbm_queue_default;
1858
1859 static void
1860 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1861 {
1862         free(surface_queue->impl_data);
1863 }
1864
1865 static void
1866 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
1867 {
1868         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
1869         tbm_surface_h surface;
1870
1871         if (surface_queue->queue_size == surface_queue->num_attached)
1872                 return;
1873
1874         if (surface_queue->alloc_cb) {
1875                 pthread_mutex_unlock(&surface_queue->lock); /* both locks are dropped across the user callback and re-acquired below */
1876                 _tbm_surf_queue_mutex_unlock();
1877                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1878                 _tbm_surf_queue_mutex_lock();
1879                 pthread_mutex_lock(&surface_queue->lock);
1880
1881                 /* silent return */
1882                 if (!surface)
1883                         return;
1884
1885                 tbm_surface_internal_ref(surface);
1886         } else {
1887                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1888                                 surface_queue->height,
1889                                 surface_queue->format,
1890                                 data->flags);
1891                 TBM_RETURN_IF_FAIL(surface != NULL);
1892         }
1893
1894         _tbm_surface_queue_attach(surface_queue, surface);
1895         tbm_surface_internal_unref(surface);
1896 }
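
/*
 * Illustrative sketch (not part of this file): when an allocation callback is
 * registered, the queue calls it here instead of allocating surfaces itself.
 * The callback below is hypothetical, and registering it is assumed to go
 * through the alloc-callback setter declared in tbm_surface_queue.h
 * (tbm_surface_queue_set_alloc_cb); check the header for the exact signature.
 *
 *   static tbm_surface_h
 *   my_alloc_cb(tbm_surface_queue_h queue, void *data)
 *   {
 *       // allocate a surface matching the queue's current geometry
 *       return tbm_surface_create(tbm_surface_queue_get_width(queue),
 *                                 tbm_surface_queue_get_height(queue),
 *                                 tbm_surface_queue_get_format(queue));
 *   }
 */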
1897
1898 static const tbm_surface_queue_interface tbm_queue_default_impl = {
1899         NULL,                           /*__tbm_queue_default_init*/
1900         NULL,                           /*__tbm_queue_default_reset*/
1901         __tbm_queue_default_destroy,
1902         __tbm_queue_default_need_attach,
1903         NULL,                           /*__tbm_queue_default_enqueue*/
1904         NULL,                           /*__tbm_queue_default_release*/
1905         NULL,                           /*__tbm_queue_default_dequeue*/
1906         NULL,                           /*__tbm_queue_default_acquire*/
1907         NULL,                           /*__tbm_queue_default_need_detach*/
1908 };
1909
1910 tbm_surface_queue_h
1911 tbm_surface_queue_create(int queue_size, int width,
1912                          int height, int format, int flags)
1913 {
1914         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1915         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1916         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1917         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1918
1919         _tbm_surf_queue_mutex_lock();
1920
1921         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1922                                             sizeof(struct _tbm_surface_queue));
1923         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1924
1925         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1926
1927         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
1928                                   sizeof(tbm_queue_default));
1929         if (data == NULL) {
1930                 TBM_LOG_E("cannot allocate the tbm_queue_default.\n");
1931                 free(surface_queue);
1932                 _tbm_surf_queue_mutex_unlock();
1933                 return NULL;
1934         }
1935
1936         data->flags = flags;
1937         _tbm_surface_queue_init(surface_queue,
1938                                 queue_size,
1939                                 width, height, format,
1940                                 &tbm_queue_default_impl, data);
1941
1942         _tbm_surf_queue_mutex_unlock();
1943
1944         return surface_queue;
1945 }
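
/*
 * Usage sketch (illustrative only): a minimal producer/consumer round trip
 * with the default queue.  Error handling is abbreviated and the rendering
 * and display steps are placeholders.
 *
 *   tbm_surface_queue_h queue;
 *   tbm_surface_h surface = NULL;
 *
 *   queue = tbm_surface_queue_create(3, 720, 1280,
 *                                    TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *
 *   // producer side
 *   if (tbm_surface_queue_can_dequeue(queue, 1) &&
 *       tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       // render into 'surface', then hand it to the consumer
 *       tbm_surface_queue_enqueue(queue, surface);
 *   }
 *
 *   // consumer side
 *   if (tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       // display 'surface', then give the buffer back
 *       tbm_surface_queue_release(queue, surface);
 *   }
 *
 *   tbm_surface_queue_destroy(queue);
 */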
1946
1947 typedef struct {
1948         int flags;
1949         queue dequeue_list;
1950 } tbm_queue_sequence;
1951
1952 static void
1953 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
1954 {
1955         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1956
1957         _queue_init(&data->dequeue_list);
1958 }
1959
1960 static void
1961 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
1962 {
1963         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1964
1965         _queue_init(&data->dequeue_list);
1966 }
1967
1968 static void
1969 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
1970 {
1971         free(surface_queue->impl_data);
1972 }
1973
1974 static void
1975 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
1976 {
1977         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1978         tbm_surface_h surface;
1979
1980         if (surface_queue->queue_size == surface_queue->num_attached)
1981                 return;
1982
1983         if (surface_queue->alloc_cb) {
1984                 pthread_mutex_unlock(&surface_queue->lock);
1985                 _tbm_surf_queue_mutex_unlock();
1986                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1987                 _tbm_surf_queue_mutex_lock();
1988                 pthread_mutex_lock(&surface_queue->lock);
1989
1990                 /* silent return */
1991                 if (!surface)
1992                         return;
1993
1994                 tbm_surface_internal_ref(surface);
1995         } else {
1996                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1997                                 surface_queue->height,
1998                                 surface_queue->format,
1999                                 data->flags);
2000                 TBM_RETURN_IF_FAIL(surface != NULL);
2001         }
2002
2003         _tbm_surface_queue_attach(surface_queue, surface);
2004         tbm_surface_internal_unref(surface);
2005 }
2006
2007 static void
2008 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2009                              queue_node *node)
2010 {
2011         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2012         queue_node *next = NULL, *tmp;
2013
2014         node->priv_flags = 0;   /* this node is now enqueued by the client */
2015
2016         LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
2017                 if (next->priv_flags)   /* a still-dequeued node blocks later ones, preserving dequeue order */
2018                         break;
2019                 _queue_node_pop(&data->dequeue_list, next);
2020                 _tbm_surface_queue_enqueue(surface_queue, next, 1);
2021         }
2022 }
2023
2024 static void
2025 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2026                                 queue_node *node)
2027 {
2028         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2029
2030         if (node->priv_flags) {
2031                 node->priv_flags = 0;
2032                 _queue_node_pop(&data->dequeue_list, node);
2033         }
2034
2035         _tbm_surface_queue_release(surface_queue, node, 1);
2036 }
2037
2038 static queue_node *
2039 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2040                              surface_queue)
2041 {
2042         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2043         queue_node *node;
2044
2045         node = _tbm_surface_queue_dequeue(surface_queue);
2046         if (node) {
2047                 _queue_node_push_back(&data->dequeue_list, node);
2048                 node->priv_flags = 1;   /* mark as dequeued and still owned by the client */
2049         }
2050
2051         return node;
2052 }
2053
2054 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2055         __tbm_queue_sequence_init,
2056         __tbm_queue_sequence_reset,
2057         __tbm_queue_sequence_destroy,
2058         __tbm_queue_sequence_need_attach,
2059         __tbm_queue_sequence_enqueue,
2060         __tbm_queue_sequence_release,
2061         __tbm_queue_sequence_dequeue,
2062         NULL,                                   /*__tbm_queue_sequence_acquire*/
2063         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2064 };
2065
2066 tbm_surface_queue_h
2067 tbm_surface_queue_sequence_create(int queue_size, int width,
2068                                   int height, int format, int flags)
2069 {
2070         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2071         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
2072         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
2073         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
2074
2075         _tbm_surf_queue_mutex_lock();
2076
2077         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2078                                             sizeof(struct _tbm_surface_queue));
2079         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
2080
2081         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
2082
2083         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2084                                    sizeof(tbm_queue_sequence));
2085         if (data == NULL) {
2086                 TBM_LOG_E("cannot allocate the tbm_queue_sequence.\n");
2087                 free(surface_queue);
2088                 _tbm_surf_queue_mutex_unlock();
2089                 return NULL;
2090         }
2091
2092         data->flags = flags;
2093         _tbm_surface_queue_init(surface_queue,
2094                                 queue_size,
2095                                 width, height, format,
2096                                 &tbm_queue_sequence_impl, data);
2097
2098         _tbm_surf_queue_mutex_unlock();
2099
2100         return surface_queue;
2101 }
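
/*
 * Usage sketch (illustrative only): the sequence queue differs from the
 * default one in that buffers become acquirable in the order they were
 * dequeued, even if the client enqueues them out of order.
 *
 *   tbm_surface_h first = NULL, second = NULL;
 *
 *   tbm_surface_queue_h queue = tbm_surface_queue_sequence_create(3, 720, 1280,
 *                                     TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *
 *   tbm_surface_queue_dequeue(queue, &first);
 *   tbm_surface_queue_dequeue(queue, &second);
 *
 *   // enqueue out of order: 'second' is held back internally until 'first'
 *   // has been enqueued, so acquire() still returns them in dequeue order
 *   tbm_surface_queue_enqueue(queue, second);
 *   tbm_surface_queue_enqueue(queue, first);
 */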
2102 /* LCOV_EXCL_STOP */