fix deadlock problem in queue_reset
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "tbm_bufmgr_int.h"
33 #include "list.h"
34
35 #define FREE_QUEUE      1
36 #define DIRTY_QUEUE     2
37 #define NODE_LIST       4
38
39 #define TBM_QUEUE_DEBUG 0
40
41 #ifdef TRACE
42 #define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
43 #else
44 #define TBM_QUEUE_TRACE(fmt, ...)
45 #endif /* TRACE */
46
47 #if TBM_QUEUE_DEBUG
48 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
49 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
50 #else
51 #define TBM_LOCK()
52 #define TBM_UNLOCK()
53 #endif
54
55 static tbm_bufmgr g_surf_queue_bufmgr;
56 static pthread_mutex_t tbm_surf_queue_lock;
57 void _tbm_surf_queue_mutex_unlock(void);
58
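
/*
 * Locking model: tbm_surf_queue_lock is a process-global mutex that guards
 * the bufmgr's list of queues and every validity check, while each queue has
 * its own surface_queue->lock protecting its free/dirty lists. Public entry
 * points take the global lock first, then the per-queue lock; the per-queue
 * lock is dropped before its condition variables are signalled, and both
 * locks are dropped before user callbacks are emitted.
 */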
59 /* check condition */
60 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
61         if (!(cond)) {\
62                 TBM_LOG_E("'%s' failed.\n", #cond);\
63                 _tbm_surf_queue_mutex_unlock();\
64                 return;\
65         } \
66 }
67
68 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
69         if (!(cond)) {\
70                 TBM_LOG_E("'%s' failed.\n", #cond);\
71                 _tbm_surf_queue_mutex_unlock();\
72                 return val;\
73         } \
74 }
75
76 typedef enum _queue_node_type {
77         QUEUE_NODE_TYPE_NONE,
78         QUEUE_NODE_TYPE_DEQUEUE,
79         QUEUE_NODE_TYPE_ENQUEUE,
80         QUEUE_NODE_TYPE_ACQUIRE,
81         QUEUE_NODE_TYPE_RELEASE
82 } Queue_Node_Type;
83
84 typedef struct {
85         struct list_head head;
86         int count;
87 } queue;
88
89 typedef struct {
90         tbm_surface_h surface;
91
92         struct list_head item_link;
93         struct list_head link;
94
95         Queue_Node_Type type;
96
97         unsigned int priv_flags;        /*for each queue*/
98 } queue_node;
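
/*
 * A queue_node stays on surface_queue->list (via 'link') for as long as its
 * surface is attached; 'item_link' is what moves between free_queue and
 * dirty_queue. 'type' records the last operation applied to the node
 * (dequeue/enqueue/acquire/release).
 */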
99
100 typedef struct {
101         struct list_head link;
102
103         tbm_surface_queue_notify_cb cb;
104         void *data;
105 } queue_notify;
106
107 typedef struct _tbm_surface_queue_interface {
108         void (*init)(tbm_surface_queue_h queue);
109         void (*reset)(tbm_surface_queue_h queue);
110         void (*destroy)(tbm_surface_queue_h queue);
111         void (*need_attach)(tbm_surface_queue_h queue);
112
113         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
114         void (*release)(tbm_surface_queue_h queue, queue_node *node);
115         queue_node *(*dequeue)(tbm_surface_queue_h queue);
116         queue_node *(*acquire)(tbm_surface_queue_h queue);
117 } tbm_surface_queue_interface;
118
119 struct _tbm_surface_queue {
120         int width;
121         int height;
122         int format;
123         int queue_size;
124
125         queue free_queue;
126         queue dirty_queue;
127         struct list_head list;
128
129         struct list_head destory_noti;
130         struct list_head dequeuable_noti;
131         struct list_head dequeue_noti;
132         struct list_head acquirable_noti;
133         struct list_head reset_noti;
134
135         pthread_mutex_t lock;
136         pthread_cond_t free_cond;
137         pthread_cond_t dirty_cond;
138
139         const tbm_surface_queue_interface *impl;
140         void *impl_data;
141
142         //For external buffer allocation
143         tbm_surface_alloc_cb alloc_cb;
144         tbm_surface_free_cb free_cb;
145         void *alloc_cb_data;
146
147         struct list_head item_link; /* link of surface queue */
148 };
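
/*
 * Buffer life cycle: dequeue hands a surface from free_queue to the producer,
 * enqueue parks it on dirty_queue, acquire hands it to the consumer, and
 * release puts it back on free_queue. The impl vtable lets a backend override
 * each step; NULL entries fall back to the default _tbm_surface_queue_*
 * helpers defined below.
 */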
149
150 /* LCOV_EXCL_START */
151
152 static bool
153 _tbm_surf_queue_mutex_init(void)
154 {
155         static bool tbm_surf_queue_mutex_init = false;
156
157         if (tbm_surf_queue_mutex_init)
158                 return true;
159
160         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
161                 TBM_LOG_E("fail: tbm_surf_queue mutex init\n");
162                 return false;
163         }
164
165         tbm_surf_queue_mutex_init = true;
166
167         return true;
168 }
169
170 void
171 _tbm_surf_queue_mutex_lock(void)
172 {
173         if (!_tbm_surf_queue_mutex_init())
174                 return;
175
176         pthread_mutex_lock(&tbm_surf_queue_lock);
177 }
178
179 void
180 _tbm_surf_queue_mutex_unlock(void)
181 {
182         pthread_mutex_unlock(&tbm_surf_queue_lock);
183 }
184
185 static void
186 _init_tbm_surf_queue_bufmgr(void)
187 {
188         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
189 }
190
191 static void
192 _deinit_tbm_surf_queue_bufmgr(void)
193 {
194         if (!g_surf_queue_bufmgr)
195                 return;
196
197         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
198         g_surf_queue_bufmgr = NULL;
199 }
200
201 static int
202 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
203 {
204         tbm_surface_queue_h old_data = NULL, tmp = NULL;
205
206         if (surface_queue == NULL || g_surf_queue_bufmgr == NULL) {
207                 TBM_TRACE("error: tbm_surface_queue(%p)\n", surface_queue);
208                 return 0;
209         }
210
211         if (!LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
212                 LIST_FOR_EACH_ENTRY_SAFE(old_data, tmp, &g_surf_queue_bufmgr->surf_queue_list, item_link) {
213                         if (old_data == surface_queue) {
214                                 TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
215                                 return 1;
216                         }
217                 }
218         }
219         TBM_TRACE("error: tbm_surface_queue(%p)\n", surface_queue);
220         return 0;
221 }
222
223 static queue_node *
224 _queue_node_create(void)
225 {
226         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
227
228         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
229
230         return node;
231 }
232
233 static void
234 _queue_node_delete(queue_node *node)
235 {
236         LIST_DEL(&node->item_link);
237         LIST_DEL(&node->link);
238         free(node);
239 }
240
241 static int
242 _queue_is_empty(queue *queue)
243 {
244         if (LIST_IS_EMPTY(&queue->head))
245                 return 1;
246
247         return 0;
248 }
249
250 static void
251 _queue_node_push_back(queue *queue, queue_node *node)
252 {
253         LIST_ADDTAIL(&node->item_link, &queue->head);
254         queue->count++;
255 }
256
257 static void
258 _queue_node_push_front(queue *queue, queue_node *node)
259 {
260         LIST_ADD(&node->item_link, &queue->head);
261         queue->count++;
262 }
263
264 static queue_node *
265 _queue_node_pop_front(queue *queue)
266 {
267         queue_node *node = NULL;
268
269         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
270
271         LIST_DEL(&node->item_link);
272         queue->count--;
273
274         return node;
275 }
276
277 static queue_node *
278 _queue_node_pop(queue *queue, queue_node *node)
279 {
280         LIST_DEL(&node->item_link);
281         queue->count--;
282
283         return node;
284 }
285
286 static queue_node *
287 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
288                 tbm_surface_h surface, int *out_type)
289 {
290         queue_node *node = NULL;
291         queue_node *tmp = NULL;
292
293         if (type == 0)
294                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
295         if (out_type)
296                 *out_type = 0;
297
298         if (type & FREE_QUEUE) {
299                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head,
300                                          item_link) {
301                         if (node->surface == surface) {
302                                 if (out_type)
303                                         *out_type = FREE_QUEUE;
304
305                                 return node;
306                         }
307                 }
308         }
309
310         if (type & DIRTY_QUEUE) {
311                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->dirty_queue.head,
312                                          item_link) {
313                         if (node->surface == surface) {
314                                 if (out_type)
315                                         *out_type = DIRTY_QUEUE;
316
317                                 return node;
318                         }
319                 }
320         }
321
322         if (type & NODE_LIST) {
323                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
324                         if (node->surface == surface) {
325                                 if (out_type)
326                                         *out_type = NODE_LIST;
327
328                                 return node;
329                         }
330                 }
331         }
332
333         return NULL;
334 }
335
336 static void
337 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
338 {
339         if (node->surface) {
340                 if (surface_queue->free_cb) {
341                         surface_queue->free_cb(surface_queue,
342                                         surface_queue->alloc_cb_data,
343                                         node->surface);
344                 }
345
346                 tbm_surface_destroy(node->surface);
347         }
348
349         _queue_node_delete(node);
350 }
351
352 static void
353 _queue_init(queue *queue)
354 {
355         LIST_INITHEAD(&queue->head);
356
357         queue->count = 0;
358 }
359
360 static void
361 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
362             void *data)
363 {
364         TBM_RETURN_IF_FAIL(cb != NULL);
365
366         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
367
368         TBM_RETURN_IF_FAIL(item != NULL);
369
370         LIST_INITHEAD(&item->link);
371         item->cb = cb;
372         item->data = data;
373
374         LIST_ADDTAIL(&item->link, list);
375 }
376
377 static void
378 _notify_remove(struct list_head *list,
379                tbm_surface_queue_notify_cb cb, void *data)
380 {
381         queue_notify *item = NULL, *tmp = NULL;
382
383         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
384                 if (item->cb == cb && item->data == data) {
385                         LIST_DEL(&item->link);
386                         free(item);
387                         return;
388                 }
389         }
390
391         TBM_LOG_E("Cannot find notify\n");
392 }
393
394 static void
395 _notify_remove_all(struct list_head *list)
396 {
397         queue_notify *item = NULL, *tmp = NULL;
398
399         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
400                 LIST_DEL(&item->link);
401                 free(item);
402         }
403 }
404
405 static void
406 _notify_emit(tbm_surface_queue_h surface_queue,
407              struct list_head *list)
408 {
409         queue_notify *item = NULL, *tmp = NULL;
410
411         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
412                 item->cb(surface_queue, item->data);
413         }
414 }
415
416 static int
417 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
418 {
419         queue_node *node = NULL;
420         queue_node *tmp = NULL;
421         int count = 0;
422
423         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
424                 if (node->type == type)
425                         count++;
426         }
427
428         return count;
429 }
430
431 void
432 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
433                           tbm_surface_h surface)
434 {
435         queue_node *node = NULL;
436
437         node = _queue_node_create();
438         TBM_RETURN_IF_FAIL(node != NULL);
439
440         tbm_surface_internal_ref(surface);
441         node->surface = surface;
442
443         LIST_ADDTAIL(&node->link, &surface_queue->list);
444         _queue_node_push_back(&surface_queue->free_queue, node);
445 }
446
447 void
448 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
449                           tbm_surface_h surface)
450 {
451         queue_node *node = NULL;
452         int queue_type;
453
454         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
455         if (node)
456                 _queue_delete_node(surface_queue, node);
457 }
458
459 void
460 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
461                            queue_node *node, int push_back)
462 {
463         if (push_back)
464                 _queue_node_push_back(&surface_queue->dirty_queue, node);
465         else
466                 _queue_node_push_front(&surface_queue->dirty_queue, node);
467 }
468
469 queue_node *
470 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
471 {
472         queue_node *node = NULL;
473
474         if (_queue_is_empty(&surface_queue->free_queue)) {
475                 if (surface_queue->impl && surface_queue->impl->need_attach)
476                         surface_queue->impl->need_attach(surface_queue);
477
478                 if (_queue_is_empty(&surface_queue->free_queue))
479                         return NULL;
480         }
481
482         node = _queue_node_pop_front(&surface_queue->free_queue);
483
484         return node;
485 }
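
/*
 * When free_queue is empty, the backend's need_attach() hook gets one chance
 * to attach (allocate) more surfaces before the dequeue gives up, so buffers
 * are created lazily on first use rather than at queue creation.
 */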
486
487 queue_node *
488 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
489 {
490         queue_node *node = NULL;
491
492         if (_queue_is_empty(&surface_queue->dirty_queue))
493                 return NULL;
494
495         node = _queue_node_pop_front(&surface_queue->dirty_queue);
496
497         return node;
498 }
499
500 void
501 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
502                            queue_node *node, int push_back)
503 {
504         if (push_back)
505                 _queue_node_push_back(&surface_queue->free_queue, node);
506         else
507                 _queue_node_push_front(&surface_queue->free_queue, node);
508 }
509
510 void
511 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
512                         int queue_size,
513                         int width, int height, int format,
514                         const tbm_surface_queue_interface *impl, void *data)
515 {
516         TBM_RETURN_IF_FAIL(surface_queue != NULL);
517         TBM_RETURN_IF_FAIL(impl != NULL);
518
519         memset(surface_queue, 0x00, sizeof(struct _tbm_surface_queue));
520
521         if (!g_surf_queue_bufmgr)
522                 _init_tbm_surf_queue_bufmgr();
523
524         pthread_mutex_init(&surface_queue->lock, NULL);
525         pthread_cond_init(&surface_queue->free_cond, NULL);
526         pthread_cond_init(&surface_queue->dirty_cond, NULL);
527
528         surface_queue->queue_size = queue_size;
529         surface_queue->width = width;
530         surface_queue->height = height;
531         surface_queue->format = format;
532         surface_queue->impl = impl;
533         surface_queue->impl_data = data;
534
535         _queue_init(&surface_queue->free_queue);
536         _queue_init(&surface_queue->dirty_queue);
537         LIST_INITHEAD(&surface_queue->list);
538
539         LIST_INITHEAD(&surface_queue->destory_noti);
540         LIST_INITHEAD(&surface_queue->acquirable_noti);
541         LIST_INITHEAD(&surface_queue->dequeuable_noti);
542         LIST_INITHEAD(&surface_queue->dequeue_noti);
543         LIST_INITHEAD(&surface_queue->reset_noti);
544
545         if (surface_queue->impl && surface_queue->impl->init)
546                 surface_queue->impl->init(surface_queue);
547
548         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
549 }
550
551 tbm_surface_queue_error_e
552 tbm_surface_queue_add_destroy_cb(
553         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
554         void *data)
555 {
556         _tbm_surf_queue_mutex_lock();
557
558         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
559                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
560
561         pthread_mutex_lock(&surface_queue->lock);
562
563         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
564
565         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
566
567         pthread_mutex_unlock(&surface_queue->lock);
568
569         _tbm_surf_queue_mutex_unlock();
570
571         return TBM_SURFACE_QUEUE_ERROR_NONE;
572 }
573
574 tbm_surface_queue_error_e
575 tbm_surface_queue_remove_destroy_cb(
576         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
577         void *data)
578 {
579         _tbm_surf_queue_mutex_lock();
580
581         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
582                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
583
584         pthread_mutex_lock(&surface_queue->lock);
585
586         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
587
588         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
589
590         pthread_mutex_unlock(&surface_queue->lock);
591
592         _tbm_surf_queue_mutex_unlock();
593
594         return TBM_SURFACE_QUEUE_ERROR_NONE;
595 }
596
597 tbm_surface_queue_error_e
598 tbm_surface_queue_add_dequeuable_cb(
599         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
600         void *data)
601 {
602         _tbm_surf_queue_mutex_lock();
603
604         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
605                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
606
607         pthread_mutex_lock(&surface_queue->lock);
608
609         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
610
611         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
612
613         pthread_mutex_unlock(&surface_queue->lock);
614
615         _tbm_surf_queue_mutex_unlock();
616
617         return TBM_SURFACE_QUEUE_ERROR_NONE;
618 }
619
620 tbm_surface_queue_error_e
621 tbm_surface_queue_remove_dequeuable_cb(
622         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
623         void *data)
624 {
625         _tbm_surf_queue_mutex_lock();
626
627         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
628                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
629
630         pthread_mutex_lock(&surface_queue->lock);
631
632         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
633
634         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
635
636         pthread_mutex_unlock(&surface_queue->lock);
637
638         _tbm_surf_queue_mutex_unlock();
639
640         return TBM_SURFACE_QUEUE_ERROR_NONE;
641 }
642
643 tbm_surface_queue_error_e
644 tbm_surface_queue_add_dequeue_cb(
645         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
646         void *data)
647 {
648         _tbm_surf_queue_mutex_lock();
649
650         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
651                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
652
653         pthread_mutex_lock(&surface_queue->lock);
654
655         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
656
657         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
658
659         pthread_mutex_unlock(&surface_queue->lock);
660
661         _tbm_surf_queue_mutex_unlock();
662
663         return TBM_SURFACE_QUEUE_ERROR_NONE;
664 }
665
666 tbm_surface_queue_error_e
667 tbm_surface_queue_remove_dequeue_cb(
668         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
669         void *data)
670 {
671         _tbm_surf_queue_mutex_lock();
672
673         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
674                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
675
676         pthread_mutex_lock(&surface_queue->lock);
677
678         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
679
680         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
681
682         pthread_mutex_unlock(&surface_queue->lock);
683
684         _tbm_surf_queue_mutex_unlock();
685
686         return TBM_SURFACE_QUEUE_ERROR_NONE;
687 }
688
689 tbm_surface_queue_error_e
690 tbm_surface_queue_add_acquirable_cb(
691         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
692         void *data)
693 {
694         _tbm_surf_queue_mutex_lock();
695
696         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
697                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
698
699         pthread_mutex_lock(&surface_queue->lock);
700
701         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
702
703         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
704
705         pthread_mutex_unlock(&surface_queue->lock);
706
707         _tbm_surf_queue_mutex_unlock();
708
709         return TBM_SURFACE_QUEUE_ERROR_NONE;
710 }
711
712 tbm_surface_queue_error_e
713 tbm_surface_queue_remove_acquirable_cb(
714         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
715         void *data)
716 {
717         _tbm_surf_queue_mutex_lock();
718
719         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
720                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
721
722         pthread_mutex_lock(&surface_queue->lock);
723
724         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
725
726         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
727
728         pthread_mutex_unlock(&surface_queue->lock);
729
730         _tbm_surf_queue_mutex_unlock();
731
732         return TBM_SURFACE_QUEUE_ERROR_NONE;
733 }
734
735 tbm_surface_queue_error_e
736 tbm_surface_queue_set_alloc_cb(
737         tbm_surface_queue_h surface_queue,
738         tbm_surface_alloc_cb alloc_cb,
739         tbm_surface_free_cb free_cb,
740         void *data)
741 {
742         _tbm_surf_queue_mutex_lock();
743
744         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
745                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
746
747         pthread_mutex_lock(&surface_queue->lock);
748
749         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
750
751         surface_queue->alloc_cb = alloc_cb;
752         surface_queue->free_cb = free_cb;
753         surface_queue->alloc_cb_data = data;
754
755         pthread_mutex_unlock(&surface_queue->lock);
756
757         _tbm_surf_queue_mutex_unlock();
758
759         return TBM_SURFACE_QUEUE_ERROR_NONE;
760 }
761
762 int
763 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
764 {
765         int width;
766
767         _tbm_surf_queue_mutex_lock();
768
769         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
770
771         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
772
773         width = surface_queue->width;
774
775         _tbm_surf_queue_mutex_unlock();
776
777         return width;
778 }
779
780 int
781 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
782 {
783         int height;
784
785         _tbm_surf_queue_mutex_lock();
786
787         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
788
789         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
790
791         height = surface_queue->height;
792
793         _tbm_surf_queue_mutex_unlock();
794
795         return height;
796 }
797
798 int
799 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
800 {
801         int format;
802
803         _tbm_surf_queue_mutex_lock();
804
805         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
806
807         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
808
809         format = surface_queue->format;
810
811         _tbm_surf_queue_mutex_unlock();
812
813         return format;
814 }
815
816 int
817 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
818 {
819         int queue_size;
820
821         _tbm_surf_queue_mutex_lock();
822
823         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
824
825         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
826
827         queue_size = surface_queue->queue_size;
828
829         _tbm_surf_queue_mutex_unlock();
830
831         return queue_size;
832 }
833
834 tbm_surface_queue_error_e
835 tbm_surface_queue_add_reset_cb(
836         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
837         void *data)
838 {
839         _tbm_surf_queue_mutex_lock();
840
841         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
842                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
843
844         pthread_mutex_lock(&surface_queue->lock);
845
846         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
847
848         _notify_add(&surface_queue->reset_noti, reset_cb, data);
849
850         pthread_mutex_unlock(&surface_queue->lock);
851
852         _tbm_surf_queue_mutex_unlock();
853
854         return TBM_SURFACE_QUEUE_ERROR_NONE;
855 }
856
857 tbm_surface_queue_error_e
858 tbm_surface_queue_remove_reset_cb(
859         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
860         void *data)
861 {
862         _tbm_surf_queue_mutex_lock();
863
864         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
865                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
866
867         pthread_mutex_lock(&surface_queue->lock);
868
869         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
870
871         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
872
873         pthread_mutex_unlock(&surface_queue->lock);
874
875         _tbm_surf_queue_mutex_unlock();
876
877         return TBM_SURFACE_QUEUE_ERROR_NONE;
878 }
879
880 tbm_surface_queue_error_e
881 tbm_surface_queue_enqueue(tbm_surface_queue_h
882                           surface_queue, tbm_surface_h surface)
883 {
884         queue_node *node = NULL;
885         int queue_type;
886
887         _tbm_surf_queue_mutex_lock();
888
889         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
890                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
891         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
892                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
893
894         if (b_dump_queue)
895                 tbm_surface_internal_dump_buffer(surface, "enqueue");
896
897         pthread_mutex_lock(&surface_queue->lock);
898
899         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
900
901         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
902         if (node == NULL || queue_type != NODE_LIST) {
903                 TBM_LOG_E("tbm_surface_queue_enqueue::surface is not attached or is already in the free_queue/dirty_queue. node:%p, queue:%d\n",
904                         node, queue_type);
905                 pthread_mutex_unlock(&surface_queue->lock);
906
907                 _tbm_surf_queue_mutex_unlock();
908                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
909         }
910
911         if (surface_queue->impl && surface_queue->impl->enqueue)
912                 surface_queue->impl->enqueue(surface_queue, node);
913         else
914                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
915
916         if (_queue_is_empty(&surface_queue->dirty_queue)) {
917                 TBM_LOG_E("dirty_queue is empty even after enqueue. node:%p\n", node);
918                 pthread_mutex_unlock(&surface_queue->lock);
919
920                 _tbm_surf_queue_mutex_unlock();
921                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
922         }
923
924         node->type = QUEUE_NODE_TYPE_ENQUEUE;
925
926         pthread_mutex_unlock(&surface_queue->lock);
927         pthread_cond_signal(&surface_queue->dirty_cond);
928
929         _tbm_surf_queue_mutex_unlock();
930
931         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
932
933         return TBM_SURFACE_QUEUE_ERROR_NONE;
934 }
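
/*
 * Note the unlock ordering above: dirty_cond is signalled after the per-queue
 * lock is released, and the acquirable callbacks run only after the global
 * lock is released as well, so a callback can re-enter the queue API without
 * deadlocking on these locks.
 */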
935
936 tbm_surface_queue_error_e
937 tbm_surface_queue_dequeue(tbm_surface_queue_h
938                           surface_queue, tbm_surface_h *surface)
939 {
940         queue_node *node = NULL;
941
942         _tbm_surf_queue_mutex_lock();
943
944         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
945                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
946         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
947                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
948
949         pthread_mutex_lock(&surface_queue->lock);
950
951         if (surface_queue->impl && surface_queue->impl->dequeue)
952                 node = surface_queue->impl->dequeue(surface_queue);
953         else
954                 node = _tbm_surface_queue_dequeue(surface_queue);
955
956         if (node == NULL) {
957                 *surface = NULL;
958                 pthread_mutex_unlock(&surface_queue->lock);
959
960                 _tbm_surf_queue_mutex_unlock();
961                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
962         }
963
964         if (node->surface == NULL) {
965                 *surface = NULL;
966                 TBM_LOG_E("dequeued node has no surface\n");
967                 pthread_mutex_unlock(&surface_queue->lock);
968
969                 _tbm_surf_queue_mutex_unlock();
970                 return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
971         }
972
973         node->type = QUEUE_NODE_TYPE_DEQUEUE;
974         *surface = node->surface;
975
976         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
977
978         pthread_mutex_unlock(&surface_queue->lock);
979
980         _tbm_surf_queue_mutex_unlock();
981
982         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
983
984         return TBM_SURFACE_QUEUE_ERROR_NONE;
985 }
986
987 int
988 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
989 {
990         _tbm_surf_queue_mutex_lock();
991
992         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
993
994         pthread_mutex_lock(&surface_queue->lock);
995
996         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
997
998         if (_queue_is_empty(&surface_queue->free_queue)) {
999                 if (surface_queue->impl && surface_queue->impl->need_attach)
1000                         surface_queue->impl->need_attach(surface_queue);
1001
1002                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1003                         TBM_LOG_E("surface_queue:%p is invalid\n", surface_queue);
1004                         _tbm_surf_queue_mutex_unlock();
1005                         return 0;
1006                 }
1007         }
1008
1009         if (_queue_is_empty(&surface_queue->free_queue)) {
1010                 if (wait &&
1011                         _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE)) {
1012
1013                         _tbm_surf_queue_mutex_unlock();
1014
1015                         pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1016
1017                         _tbm_surf_queue_mutex_lock();
1018
1019                         if (!_tbm_surface_queue_is_valid(surface_queue)) {
1020                                 TBM_LOG_E("surface_queue:%p is invalid\n", surface_queue);
1021                                 _tbm_surf_queue_mutex_unlock();
1022                                 return 0;
1023                         }
1024
1025                         pthread_mutex_unlock(&surface_queue->lock);
1026
1027                         _tbm_surf_queue_mutex_unlock();
1028                         return 1;
1029                 }
1030
1031                 pthread_mutex_unlock(&surface_queue->lock);
1032
1033                 _tbm_surf_queue_mutex_unlock();
1034                 return 0;
1035         }
1036
1037         pthread_mutex_unlock(&surface_queue->lock);
1038
1039         _tbm_surf_queue_mutex_unlock();
1040
1041         return 1;
1042 }
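
/*
 * With 'wait' set, the caller blocks on free_cond only while at least one
 * buffer is out on the consumer side (QUEUE_NODE_TYPE_ACQUIRE); otherwise
 * the wait might never be satisfied, so the function returns 0 immediately.
 */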
1043
1044 tbm_surface_queue_error_e
1045 tbm_surface_queue_release(tbm_surface_queue_h
1046                           surface_queue, tbm_surface_h surface)
1047 {
1048         queue_node *node = NULL;
1049         int queue_type;
1050
1051         _tbm_surf_queue_mutex_lock();
1052
1053         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1054                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1055         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1056                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1057
1058         pthread_mutex_lock(&surface_queue->lock);
1059
1060         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1061
1062         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1063         if (node == NULL || queue_type != NODE_LIST) {
1064                 TBM_LOG_E("tbm_surface_queue_release::surface is not attached or is already in the free_queue/dirty_queue. node:%p, queue:%d\n",
1065                         node, queue_type);
1066                 pthread_mutex_unlock(&surface_queue->lock);
1067
1068                 _tbm_surf_queue_mutex_unlock();
1069                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1070         }
1071
1072         if (surface_queue->impl && surface_queue->impl->release)
1073                 surface_queue->impl->release(surface_queue, node);
1074         else
1075                 _tbm_surface_queue_release(surface_queue, node, 1);
1076
1077         if (_queue_is_empty(&surface_queue->free_queue)) {
1078                 pthread_mutex_unlock(&surface_queue->lock);
1079
1080                 _tbm_surf_queue_mutex_unlock();
1081                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1082         }
1083
1084         node->type = QUEUE_NODE_TYPE_RELEASE;
1085
1086         pthread_mutex_unlock(&surface_queue->lock);
1087         pthread_cond_signal(&surface_queue->free_cond);
1088
1089         _tbm_surf_queue_mutex_unlock();
1090
1091         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1092
1093         return TBM_SURFACE_QUEUE_ERROR_NONE;
1094 }
1095
1096 tbm_surface_queue_error_e
1097 tbm_surface_queue_acquire(tbm_surface_queue_h
1098                           surface_queue, tbm_surface_h *surface)
1099 {
1100         queue_node *node = NULL;
1101
1102         _tbm_surf_queue_mutex_lock();
1103
1104         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1105                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1106         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1107                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1108
1109         pthread_mutex_lock(&surface_queue->lock);
1110
1111         if (surface_queue->impl && surface_queue->impl->acquire)
1112                 node = surface_queue->impl->acquire(surface_queue);
1113         else
1114                 node = _tbm_surface_queue_acquire(surface_queue);
1115
1116         if (node == NULL) {
1117                 *surface = NULL;
1118                 pthread_mutex_unlock(&surface_queue->lock);
1119
1120                 _tbm_surf_queue_mutex_unlock();
1121                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1122         }
1123
1124         if (node->surface == NULL) {
1125                 *surface = NULL;
1126                 TBM_LOG_E("acquired node has no surface\n");
1127                 pthread_mutex_unlock(&surface_queue->lock);
1128
1129                 _tbm_surf_queue_mutex_unlock();
1130                 return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1131         }
1132
1133         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1134
1135         *surface = node->surface;
1136
1137         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1138
1139         pthread_mutex_unlock(&surface_queue->lock);
1140
1141         _tbm_surf_queue_mutex_unlock();
1142
1143         if (b_dump_queue)
1144                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1145
1146         return TBM_SURFACE_QUEUE_ERROR_NONE;
1147 }
1148
1149 int
1150 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1151 {
1152         _tbm_surf_queue_mutex_lock();
1153
1154         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1155
1156         pthread_mutex_lock(&surface_queue->lock);
1157
1158         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1159
1160         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1161                 if (wait &&
1162                         _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE)) {
1163
1164                         _tbm_surf_queue_mutex_unlock();
1165
1166                         pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1167
1168                         _tbm_surf_queue_mutex_lock();
1169
1170                         if (!_tbm_surface_queue_is_valid(surface_queue)) {
1171                                 TBM_LOG_E("surface_queue:%p is invalid\n", surface_queue);
1172                                 _tbm_surf_queue_mutex_unlock();
1173                                 return 0;
1174                         }
1175
1176                         pthread_mutex_unlock(&surface_queue->lock);
1177
1178                         _tbm_surf_queue_mutex_unlock();
1179                         return 1;
1180                 }
1181
1182                 pthread_mutex_unlock(&surface_queue->lock);
1183
1184                 _tbm_surf_queue_mutex_unlock();
1185                 return 0;
1186         }
1187
1188         pthread_mutex_unlock(&surface_queue->lock);
1189
1190         _tbm_surf_queue_mutex_unlock();
1191
1192         return 1;
1193 }
1194
1195 void
1196 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1197 {
1198         queue_node *node = NULL, *tmp = NULL;
1199
1200         _tbm_surf_queue_mutex_lock();
1201
1202         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1203
1204         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1205
1206         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
1207                 _queue_delete_node(surface_queue, node);
1208         }
1209
1210         if (surface_queue->impl && surface_queue->impl->destroy)
1211                 surface_queue->impl->destroy(surface_queue);
1212
1213         _notify_emit(surface_queue, &surface_queue->destory_noti);
1214
1215         _notify_remove_all(&surface_queue->destory_noti);
1216         _notify_remove_all(&surface_queue->acquirable_noti);
1217         _notify_remove_all(&surface_queue->dequeuable_noti);
1218         _notify_remove_all(&surface_queue->reset_noti);
1219
1220         pthread_mutex_destroy(&surface_queue->lock);
1221
1222         LIST_DEL(&surface_queue->item_link);
1223
1224         free(surface_queue);
1225         surface_queue = NULL;
1226
1227         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1228                 _deinit_tbm_surf_queue_bufmgr();
1229
1230         _tbm_surf_queue_mutex_unlock();
1231 }
1232
1233 tbm_surface_queue_error_e
1234 tbm_surface_queue_reset(tbm_surface_queue_h
1235                         surface_queue, int width, int height, int format)
1236 {
1237         queue_node *node = NULL, *tmp = NULL;
1238
1239         _tbm_surf_queue_mutex_lock();
1240
1241         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1242                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1243
1244         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1245
1246         if (width == surface_queue->width && height == surface_queue->height &&
1247             format == surface_queue->format) {
1248                 _tbm_surf_queue_mutex_unlock();
1249                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1250         }
1251
1252         pthread_mutex_lock(&surface_queue->lock);
1253
1254         surface_queue->width = width;
1255         surface_queue->height = height;
1256         surface_queue->format = format;
1257
1258         /* Destroy the attached surfaces and delete their nodes */
1259         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
1260                 _queue_delete_node(surface_queue, node);
1261         }
1262
1263         /* Reset queue */
1264         _queue_init(&surface_queue->free_queue);
1265         _queue_init(&surface_queue->dirty_queue);
1266         LIST_INITHEAD(&surface_queue->list);
1267
1268         if (surface_queue->impl && surface_queue->impl->reset)
1269                 surface_queue->impl->reset(surface_queue);
1270
1271         pthread_mutex_unlock(&surface_queue->lock);
1272         pthread_cond_signal(&surface_queue->free_cond);
1273
1274         _tbm_surf_queue_mutex_unlock();
1275
1276         _notify_emit(surface_queue, &surface_queue->reset_noti);
1277
1278         return TBM_SURFACE_QUEUE_ERROR_NONE;
1279 }
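
/*
 * The reset notification runs only after both surface_queue->lock and the
 * global mutex are released, so reset callbacks can safely call back into
 * the queue API; this ordering appears to be what the "fix deadlock problem
 * in queue_reset" commit subject refers to.
 */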
1280
1281 tbm_surface_queue_error_e
1282 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1283 {
1284         queue_node *node = NULL, *tmp = NULL;
1285
1286         _tbm_surf_queue_mutex_lock();
1287
1288         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1289                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1290
1291         pthread_mutex_lock(&surface_queue->lock);
1292
1293         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1294
1295         /* Destroy the attached surfaces and delete their nodes */
1296         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
1297                 _queue_delete_node(surface_queue, node);
1298         }
1299
1300         /* Reset queue */
1301         _queue_init(&surface_queue->free_queue);
1302         _queue_init(&surface_queue->dirty_queue);
1303         LIST_INITHEAD(&surface_queue->list);
1304
1305         if (surface_queue->impl && surface_queue->impl->reset)
1306                 surface_queue->impl->reset(surface_queue);
1307
1308         pthread_mutex_unlock(&surface_queue->lock);
1309         pthread_cond_signal(&surface_queue->free_cond);
1310
1311         _tbm_surf_queue_mutex_unlock();
1312
1313         _notify_emit(surface_queue, &surface_queue->reset_noti);
1314
1315         return TBM_SURFACE_QUEUE_ERROR_NONE;
1316 }
1317
1318 tbm_surface_queue_error_e
1319 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1320                         tbm_surface_h *surfaces, int *num)
1321 {
1322         queue_node *node = NULL;
1323         queue_node *tmp = NULL;
1324
1325         _tbm_surf_queue_mutex_lock();
1326
1327         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1328                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1329         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1330                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1331
1332         pthread_mutex_lock(&surface_queue->lock);
1333
1334         *num = 0;
1335         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link) {
1336                 if (surfaces)
1337                         surfaces[*num] = node->surface;
1338
1339                 *num = *num + 1;
1340         }
1341
1342         pthread_mutex_unlock(&surface_queue->lock);
1343
1344         _tbm_surf_queue_mutex_unlock();
1345
1346         return TBM_SURFACE_QUEUE_ERROR_NONE;
1347 }
1348
1349 typedef struct {
1350         int queue_size;
1351         int num_attached;
1352         int flags;
1353 } tbm_queue_default;
1354
1355 static void
1356 __tbm_queue_default_init(tbm_surface_queue_h surface_queue)
1357 {
1358         tbm_queue_default *data = surface_queue->impl_data;
1359
1360         data->num_attached = 0;
1361 }
1362
1363 static void
1364 __tbm_queue_default_reset(tbm_surface_queue_h surface_queue)
1365 {
1366         tbm_queue_default *data = surface_queue->impl_data;
1367
1368         data->num_attached = 0;
1369 }
1370
1371 static void
1372 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1373 {
1374         free(surface_queue->impl_data);
1375 }
1376
1377 static void
1378 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
1379 {
1380         tbm_queue_default *data = surface_queue->impl_data;
1381         tbm_surface_h surface;
1382
1383         if (data->queue_size == data->num_attached)
1384                 return;
1385
1386         if (surface_queue->alloc_cb) {
1387                 _tbm_surf_queue_mutex_unlock();
1388                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1389                 _tbm_surf_queue_mutex_lock();
1390
1391                 if (!surface)
1392                         return;
1393
1394                 tbm_surface_internal_ref(surface);
1395         } else {
1396                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1397                                 surface_queue->height,
1398                                 surface_queue->format,
1399                                 data->flags);
1400                 TBM_RETURN_IF_FAIL(surface != NULL);
1401         }
1402
1403         _tbm_surface_queue_attach(surface_queue, surface);
1404         tbm_surface_internal_unref(surface);
1405         data->num_attached++;
1406 }
1407
1408 static const tbm_surface_queue_interface tbm_queue_default_impl = {
1409         __tbm_queue_default_init,
1410         __tbm_queue_default_reset,
1411         __tbm_queue_default_destroy,
1412         __tbm_queue_default_need_attach,
1413         NULL,                           /*__tbm_queue_default_enqueue*/
1414         NULL,                           /*__tbm_queue_default_release*/
1415         NULL,                           /*__tbm_queue_default_dequeue*/
1416         NULL,                           /*__tbm_queue_default_acquire*/
1417 };
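
/*
 * The default backend only supplies need_attach(): surfaces are allocated on
 * demand (via alloc_cb when set, tbm_surface_internal_create_with_flags
 * otherwise), and the NULL vtable slots leave enqueue/dequeue/acquire/release
 * to the plain FIFO helpers.
 */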
1418
1419 tbm_surface_queue_h
1420 tbm_surface_queue_create(int queue_size, int width,
1421                          int height, int format, int flags)
1422 {
1423         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1424         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1425         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1426         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1427
1428         _tbm_surf_queue_mutex_lock();
1429
1430         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1431                                             sizeof(struct _tbm_surface_queue));
1432         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1433
1434         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1435
1436         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
1437                                   sizeof(tbm_queue_default));
1438         if (data == NULL) {
1439                 free(surface_queue);
1440                 _tbm_surf_queue_mutex_unlock();
1441                 return NULL;
1442         }
1443
1444         data->queue_size = queue_size;
1445         data->flags = flags;
1446         _tbm_surface_queue_init(surface_queue,
1447                                 data->queue_size,
1448                                 width, height, format,
1449                                 &tbm_queue_default_impl, data);
1450
1451         _tbm_surf_queue_mutex_unlock();
1452
1453         return surface_queue;
1454 }
1455
1456 typedef struct {
1457         int queue_size;
1458         int num_attached;
1459         int flags;
1460         queue dequeue_list;
1461 } tbm_queue_sequence;
1462
1463 static void
1464 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
1465 {
1466         tbm_queue_sequence *data = surface_queue->impl_data;
1467
1468         data->num_attached = 0;
1469         _queue_init(&data->dequeue_list);
1470 }
1471
1472 static void
1473 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
1474 {
1475         tbm_queue_sequence *data = surface_queue->impl_data;
1476
1477         data->num_attached = 0;
1478         _queue_init(&data->dequeue_list);
1479 }
1480
1481 static void
1482 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
1483 {
1484         free(surface_queue->impl_data);
1485 }
1486
1487 static void
1488 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
1489 {
1490         tbm_queue_sequence *data = surface_queue->impl_data;
1491         tbm_surface_h surface;
1492
1493         if (data->queue_size == data->num_attached)
1494                 return;
1495
1496         if (surface_queue->alloc_cb) {
1497                 _tbm_surf_queue_mutex_unlock();
1498                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1499                 _tbm_surf_queue_mutex_lock();
1500
1501                 if (!surface)
1502                         return;
1503
1504                 tbm_surface_internal_ref(surface);
1505         } else {
1506                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1507                                 surface_queue->height,
1508                                 surface_queue->format,
1509                                 data->flags);
1510                 TBM_RETURN_IF_FAIL(surface != NULL);
1511         }
1512
1513         _tbm_surface_queue_attach(surface_queue, surface);
1514         tbm_surface_internal_unref(surface);
1515         data->num_attached++;
1516 }
1517
1518 static void
1519 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
1520                              queue_node *node)
1521 {
1522         tbm_queue_sequence *data = surface_queue->impl_data;
1523         queue_node *next = NULL;
1524         queue_node *tmp = NULL;
1525
1526         node->priv_flags = 0;
1527
1528         LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
1529                 if (next->priv_flags)
1530                         break;
1531                 _queue_node_pop(&data->dequeue_list, next);
1532                 _tbm_surface_queue_enqueue(surface_queue, next, 1);
1533         }
1534 }
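
/*
 * The sequence backend preserves dequeue order on the dirty side: a buffer
 * enqueued out of order only clears its priv_flags marker here and is moved
 * to dirty_queue later, once every buffer dequeued before it has been
 * enqueued as well.
 */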
1535
1536 static queue_node *
1537 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
1538                              surface_queue)
1539 {
1540         tbm_queue_sequence *data = surface_queue->impl_data;
1541         queue_node *node = NULL;
1542
1543         node = _tbm_surface_queue_dequeue(surface_queue);
1544         if (node) {
1545                 _queue_node_push_back(&data->dequeue_list, node);
1546                 node->priv_flags = 1;
1547         }
1548
1549         return node;
1550 }
1551
1552 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
1553         __tbm_queue_sequence_init,
1554         __tbm_queue_sequence_reset,
1555         __tbm_queue_sequence_destroy,
1556         __tbm_queue_sequence_need_attach,
1557         __tbm_queue_sequence_enqueue,
1558         NULL,                                   /*__tbm_queue_sequence_release*/
1559         __tbm_queue_sequence_dequeue,
1560         NULL,                                   /*__tbm_queue_sequence_acquire*/
1561 };
1562
1563 tbm_surface_queue_h
1564 tbm_surface_queue_sequence_create(int queue_size, int width,
1565                                   int height, int format, int flags)
1566 {
1567         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1568         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1569         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1570         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1571
1572         _tbm_surf_queue_mutex_lock();
1573
1574         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1575                                             sizeof(struct _tbm_surface_queue));
1576         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1577
1578         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1579
1580         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
1581                                    sizeof(tbm_queue_sequence));
1582         if (data == NULL) {
1583                 free(surface_queue);
                _tbm_surf_queue_mutex_unlock();
1584                 return NULL;
1585         }
1586
1587         data->queue_size = queue_size;
1588         data->flags = flags;
1589         _tbm_surface_queue_init(surface_queue,
1590                                 data->queue_size,
1591                                 width, height, format,
1592                                 &tbm_queue_sequence_impl, data);
1593
1594         _tbm_surf_queue_mutex_unlock();
1595
1596         return surface_queue;
1597 }
1598 /* LCOV_EXCL_STOP */
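
/*
 * Illustrative usage sketch (not part of the library, kept compiled out):
 * a minimal producer/consumer round trip through the default queue. It
 * assumes the public tbm_surface.h / tbm_surface_queue.h declarations and
 * the TBM_FORMAT_ARGB8888 / TBM_BO_DEFAULT constants; error handling is
 * reduced to early returns.
 */
#if 0
static void
example_round_trip(void)
{
	tbm_surface_queue_h queue;
	tbm_surface_h surface = NULL;

	/* 3 buffers, 640x480, ARGB8888, default allocation flags */
	queue = tbm_surface_queue_create(3, 640, 480,
					 TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
	if (!queue)
		return;

	/* producer side: take a free buffer, draw into it, mark it dirty */
	if (tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
		/* ... render into surface ... */
		tbm_surface_queue_enqueue(queue, surface);
	}

	/* consumer side: take the dirty buffer, use it, return it to the free list */
	if (tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
		/* ... scan out / compose surface ... */
		tbm_surface_queue_release(queue, surface);
	}

	tbm_surface_queue_destroy(queue);
}
#endif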