tbm_surface_queue: clean up tbm_surface_queue_can_dequeue / acquire
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36
37 #define FREE_QUEUE      1
38 #define DIRTY_QUEUE     2
39 #define NODE_LIST       4
40
41 #define TBM_QUEUE_DEBUG 0
42
43 #ifdef TRACE
44 #define TBM_QUEUE_TRACE(fmt, ...)  { if (bTrace&0x1) fprintf(stderr, "[TBM:TRACE(%d)(%s:%d)] " fmt, getpid(), __func__, __LINE__, ##__VA_ARGS__); }
45 #else
46 #define TBM_QUEUE_TRACE(fmt, ...)
47 #endif /* TRACE */
48
49 #if TBM_QUEUE_DEBUG
50 #define TBM_LOCK() TBM_LOG_D("[LOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
51 #define TBM_UNLOCK() TBM_LOG_D("[UNLOCK] %s:%d surface:%p\n", __func__, __LINE__, surface_queue)
52 #else
53 #define TBM_LOCK()
54 #define TBM_UNLOCK()
55 #endif
56
57 static tbm_bufmgr g_surf_queue_bufmgr;
58 static pthread_mutex_t tbm_surf_queue_lock;
59 void _tbm_surface_queue_mutex_unlock(void);
60
61 /* check condition */
62 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
63         if (!(cond)) {\
64                 TBM_LOG_E("'%s' failed.\n", #cond);\
65                 _tbm_surf_queue_mutex_unlock();\
66                 return;\
67         } \
68 }
69
70 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
71         if (!(cond)) {\
72                 TBM_LOG_E("'%s' failed.\n", #cond);\
73                 _tbm_surf_queue_mutex_unlock();\
74                 return val;\
75         } \
76 }
77
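/*
 * Locking overview (descriptive note): tbm_surf_queue_lock is a process-global
 * mutex guarding g_surf_queue_bufmgr->surf_queue_list and the validity checks
 * done by the public entry points; each tbm_surface_queue additionally carries
 * its own lock plus the free_cond/dirty_cond condition variables for per-queue
 * state. Public functions take the global lock first, validate the handle, and
 * only then take the per-queue lock.
 */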
78 typedef enum _queue_node_type {
79         QUEUE_NODE_TYPE_NONE,
80         QUEUE_NODE_TYPE_DEQUEUE,
81         QUEUE_NODE_TYPE_ENQUEUE,
82         QUEUE_NODE_TYPE_ACQUIRE,
83         QUEUE_NODE_TYPE_RELEASE
84 } Queue_Node_Type;
85
86 typedef struct {
87         struct list_head head;
88         int count;
89 } queue;
90
91 typedef struct {
92         tbm_surface_h surface;
93
94         struct list_head item_link;
95         struct list_head link;
96
97         Queue_Node_Type type;
98
99         unsigned int priv_flags;        /*for each queue*/
100 } queue_node;
101
102 typedef struct {
103         struct list_head link;
104
105         tbm_surface_queue_notify_cb cb;
106         void *data;
107 } queue_notify;
108
109 typedef struct _tbm_surface_queue_interface {
110         void (*init)(tbm_surface_queue_h queue);
111         void (*reset)(tbm_surface_queue_h queue);
112         void (*destroy)(tbm_surface_queue_h queue);
113         void (*need_attach)(tbm_surface_queue_h queue);
114
115         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
116         void (*release)(tbm_surface_queue_h queue, queue_node *node);
117         queue_node *(*dequeue)(tbm_surface_queue_h queue);
118         queue_node *(*acquire)(tbm_surface_queue_h queue);
119         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
120 } tbm_surface_queue_interface;
121
122 struct _tbm_surface_queue {
123         int width;
124         int height;
125         int format;
126         int queue_size;
127         int num_attached;
128
129         queue free_queue;
130         queue dirty_queue;
131         struct list_head list;
132
133         struct list_head destory_noti;
134         struct list_head dequeuable_noti;
135         struct list_head dequeue_noti;
136         struct list_head acquirable_noti;
137         struct list_head reset_noti;
138
139         pthread_mutex_t lock;
140         pthread_cond_t free_cond;
141         pthread_cond_t dirty_cond;
142
143         const tbm_surface_queue_interface *impl;
144         void *impl_data;
145
146         //For external buffer allocation
147         tbm_surface_alloc_cb alloc_cb;
148         tbm_surface_free_cb free_cb;
149         void *alloc_cb_data;
150
151         struct list_head item_link; /* link of surface queue */
152 };
153
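/*
 * Descriptive note: every attached surface is wrapped in a queue_node. The node
 * stays on surface_queue->list (via node->link) for as long as the surface is
 * attached, and moves between free_queue and dirty_queue (via node->item_link)
 * as the surface is dequeued, enqueued, acquired and released.
 */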
154 /* LCOV_EXCL_START */
155
156 static bool
157 _tbm_surf_queue_mutex_init(void)
158 {
159         static bool tbm_surf_queue_mutex_init = false;
160
161         if (tbm_surf_queue_mutex_init)
162                 return true;
163
164         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
165                 TBM_LOG_E("fail: tbm_surf_queue mutex init\n");
166                 return false;
167         }
168
169         tbm_surf_queue_mutex_init = true;
170
171         return true;
172 }
173
174 static void
175 _tbm_surf_queue_mutex_lock(void)
176 {
177         if (!_tbm_surf_queue_mutex_init())
178                 return;
179
180         pthread_mutex_lock(&tbm_surf_queue_lock);
181 }
182
183 static void
184 _tbm_surf_queue_mutex_unlock(void)
185 {
186         pthread_mutex_unlock(&tbm_surf_queue_lock);
187 }
188
189 static void
190 _init_tbm_surf_queue_bufmgr(void)
191 {
192         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
193 }
194
195 static void
196 _deinit_tbm_surf_queue_bufmgr(void)
197 {
198         if (!g_surf_queue_bufmgr)
199                 return;
200
201         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
202         g_surf_queue_bufmgr = NULL;
203 }
204
205 static int
206 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
207 {
208         tbm_surface_queue_h old_data;
209
210         if (surface_queue == NULL || g_surf_queue_bufmgr == NULL) {
211                 TBM_TRACE("error: surface_queue is NULL or not initialized\n");
212                 return 0;
213         }
214
215         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
216                 TBM_TRACE("error: surf_queue_list is empty\n");
217                 return 0;
218         }
219
220         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
221                                 item_link) {
222                 if (old_data == surface_queue) {
223                         TBM_TRACE("tbm_surface_queue(%p)\n", surface_queue);
224                         return 1;
225                 }
226         }
227
228         TBM_TRACE("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
229         return 0;
230 }
231
232 static queue_node *
233 _queue_node_create(void)
234 {
235         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
236
237         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
238
239         return node;
240 }
241
242 static void
243 _queue_node_delete(queue_node *node)
244 {
245         LIST_DEL(&node->item_link);
246         LIST_DEL(&node->link);
247         free(node);
248 }
249
250 static int
251 _queue_is_empty(queue *queue)
252 {
253         if (LIST_IS_EMPTY(&queue->head))
254                 return 1;
255
256         return 0;
257 }
258
259 static void
260 _queue_node_push_back(queue *queue, queue_node *node)
261 {
262         LIST_ADDTAIL(&node->item_link, &queue->head);
263         queue->count++;
264 }
265
266 static void
267 _queue_node_push_front(queue *queue, queue_node *node)
268 {
269         LIST_ADD(&node->item_link, &queue->head);
270         queue->count++;
271 }
272
273 static queue_node *
274 _queue_node_pop_front(queue *queue)
275 {
276         queue_node *node;
277
278         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
279
280         LIST_DEL(&node->item_link);
281         queue->count--;
282
283         return node;
284 }
285
286 static queue_node *
287 _queue_node_pop(queue *queue, queue_node *node)
288 {
289         LIST_DEL(&node->item_link);
290         queue->count--;
291
292         return node;
293 }
294
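/*
 * Descriptive note: looks a surface up in the free queue, the dirty queue
 * and/or the attached-node list, selected by the FREE_QUEUE, DIRTY_QUEUE and
 * NODE_LIST bits in 'type'; type == 0 means search everywhere. When the node
 * is found, *out_type reports where it was found.
 */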
295 static queue_node *
296 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
297                 tbm_surface_h surface, int *out_type)
298 {
299         queue_node *node;
300
301         if (type == 0)
302                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
303         if (out_type)
304                 *out_type = 0;
305
306         if (type & FREE_QUEUE) {
307                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
308                                          item_link) {
309                         if (node->surface == surface) {
310                                 if (out_type)
311                                         *out_type = FREE_QUEUE;
312
313                                 return node;
314                         }
315                 }
316         }
317
318         if (type & DIRTY_QUEUE) {
319                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
320                                          item_link) {
321                         if (node->surface == surface) {
322                                 if (out_type)
323                                         *out_type = DIRTY_QUEUE;
324
325                                 return node;
326                         }
327                 }
328         }
329
330         if (type & NODE_LIST) {
331                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
332                         if (node->surface == surface) {
333                                 if (out_type)
334                                         *out_type = NODE_LIST;
335
336                                 return node;
337                         }
338                 }
339         }
340
341         return NULL;
342 }
343
344 static void
345 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
346 {
347         if (node->surface) {
348                 if (surface_queue->free_cb) {
349                         surface_queue->free_cb(surface_queue,
350                                         surface_queue->alloc_cb_data,
351                                         node->surface);
352                 }
353
354                 tbm_surface_destroy(node->surface);
355         }
356
357         _queue_node_delete(node);
358 }
359
360 static void
361 _queue_init(queue *queue)
362 {
363         LIST_INITHEAD(&queue->head);
364
365         queue->count = 0;
366 }
367
368 static void
369 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
370             void *data)
371 {
372         TBM_RETURN_IF_FAIL(cb != NULL);
373
374         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
375
376         TBM_RETURN_IF_FAIL(item != NULL);
377
378         LIST_INITHEAD(&item->link);
379         item->cb = cb;
380         item->data = data;
381
382         LIST_ADDTAIL(&item->link, list);
383 }
384
385 static void
386 _notify_remove(struct list_head *list,
387                tbm_surface_queue_notify_cb cb, void *data)
388 {
389         queue_notify *item, *tmp;
390
391         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
392                 if (item->cb == cb && item->data == data) {
393                         LIST_DEL(&item->link);
394                         free(item);
395                         return;
396                 }
397         }
398
399                 TBM_LOG_E("Cannot find notify callback\n");
400 }
401
402 static void
403 _notify_remove_all(struct list_head *list)
404 {
405         queue_notify *item, *tmp;
406
407         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
408                 LIST_DEL(&item->link);
409                 free(item);
410         }
411 }
412
413 static void
414 _notify_emit(tbm_surface_queue_h surface_queue,
415              struct list_head *list)
416 {
417         queue_notify *item;
418
419         LIST_FOR_EACH_ENTRY(item, list, link)
420                 item->cb(surface_queue, item->data);
421 }
422
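/*
 * Descriptive note: the *_noti lists hold user callbacks registered through the
 * tbm_surface_queue_add_*_cb() functions below. _notify_emit() runs them in
 * registration order; enqueue/dequeue/release/reset emit their notifications
 * only after both queue locks have been dropped.
 */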
423 static int
424 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
425 {
426         queue_node *node;
427         int count = 0;
428
429         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
430                 if (node->type == type)
431                         count++;
432         }
433
434         return count;
435 }
436
437 static void
438 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
439                           tbm_surface_h surface)
440 {
441         queue_node *node;
442
443         node = _queue_node_create();
444         TBM_RETURN_IF_FAIL(node != NULL);
445
446         tbm_surface_internal_ref(surface);
447         node->surface = surface;
448
449         LIST_ADDTAIL(&node->link, &surface_queue->list);
450         surface_queue->num_attached++;
451         _queue_node_push_back(&surface_queue->free_queue, node);
452 }
453
454 static void
455 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
456                           tbm_surface_h surface)
457 {
458         queue_node *node;
459         int queue_type;
460
461         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
462         if (node) {
463                 _queue_delete_node(surface_queue, node);
464                 surface_queue->num_attached--;
465         }
466 }
467
468 static void
469 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
470                            queue_node *node, int push_back)
471 {
472         if (push_back)
473                 _queue_node_push_back(&surface_queue->dirty_queue, node);
474         else
475                 _queue_node_push_front(&surface_queue->dirty_queue, node);
476 }
477
478 static queue_node *
479 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
480 {
481         queue_node *node;
482
483         if (_queue_is_empty(&surface_queue->free_queue)) {
484                 if (surface_queue->impl && surface_queue->impl->need_attach)
485                         surface_queue->impl->need_attach(surface_queue);
486
487                 if (_queue_is_empty(&surface_queue->free_queue))
488                         return NULL;
489         }
490
491         node = _queue_node_pop_front(&surface_queue->free_queue);
492
493         return node;
494 }
495
496 static queue_node *
497 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
498 {
499         queue_node *node;
500
501         if (_queue_is_empty(&surface_queue->dirty_queue))
502                 return NULL;
503
504         node = _queue_node_pop_front(&surface_queue->dirty_queue);
505
506         return node;
507 }
508
509 static void
510 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
511                            queue_node *node, int push_back)
512 {
513         if (push_back)
514                 _queue_node_push_back(&surface_queue->free_queue, node);
515         else
516                 _queue_node_push_front(&surface_queue->free_queue, node);
517 }
518
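/*
 * Descriptive note: with the default policy a buffer cycles
 * free_queue -> dequeue -> client -> enqueue -> dirty_queue -> acquire ->
 * consumer -> release -> free_queue. The _tbm_surface_queue_* helpers above
 * implement this FIFO behaviour and are used whenever a queue implementation
 * does not override the corresponding hook.
 */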
519 static void
520 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
521                         int queue_size,
522                         int width, int height, int format,
523                         const tbm_surface_queue_interface *impl, void *data)
524 {
525         TBM_RETURN_IF_FAIL(surface_queue != NULL);
526         TBM_RETURN_IF_FAIL(impl != NULL);
527
528         if (!g_surf_queue_bufmgr)
529                 _init_tbm_surf_queue_bufmgr();
530
531         pthread_mutex_init(&surface_queue->lock, NULL);
532         pthread_cond_init(&surface_queue->free_cond, NULL);
533         pthread_cond_init(&surface_queue->dirty_cond, NULL);
534
535         surface_queue->queue_size = queue_size;
536         surface_queue->width = width;
537         surface_queue->height = height;
538         surface_queue->format = format;
539         surface_queue->impl = impl;
540         surface_queue->impl_data = data;
541
542         _queue_init(&surface_queue->free_queue);
543         _queue_init(&surface_queue->dirty_queue);
544         LIST_INITHEAD(&surface_queue->list);
545
546         LIST_INITHEAD(&surface_queue->destory_noti);
547         LIST_INITHEAD(&surface_queue->dequeuable_noti);
548         LIST_INITHEAD(&surface_queue->dequeue_noti);
549         LIST_INITHEAD(&surface_queue->acquirable_noti);
550         LIST_INITHEAD(&surface_queue->reset_noti);
551
552         if (surface_queue->impl && surface_queue->impl->init)
553                 surface_queue->impl->init(surface_queue);
554
555         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
556 }
557
558 tbm_surface_queue_error_e
559 tbm_surface_queue_add_destroy_cb(
560         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
561         void *data)
562 {
563         _tbm_surf_queue_mutex_lock();
564
565         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
566                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
567
568         pthread_mutex_lock(&surface_queue->lock);
569
570         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
571
572         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
573
574         pthread_mutex_unlock(&surface_queue->lock);
575
576         _tbm_surf_queue_mutex_unlock();
577
578         return TBM_SURFACE_QUEUE_ERROR_NONE;
579 }
580
581 tbm_surface_queue_error_e
582 tbm_surface_queue_remove_destroy_cb(
583         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
584         void *data)
585 {
586         _tbm_surf_queue_mutex_lock();
587
588         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
589                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
590
591         pthread_mutex_lock(&surface_queue->lock);
592
593         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
594
595         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
596
597         pthread_mutex_unlock(&surface_queue->lock);
598
599         _tbm_surf_queue_mutex_unlock();
600
601         return TBM_SURFACE_QUEUE_ERROR_NONE;
602 }
603
604 tbm_surface_queue_error_e
605 tbm_surface_queue_add_dequeuable_cb(
606         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
607         void *data)
608 {
609         _tbm_surf_queue_mutex_lock();
610
611         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
612                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
613
614         pthread_mutex_lock(&surface_queue->lock);
615
616         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
617
618         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
619
620         pthread_mutex_unlock(&surface_queue->lock);
621
622         _tbm_surf_queue_mutex_unlock();
623
624         return TBM_SURFACE_QUEUE_ERROR_NONE;
625 }
626
627 tbm_surface_queue_error_e
628 tbm_surface_queue_remove_dequeuable_cb(
629         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
630         void *data)
631 {
632         _tbm_surf_queue_mutex_lock();
633
634         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
635                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
636
637         pthread_mutex_lock(&surface_queue->lock);
638
639         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
640
641         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
642
643         pthread_mutex_unlock(&surface_queue->lock);
644
645         _tbm_surf_queue_mutex_unlock();
646
647         return TBM_SURFACE_QUEUE_ERROR_NONE;
648 }
649
650 tbm_surface_queue_error_e
651 tbm_surface_queue_add_dequeue_cb(
652         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
653         void *data)
654 {
655         _tbm_surf_queue_mutex_lock();
656
657         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
658                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
659
660         pthread_mutex_lock(&surface_queue->lock);
661
662         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
663
664         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
665
666         pthread_mutex_unlock(&surface_queue->lock);
667
668         _tbm_surf_queue_mutex_unlock();
669
670         return TBM_SURFACE_QUEUE_ERROR_NONE;
671 }
672
673 tbm_surface_queue_error_e
674 tbm_surface_queue_remove_dequeue_cb(
675         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
676         void *data)
677 {
678         _tbm_surf_queue_mutex_lock();
679
680         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
681                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
682
683         pthread_mutex_lock(&surface_queue->lock);
684
685         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
686
687         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
688
689         pthread_mutex_unlock(&surface_queue->lock);
690
691         _tbm_surf_queue_mutex_unlock();
692
693         return TBM_SURFACE_QUEUE_ERROR_NONE;
694 }
695
696 tbm_surface_queue_error_e
697 tbm_surface_queue_add_acquirable_cb(
698         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
699         void *data)
700 {
701         _tbm_surf_queue_mutex_lock();
702
703         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
704                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
705
706         pthread_mutex_lock(&surface_queue->lock);
707
708         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
709
710         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
711
712         pthread_mutex_unlock(&surface_queue->lock);
713
714         _tbm_surf_queue_mutex_unlock();
715
716         return TBM_SURFACE_QUEUE_ERROR_NONE;
717 }
718
719 tbm_surface_queue_error_e
720 tbm_surface_queue_remove_acquirable_cb(
721         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
722         void *data)
723 {
724         _tbm_surf_queue_mutex_lock();
725
726         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
727                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
728
729         pthread_mutex_lock(&surface_queue->lock);
730
731         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
732
733         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
734
735         pthread_mutex_unlock(&surface_queue->lock);
736
737         _tbm_surf_queue_mutex_unlock();
738
739         return TBM_SURFACE_QUEUE_ERROR_NONE;
740 }
741
742 tbm_surface_queue_error_e
743 tbm_surface_queue_set_alloc_cb(
744         tbm_surface_queue_h surface_queue,
745         tbm_surface_alloc_cb alloc_cb,
746         tbm_surface_free_cb free_cb,
747         void *data)
748 {
749         _tbm_surf_queue_mutex_lock();
750
751         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
752                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
753
754         pthread_mutex_lock(&surface_queue->lock);
755
756         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
757
758         surface_queue->alloc_cb = alloc_cb;
759         surface_queue->free_cb = free_cb;
760         surface_queue->alloc_cb_data = data;
761
762         pthread_mutex_unlock(&surface_queue->lock);
763
764         _tbm_surf_queue_mutex_unlock();
765
766         return TBM_SURFACE_QUEUE_ERROR_NONE;
767 }
768
769 int
770 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
771 {
772         int width;
773
774         _tbm_surf_queue_mutex_lock();
775
776         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
777
778         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
779
780         width = surface_queue->width;
781
782         _tbm_surf_queue_mutex_unlock();
783
784         return width;
785 }
786
787 int
788 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
789 {
790         int height;
791
792         _tbm_surf_queue_mutex_lock();
793
794         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
795
796         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
797
798         height = surface_queue->height;
799
800         _tbm_surf_queue_mutex_unlock();
801
802         return height;
803 }
804
805 int
806 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
807 {
808         int format;
809
810         _tbm_surf_queue_mutex_lock();
811
812         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
813
814         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
815
816         format = surface_queue->format;
817
818         _tbm_surf_queue_mutex_unlock();
819
820         return format;
821 }
822
823 int
824 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
825 {
826         int queue_size;
827
828         _tbm_surf_queue_mutex_lock();
829
830         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
831
832         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
833
834         queue_size = surface_queue->queue_size;
835
836         _tbm_surf_queue_mutex_unlock();
837
838         return queue_size;
839 }
840
841 tbm_surface_queue_error_e
842 tbm_surface_queue_add_reset_cb(
843         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
844         void *data)
845 {
846         _tbm_surf_queue_mutex_lock();
847
848         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
849                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
850
851         pthread_mutex_lock(&surface_queue->lock);
852
853         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
854
855         _notify_add(&surface_queue->reset_noti, reset_cb, data);
856
857         pthread_mutex_unlock(&surface_queue->lock);
858
859         _tbm_surf_queue_mutex_unlock();
860
861         return TBM_SURFACE_QUEUE_ERROR_NONE;
862 }
863
864 tbm_surface_queue_error_e
865 tbm_surface_queue_remove_reset_cb(
866         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
867         void *data)
868 {
869         _tbm_surf_queue_mutex_lock();
870
871         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
872                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
873
874         pthread_mutex_lock(&surface_queue->lock);
875
876         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
877
878         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
879
880         pthread_mutex_unlock(&surface_queue->lock);
881
882         _tbm_surf_queue_mutex_unlock();
883
884         return TBM_SURFACE_QUEUE_ERROR_NONE;
885 }
886
887 tbm_surface_queue_error_e
888 tbm_surface_queue_enqueue(tbm_surface_queue_h
889                           surface_queue, tbm_surface_h surface)
890 {
891         queue_node *node;
892         int queue_type;
893
894         _tbm_surf_queue_mutex_lock();
895
896         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
897                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
898         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
899                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
900
901         if (b_dump_queue)
902                 tbm_surface_internal_dump_buffer(surface, "enqueue");
903
904         pthread_mutex_lock(&surface_queue->lock);
905
906         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
907
908         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
909         if (node == NULL || queue_type != NODE_LIST) {
910                 TBM_LOG_E("tbm_surface_queue_enqueue::surface is not in a dequeued state; it is still in free_queue or dirty_queue, or not attached. node:%p, type:%d\n",
911                         node, queue_type);
912                 pthread_mutex_unlock(&surface_queue->lock);
913
914                 _tbm_surf_queue_mutex_unlock();
915                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
916         }
917
918         if (surface_queue->impl && surface_queue->impl->enqueue)
919                 surface_queue->impl->enqueue(surface_queue, node);
920         else
921                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
922
923         if (_queue_is_empty(&surface_queue->dirty_queue)) {
924                 TBM_LOG_E("enqueue surface but queue is empty node:%p\n", node);
925                 pthread_mutex_unlock(&surface_queue->lock);
926
927                 _tbm_surf_queue_mutex_unlock();
928                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
929         }
930
931         node->type = QUEUE_NODE_TYPE_ENQUEUE;
932
933         pthread_mutex_unlock(&surface_queue->lock);
934         pthread_cond_signal(&surface_queue->dirty_cond);
935
936         _tbm_surf_queue_mutex_unlock();
937
938         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
939
940         return TBM_SURFACE_QUEUE_ERROR_NONE;
941 }
942
943 tbm_surface_queue_error_e
944 tbm_surface_queue_dequeue(tbm_surface_queue_h
945                           surface_queue, tbm_surface_h *surface)
946 {
947         queue_node *node;
948
949         _tbm_surf_queue_mutex_lock();
950
951         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
952                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
953         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
954                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
955
956         *surface = NULL;
957
958         pthread_mutex_lock(&surface_queue->lock);
959
960         if (surface_queue->impl && surface_queue->impl->dequeue)
961                 node = surface_queue->impl->dequeue(surface_queue);
962         else
963                 node = _tbm_surface_queue_dequeue(surface_queue);
964
965         if (node == NULL || node->surface == NULL) {
966                 TBM_LOG_E("failed to dequeue: no dequeuable surface\n");
967                 pthread_mutex_unlock(&surface_queue->lock);
968
969                 _tbm_surf_queue_mutex_unlock();
970                 return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
971         }
972
973         node->type = QUEUE_NODE_TYPE_DEQUEUE;
974         *surface = node->surface;
975
976         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
977
978         pthread_mutex_unlock(&surface_queue->lock);
979
980         _tbm_surf_queue_mutex_unlock();
981
982         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
983
984         return TBM_SURFACE_QUEUE_ERROR_NONE;
985 }
986
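/*
 * Descriptive note: returns 1 when a buffer can be dequeued. If the free queue
 * is empty, the implementation is first given a chance to attach a new buffer
 * through need_attach(); with wait != 0 the call blocks on free_cond while
 * acquired buffers are still outstanding, i.e. until a release or a reset
 * signals the condition.
 */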
987 int
988 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
989 {
990         _tbm_surf_queue_mutex_lock();
991
992         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
993
994         pthread_mutex_lock(&surface_queue->lock);
995
996         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
997
998         if (_queue_is_empty(&surface_queue->free_queue)) {
999                 if (surface_queue->impl && surface_queue->impl->need_attach)
1000                         surface_queue->impl->need_attach(surface_queue);
1001
1002                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1003                         TBM_LOG_E("surface_queue:%p is invalid\n", surface_queue);
                             pthread_mutex_unlock(&surface_queue->lock);
1004                         _tbm_surf_queue_mutex_unlock();
1005                         return 0;
1006                 }
1007         }
1008
1009         if (!_queue_is_empty(&surface_queue->free_queue)) {
1010                 pthread_mutex_unlock(&surface_queue->lock);
1011                 _tbm_surf_queue_mutex_unlock();
1012                 return 1;
1013         }
1014
1015         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1016                                                 QUEUE_NODE_TYPE_ACQUIRE)) {
1017                 _tbm_surf_queue_mutex_unlock();
1018                 pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1019                 _tbm_surf_queue_mutex_lock();
1020
1021                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1022                         TBM_LOG_E("surface_queue:%p is invalid\n", surface_queue);
1023                         pthread_mutex_unlock(&surface_queue->lock);
1024                         _tbm_surf_queue_mutex_unlock();
1025                         return 0;
1026                 }
1027
1028                 pthread_mutex_unlock(&surface_queue->lock);
1029                 _tbm_surf_queue_mutex_unlock();
1030                 return 1;
1031         }
1032
1033         pthread_mutex_unlock(&surface_queue->lock);
1034         _tbm_surf_queue_mutex_unlock();
1035         return 0;
1036 }
1037
1038 tbm_surface_queue_error_e
1039 tbm_surface_queue_release(tbm_surface_queue_h
1040                           surface_queue, tbm_surface_h surface)
1041 {
1042         queue_node *node;
1043         int queue_type;
1044
1045         _tbm_surf_queue_mutex_lock();
1046
1047         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1048                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1049         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1050                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1051
1052         pthread_mutex_lock(&surface_queue->lock);
1053
1054         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1055
1056         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1057         if (node == NULL || queue_type != NODE_LIST) {
1058                 TBM_LOG_E("tbm_surface_queue_release::surface is not in a dequeued/acquired state; it is still in free_queue or dirty_queue, or not attached. node:%p, type:%d\n",
1059                         node, queue_type);
1060                 pthread_mutex_unlock(&surface_queue->lock);
1061
1062                 _tbm_surf_queue_mutex_unlock();
1063                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1064         }
1065
1066         if (surface_queue->queue_size < surface_queue->num_attached) {
1067                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1068
1069                 if (surface_queue->impl && surface_queue->impl->need_detach)
1070                         surface_queue->impl->need_detach(surface_queue, node);
1071                 else
1072                         _tbm_surface_queue_detach(surface_queue, surface);
1073
1074                 pthread_mutex_unlock(&surface_queue->lock);
1075
1076                 _tbm_surf_queue_mutex_unlock();
1077                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1078         }
1079
1080         if (surface_queue->impl && surface_queue->impl->release)
1081                 surface_queue->impl->release(surface_queue, node);
1082         else
1083                 _tbm_surface_queue_release(surface_queue, node, 1);
1084
1085         if (_queue_is_empty(&surface_queue->free_queue)) {
1086                 pthread_mutex_unlock(&surface_queue->lock);
1087
1088                 _tbm_surf_queue_mutex_unlock();
1089                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1090         }
1091
1092         node->type = QUEUE_NODE_TYPE_RELEASE;
1093
1094         pthread_mutex_unlock(&surface_queue->lock);
1095         pthread_cond_signal(&surface_queue->free_cond);
1096
1097         _tbm_surf_queue_mutex_unlock();
1098
1099         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1100
1101         return TBM_SURFACE_QUEUE_ERROR_NONE;
1102 }
1103
1104 tbm_surface_queue_error_e
1105 tbm_surface_queue_acquire(tbm_surface_queue_h
1106                           surface_queue, tbm_surface_h *surface)
1107 {
1108         queue_node *node;
1109
1110         _tbm_surf_queue_mutex_lock();
1111
1112         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1113                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1114         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1115                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1116
1117         *surface = NULL;
1118
1119         pthread_mutex_lock(&surface_queue->lock);
1120
1121         if (surface_queue->impl && surface_queue->impl->acquire)
1122                 node = surface_queue->impl->acquire(surface_queue);
1123         else
1124                 node = _tbm_surface_queue_acquire(surface_queue);
1125
1126         if (node == NULL || node->surface == NULL) {
1127                 TBM_LOG_E("failed to acquire: no acquirable surface\n");
1128                 pthread_mutex_unlock(&surface_queue->lock);
1129
1130                 _tbm_surf_queue_mutex_unlock();
1131                 return TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE;
1132         }
1133
1134         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1135
1136         *surface = node->surface;
1137
1138         TBM_QUEUE_TRACE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1139
1140         pthread_mutex_unlock(&surface_queue->lock);
1141
1142         _tbm_surf_queue_mutex_unlock();
1143
1144         if (b_dump_queue)
1145                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1146
1147         return TBM_SURFACE_QUEUE_ERROR_NONE;
1148 }
1149
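/*
 * Descriptive note: returns 1 when a buffer can be acquired. With wait != 0 the
 * call blocks on dirty_cond while dequeued buffers are still outstanding, i.e.
 * until an enqueue signals the condition.
 */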
1150 int
1151 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1152 {
1153         _tbm_surf_queue_mutex_lock();
1154
1155         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1156
1157         pthread_mutex_lock(&surface_queue->lock);
1158
1159         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1160
1161         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1162                 pthread_mutex_unlock(&surface_queue->lock);
1163                 _tbm_surf_queue_mutex_unlock();
1164                 return 1;
1165         }
1166
1167         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1168                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1169                 _tbm_surf_queue_mutex_unlock();
1170                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1171                 _tbm_surf_queue_mutex_lock();
1172
1173                 if (!_tbm_surface_queue_is_valid(surface_queue)) {
1174                         TBM_LOG_E("surface_queue:%p is invalid\n", surface_queue);
1175                         pthread_mutex_unlock(&surface_queue->lock);
1176                         _tbm_surf_queue_mutex_unlock();
1177                         return 0;
1178                 }
1179
1180                 pthread_mutex_unlock(&surface_queue->lock);
1181                 _tbm_surf_queue_mutex_unlock();
1182                 return 1;
1183         }
1184
1185         pthread_mutex_unlock(&surface_queue->lock);
1186         _tbm_surf_queue_mutex_unlock();
1187         return 0;
1188 }
1189
1190 void
1191 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1192 {
1193         queue_node *node, *tmp;
1194
1195         _tbm_surf_queue_mutex_lock();
1196
1197         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1198
1199         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1200
1201         LIST_DEL(&surface_queue->item_link);
1202
1203         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1204                 _queue_delete_node(surface_queue, node);
1205
1206         if (surface_queue->impl && surface_queue->impl->destroy)
1207                 surface_queue->impl->destroy(surface_queue);
1208
1209         _notify_emit(surface_queue, &surface_queue->destory_noti);
1210
1211         _notify_remove_all(&surface_queue->destory_noti);
1212         _notify_remove_all(&surface_queue->dequeuable_noti);
1213         _notify_remove_all(&surface_queue->dequeue_noti);
1214         _notify_remove_all(&surface_queue->acquirable_noti);
1215         _notify_remove_all(&surface_queue->reset_noti);
1216
1217         pthread_mutex_destroy(&surface_queue->lock);
1218
1219         free(surface_queue);
1220
1221         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1222                 _deinit_tbm_surf_queue_bufmgr();
1223
1224         _tbm_surf_queue_mutex_unlock();
1225 }
1226
1227 tbm_surface_queue_error_e
1228 tbm_surface_queue_reset(tbm_surface_queue_h
1229                         surface_queue, int width, int height, int format)
1230 {
1231         queue_node *node, *tmp;
1232
1233         _tbm_surf_queue_mutex_lock();
1234
1235         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1236                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1237
1238         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1239
1240         if (width == surface_queue->width && height == surface_queue->height &&
1241                 format == surface_queue->format) {
1242                 _tbm_surf_queue_mutex_unlock();
1243                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1244         }
1245
1246         pthread_mutex_lock(&surface_queue->lock);
1247
1248         surface_queue->width = width;
1249         surface_queue->height = height;
1250         surface_queue->format = format;
1251
1252         /* Destroy the attached surfaces and remove their nodes */
1253         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1254                 _queue_delete_node(surface_queue, node);
1255
1256         /* Reset queue */
1257         _queue_init(&surface_queue->free_queue);
1258         _queue_init(&surface_queue->dirty_queue);
1259         LIST_INITHEAD(&surface_queue->list);
1260
1261         surface_queue->num_attached = 0;
1262
1263         if (surface_queue->impl && surface_queue->impl->reset)
1264                 surface_queue->impl->reset(surface_queue);
1265
1266         pthread_mutex_unlock(&surface_queue->lock);
1267         pthread_cond_signal(&surface_queue->free_cond);
1268
1269         _tbm_surf_queue_mutex_unlock();
1270
1271         _notify_emit(surface_queue, &surface_queue->reset_noti);
1272
1273         return TBM_SURFACE_QUEUE_ERROR_NONE;
1274 }
1275
1276 tbm_surface_queue_error_e
1277 tbm_surface_queue_set_size(tbm_surface_queue_h
1278                         surface_queue, int queue_size, int flush)
1279 {
1280         queue_node *node, *tmp;
1281
1282         _tbm_surf_queue_mutex_lock();
1283
1284         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1285                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1286         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1287                                         TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1288
1289         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1290
1291         if ((surface_queue->queue_size == queue_size) && !flush) {
1292                 _tbm_surf_queue_mutex_unlock();
1293                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1294         }
1295
1296         pthread_mutex_lock(&surface_queue->lock);
1297
1298         if (flush) {
1299                 /* Destroy the attached surfaces and remove their nodes */
1300                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1301                         _queue_delete_node(surface_queue, node);
1302
1303                 /* Reset queue */
1304                 _queue_init(&surface_queue->free_queue);
1305                 _queue_init(&surface_queue->dirty_queue);
1306                 LIST_INITHEAD(&surface_queue->list);
1307
1308                 surface_queue->num_attached = 0;
1309                 surface_queue->queue_size = queue_size;
1310
1311                 if (surface_queue->impl && surface_queue->impl->reset)
1312                         surface_queue->impl->reset(surface_queue);
1313
1314                 pthread_mutex_unlock(&surface_queue->lock);
1315                 pthread_cond_signal(&surface_queue->free_cond);
1316
1317                 _tbm_surf_queue_mutex_unlock();
1318
1319                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1320
1321                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1322         } else {
1323                 if (surface_queue->queue_size > queue_size) {
1324                         int need_del = surface_queue->queue_size - queue_size;
1325
1326                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1327                                 TBM_QUEUE_TRACE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1328
1329                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1330                                         surface_queue->impl->need_detach(surface_queue, node);
1331                                 else
1332                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1333
1334                                 need_del--;
1335                                 if (need_del == 0)
1336                                         break;
1337                         }
1338                 }
1339
1340                 surface_queue->queue_size = queue_size;
1341
1342                 pthread_mutex_unlock(&surface_queue->lock);
1343
1344                 _tbm_surf_queue_mutex_unlock();
1345
1346                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1347         }
1348 }
1349
1350 tbm_surface_queue_error_e
1351 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1352 {
1353         queue_node *node, *tmp;
1354
1355         _tbm_surf_queue_mutex_lock();
1356
1357         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1358                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1359
1360         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1361
1362         if (surface_queue->num_attached == 0) {
1363                 _tbm_surf_queue_mutex_unlock();
1364                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1365         }
1366
1367         pthread_mutex_lock(&surface_queue->lock);
1368
1369         /* Destroy the attached surfaces and remove their nodes */
1370         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1371                 _queue_delete_node(surface_queue, node);
1372
1373         /* Reset queue */
1374         _queue_init(&surface_queue->free_queue);
1375         _queue_init(&surface_queue->dirty_queue);
1376         LIST_INITHEAD(&surface_queue->list);
1377
1378         surface_queue->num_attached = 0;
1379
1380         if (surface_queue->impl && surface_queue->impl->reset)
1381                 surface_queue->impl->reset(surface_queue);
1382
1383         pthread_mutex_unlock(&surface_queue->lock);
1384         pthread_cond_signal(&surface_queue->free_cond);
1385
1386         _tbm_surf_queue_mutex_unlock();
1387
1388         _notify_emit(surface_queue, &surface_queue->reset_noti);
1389
1390         return TBM_SURFACE_QUEUE_ERROR_NONE;
1391 }
1392
1393 tbm_surface_queue_error_e
1394 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1395                         tbm_surface_h *surfaces, int *num)
1396 {
1397         queue_node *node;
1398
1399         _tbm_surf_queue_mutex_lock();
1400
1401         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1402                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1403         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
1404                                TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
1405
1406         *num = 0;
1407
1408         pthread_mutex_lock(&surface_queue->lock);
1409
1410         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
1411                 if (surfaces)
1412                         surfaces[*num] = node->surface;
1413
1414                 *num = *num + 1;
1415         }
1416
1417         pthread_mutex_unlock(&surface_queue->lock);
1418
1419         _tbm_surf_queue_mutex_unlock();
1420
1421         return TBM_SURFACE_QUEUE_ERROR_NONE;
1422 }
1423
1424 typedef struct {
1425         int flags;
1426 } tbm_queue_default;
1427
1428 static void
1429 __tbm_queue_default_destroy(tbm_surface_queue_h surface_queue)
1430 {
1431         free(surface_queue->impl_data);
1432 }
1433
1434 static void
1435 __tbm_queue_default_need_attach(tbm_surface_queue_h surface_queue)
1436 {
1437         tbm_queue_default *data = (tbm_queue_default *)surface_queue->impl_data;
1438         tbm_surface_h surface;
1439
1440         if (surface_queue->queue_size == surface_queue->num_attached)
1441                 return;
1442
1443         if (surface_queue->alloc_cb) {
1444                 _tbm_surf_queue_mutex_unlock();
1445                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1446                 _tbm_surf_queue_mutex_lock();
1447
1448                 if (!surface)
1449                         return;
1450
1451                 tbm_surface_internal_ref(surface);
1452         } else {
1453                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1454                                 surface_queue->height,
1455                                 surface_queue->format,
1456                                 data->flags);
1457                 TBM_RETURN_IF_FAIL(surface != NULL);
1458         }
1459
1460         _tbm_surface_queue_attach(surface_queue, surface);
1461         tbm_surface_internal_unref(surface);
1462 }
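/*
 * Descriptive note: when an alloc_cb is installed, the global queue mutex is
 * dropped around the user callback and the returned surface is referenced
 * before being attached; otherwise a surface is created internally with the
 * queue's width/height/format and the flags given at creation time.
 */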
1463
1464 static const tbm_surface_queue_interface tbm_queue_default_impl = {
1465         NULL,                           /*__tbm_queue_default_init*/
1466         NULL,                           /*__tbm_queue_default_reset*/
1467         __tbm_queue_default_destroy,
1468         __tbm_queue_default_need_attach,
1469         NULL,                           /*__tbm_queue_default_enqueue*/
1470         NULL,                           /*__tbm_queue_default_release*/
1471         NULL,                           /*__tbm_queue_default_dequeue*/
1472         NULL,                           /*__tbm_queue_default_acquire*/
1473         NULL,                           /*__tbm_queue_default_need_detach*/
1474 };
1475
1476 tbm_surface_queue_h
1477 tbm_surface_queue_create(int queue_size, int width,
1478                          int height, int format, int flags)
1479 {
1480         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1481         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1482         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1483         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1484
1485         _tbm_surf_queue_mutex_lock();
1486
1487         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1488                                             sizeof(struct _tbm_surface_queue));
1489         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1490
1491         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1492
1493         tbm_queue_default *data = (tbm_queue_default *) calloc(1,
1494                                   sizeof(tbm_queue_default));
1495         if (data == NULL) {
1496                 free(surface_queue);
1497                 _tbm_surf_queue_mutex_unlock();
1498                 return NULL;
1499         }
1500
1501         data->flags = flags;
1502         _tbm_surface_queue_init(surface_queue,
1503                                 queue_size,
1504                                 width, height, format,
1505                                 &tbm_queue_default_impl, data);
1506
1507         _tbm_surf_queue_mutex_unlock();
1508
1509         return surface_queue;
1510 }
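/*
 * A minimal usage sketch (illustrative only, not part of this file). It
 * assumes TBM_FORMAT_ARGB8888 and TBM_BO_DEFAULT from the public tbm headers
 * and omits error handling:
 *
 *   tbm_surface_queue_h q = tbm_surface_queue_create(3, 640, 480,
 *                                                    TBM_FORMAT_ARGB8888,
 *                                                    TBM_BO_DEFAULT);
 *   tbm_surface_h buf;
 *
 *   // producer: dequeue a free buffer, render into it, then enqueue it
 *   if (tbm_surface_queue_can_dequeue(q, 1) &&
 *       tbm_surface_queue_dequeue(q, &buf) == TBM_SURFACE_QUEUE_ERROR_NONE)
 *           tbm_surface_queue_enqueue(q, buf);
 *
 *   // consumer: acquire the enqueued buffer, use it, then release it
 *   if (tbm_surface_queue_can_acquire(q, 1) &&
 *       tbm_surface_queue_acquire(q, &buf) == TBM_SURFACE_QUEUE_ERROR_NONE)
 *           tbm_surface_queue_release(q, buf);
 *
 *   tbm_surface_queue_destroy(q);
 */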
1511
1512 typedef struct {
1513         int flags;
1514         queue dequeue_list;
1515 } tbm_queue_sequence;
1516
1517 static void
1518 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
1519 {
1520         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1521
1522         _queue_init(&data->dequeue_list);
1523 }
1524
1525 static void
1526 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
1527 {
1528         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1529
1530         _queue_init(&data->dequeue_list);
1531 }
1532
1533 static void
1534 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
1535 {
1536         free(surface_queue->impl_data);
1537 }
1538
1539 static void
1540 __tbm_queue_sequence_need_attach(tbm_surface_queue_h surface_queue)
1541 {
1542         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1543         tbm_surface_h surface;
1544
1545         if (surface_queue->queue_size == surface_queue->num_attached)
1546                 return;
1547
1548         if (surface_queue->alloc_cb) {
1549                 _tbm_surf_queue_mutex_unlock();
1550                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
1551                 _tbm_surf_queue_mutex_lock();
1552
1553                 if (!surface)
1554                         return;
1555
1556                 tbm_surface_internal_ref(surface);
1557         } else {
1558                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
1559                                 surface_queue->height,
1560                                 surface_queue->format,
1561                                 data->flags);
1562                 TBM_RETURN_IF_FAIL(surface != NULL);
1563         }
1564
1565         _tbm_surface_queue_attach(surface_queue, surface);
1566         tbm_surface_internal_unref(surface);
1567 }
1568
1569 static void
1570 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
1571                              queue_node *node)
1572 {
1573         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1574         queue_node *next, *tmp;
1575
1576         node->priv_flags = 0;
1577
1578         LIST_FOR_EACH_ENTRY_SAFE(next, tmp, &data->dequeue_list.head, item_link) {
1579                 if (next->priv_flags)
1580                         break;
1581                 _queue_node_pop(&data->dequeue_list, next);
1582                 _tbm_surface_queue_enqueue(surface_queue, next, 1);
1583         }
1584 }
1585
1586 static queue_node *
1587 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
1588                              surface_queue)
1589 {
1590         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
1591         queue_node *node;
1592
1593         node = _tbm_surface_queue_dequeue(surface_queue);
1594         if (node) {
1595                 _queue_node_push_back(&data->dequeue_list, node);
1596                 node->priv_flags = 1;
1597         }
1598
1599         return node;
1600 }
1601
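/*
 * Descriptive note: the "sequence" queue keeps enqueued buffers in dequeue
 * order. __tbm_queue_sequence_dequeue() appends each dequeued node to
 * dequeue_list with priv_flags = 1; __tbm_queue_sequence_enqueue() clears the
 * flag and then moves the leading run of already-enqueued nodes from
 * dequeue_list to the dirty queue, so buffers become acquirable in the order
 * they were dequeued regardless of the order they were enqueued in.
 */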
1602 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
1603         __tbm_queue_sequence_init,
1604         __tbm_queue_sequence_reset,
1605         __tbm_queue_sequence_destroy,
1606         __tbm_queue_sequence_need_attach,
1607         __tbm_queue_sequence_enqueue,
1608         NULL,                                   /*__tbm_queue_sequence_release*/
1609         __tbm_queue_sequence_dequeue,
1610         NULL,                                   /*__tbm_queue_sequence_acquire*/
1611         NULL,                                   /*__tbm_queue_sequence_need_detach*/
1612 };
1613
1614 tbm_surface_queue_h
1615 tbm_surface_queue_sequence_create(int queue_size, int width,
1616                                   int height, int format, int flags)
1617 {
1618         TBM_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
1619         TBM_RETURN_VAL_IF_FAIL(width > 0, NULL);
1620         TBM_RETURN_VAL_IF_FAIL(height > 0, NULL);
1621         TBM_RETURN_VAL_IF_FAIL(format > 0, NULL);
1622
1623         _tbm_surf_queue_mutex_lock();
1624
1625         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
1626                                             sizeof(struct _tbm_surface_queue));
1627         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface_queue != NULL, NULL);
1628
1629         TBM_QUEUE_TRACE("tbm_surface_queue(%p)\n", surface_queue);
1630
1631         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
1632                                    sizeof(tbm_queue_sequence));
1633         if (data == NULL) {
1634                 free(surface_queue);
1635                 _tbm_surf_queue_mutex_unlock();
1636                 return NULL;
1637         }
1638
1639         data->flags = flags;
1640         _tbm_surface_queue_init(surface_queue,
1641                                 queue_size,
1642                                 width, height, format,
1643                                 &tbm_queue_sequence_impl, data);
1644
1645         _tbm_surf_queue_mutex_unlock();
1646
1647         return surface_queue;
1648 }
1649 /* LCOV_EXCL_STOP */