surface_queue: use magic number for checking the validity of surface_queue
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
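This change stamps each tbm_surface_queue with a magic number and validates it on every public entry point: _tbm_surface_queue_init() sets surface_queue->magic to TBM_SURFACE_QUEUE_MAGIC, and every exported tbm_surface_queue_* function checks the handle with _tbm_surface_queue_is_valid() via the TBM_SURF_QUEUE_RETURN*_IF_FAIL macros, rejecting NULL handles as well as pointers whose memory no longer holds the magic value. A minimal standalone sketch of the same pattern, using hypothetical names (my_queue, my_queue_create, my_queue_destroy) rather than the libtbm API:

#include <stdlib.h>

#define MY_QUEUE_MAGIC 0xBF031234u

struct my_queue {
        unsigned int magic;     /* stamped at create time, cleared at destroy time */
        /* ... payload ... */
};

static int
my_queue_is_valid(struct my_queue *q)
{
        /* reject NULL and anything that was never created or was already destroyed */
        return q && q->magic == MY_QUEUE_MAGIC;
}

struct my_queue *
my_queue_create(void)
{
        struct my_queue *q = calloc(1, sizeof *q);

        if (!q)
                return NULL;

        q->magic = MY_QUEUE_MAGIC;      /* mark the handle as valid */
        return q;
}

void
my_queue_destroy(struct my_queue *q)
{
        if (!my_queue_is_valid(q))
                return;

        q->magic = 0;   /* later calls on this stale handle now fail the validity check */
        free(q);
}

Clearing the magic on destroy is what turns a use-after-destroy into a detectable error instead of silent memory corruption; in the listing below the stamp happens in _tbm_surface_queue_init() and the check runs at the top of each exported function.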
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36 #include <time.h>
37
38 #define FREE_QUEUE      1
39 #define DIRTY_QUEUE     2
40 #define NODE_LIST       4
41
42 #define TBM_SURFACE_QUEUE_MAGIC 0xBF031234
43
44 static pthread_mutex_t tbm_surf_queue_lock = PTHREAD_MUTEX_INITIALIZER;
45 static void _tbm_surf_queue_mutex_unlock(void);
46
47 /* check condition */
48 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
49         if (!(cond)) {\
50                 TBM_ERR("'%s' failed.\n", #cond);\
51                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
52                 _tbm_surf_queue_mutex_unlock();\
53                 return;\
54         } \
55 }
56
57 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
58         if (!(cond)) {\
59                 TBM_ERR("'%s' failed.\n", #cond);\
60                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
61                 _tbm_surf_queue_mutex_unlock();\
62                 return val;\
63         } \
64 }
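/* Note: both macros assume the caller already holds the global tbm_surf_queue_lock;
   on failure they log the failed condition, record it via _tbm_set_last_result(),
   drop that lock and return. */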
65
66 typedef enum _queue_node_type {
67         QUEUE_NODE_TYPE_NONE,
68         QUEUE_NODE_TYPE_DEQUEUE,
69         QUEUE_NODE_TYPE_ENQUEUE,
70         QUEUE_NODE_TYPE_ACQUIRE,
71         QUEUE_NODE_TYPE_RELEASE
72 } Queue_Node_Type;
73
74 typedef struct {
75         struct list_head head;
76         int count;
77 } queue;
78
79 typedef struct {
80         tbm_surface_h surface;
81
82         struct list_head item_link;
83         struct list_head link;
84
85         Queue_Node_Type type;
86
87         unsigned int priv_flags;        /*for each queue*/
88
89         int delete_pending;
90 } queue_node;
91
92 typedef struct {
93         struct list_head link;
94
95         tbm_surface_queue_notify_cb cb;
96         void *data;
97 } queue_notify;
98
99 typedef struct {
100         struct list_head link;
101
102         tbm_surface_queue_trace_cb cb;
103         void *data;
104 } queue_trace;
105
106 typedef struct _tbm_surface_queue_interface {
107         void (*init)(tbm_surface_queue_h queue);
108         void (*reset)(tbm_surface_queue_h queue);
109         void (*destroy)(tbm_surface_queue_h queue);
110         void (*need_attach)(tbm_surface_queue_h queue);
111
112         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
113         void (*release)(tbm_surface_queue_h queue, queue_node *node);
114         queue_node *(*dequeue)(tbm_surface_queue_h queue);
115         queue_node *(*acquire)(tbm_surface_queue_h queue);
116         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
117 } tbm_surface_queue_interface;
118
119 struct _tbm_surface_queue {
120         unsigned int magic;
121         int width;
122         int height;
123         int format;
124         int queue_size;
125         int flags;
126         int num_attached;
127
128         queue free_queue;
129         queue dirty_queue;
130         struct list_head list;
131
132         struct list_head destory_noti;
133         struct list_head dequeuable_noti;
134         struct list_head dequeue_noti;
135         struct list_head can_dequeue_noti;
136         struct list_head acquirable_noti;
137         struct list_head reset_noti;
138         struct list_head trace_noti;
139
140         pthread_mutex_t lock;
141         pthread_cond_t free_cond;
142         pthread_cond_t dirty_cond;
143
144         const tbm_surface_queue_interface *impl;
145         void *impl_data;
146
147         //For external buffer allocation
148         tbm_surface_alloc_cb alloc_cb;
149         tbm_surface_free_cb free_cb;
150         void *alloc_cb_data;
151
152         int modes;
153         unsigned int enqueue_sync_count;
154         unsigned int acquire_sync_count;
155 };
156
157 static void
158 _tbm_surf_queue_mutex_lock(void)
159 {
160         pthread_mutex_lock(&tbm_surf_queue_lock);
161 }
162
163 static void
164 _tbm_surf_queue_mutex_unlock(void)
165 {
166         pthread_mutex_unlock(&tbm_surf_queue_lock);
167 }
168
169 static int
170 _tbm_surface_queue_magic_check(tbm_surface_queue_h surface_queue)
171 {
172         if (surface_queue->magic != TBM_SURFACE_QUEUE_MAGIC)
173                 return 0;
174
175         return 1;
176 }
177
178 static int
179 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
180 {
181         if (!surface_queue) {
182                 TBM_ERR("error: surface_queue is NULL.\n");
183                 return 0;
184         }
185
186         if (!_tbm_surface_queue_magic_check(surface_queue)) {
187                 TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
188                 return 0;
189         }
190
191         return 1;
192 }
193
194 static queue_node *
195 _queue_node_create(void)
196 {
197         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
198
199         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
200
201         return node;
202 }
203
204 static void
205 _queue_node_delete(queue_node *node)
206 {
207         LIST_DEL(&node->item_link);
208         LIST_DEL(&node->link);
209         free(node);
210 }
211
212 static int
213 _queue_is_empty(queue *queue)
214 {
215         if (LIST_IS_EMPTY(&queue->head))
216                 return 1;
217
218         return 0;
219 }
220
221 static void
222 _queue_node_push_back(queue *queue, queue_node *node)
223 {
224         LIST_ADDTAIL(&node->item_link, &queue->head);
225         queue->count++;
226 }
227
228 static void
229 _queue_node_push_front(queue *queue, queue_node *node)
230 {
231         LIST_ADD(&node->item_link, &queue->head);
232         queue->count++;
233 }
234
235 static queue_node *
236 _queue_node_pop_front(queue *queue)
237 {
238         queue_node *node;
239
240         if (!queue->head.next) return NULL;
241         if (!queue->count) return NULL;
242
243         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
244
245         LIST_DELINIT(&node->item_link);
246         queue->count--;
247
248         return node;
249 }
250
251 static queue_node *
252 _queue_node_pop(queue *queue, queue_node *node)
253 {
254         LIST_DELINIT(&node->item_link);
255         queue->count--;
256
257         return node;
258 }
259
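/* Find the queue_node that owns 'surface'. 'type' is a bitmask of FREE_QUEUE,
   DIRTY_QUEUE and NODE_LIST restricting where to search (0 means all three);
   when a node is found, *out_type reports which list it was found in. */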
260 static queue_node *
261 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
262                 tbm_surface_h surface, int *out_type)
263 {
264         queue_node *node = NULL;
265
266         if (type == 0)
267                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
268         if (out_type)
269                 *out_type = 0;
270
271         if (type & FREE_QUEUE) {
272                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
273                                          item_link) {
274                         if (node->surface == surface) {
275                                 if (out_type)
276                                         *out_type = FREE_QUEUE;
277
278                                 return node;
279                         }
280                 }
281         }
282
283         if (type & DIRTY_QUEUE) {
284                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
285                                          item_link) {
286                         if (node->surface == surface) {
287                                 if (out_type)
288                                         *out_type = DIRTY_QUEUE;
289
290                                 return node;
291                         }
292                 }
293         }
294
295         if (type & NODE_LIST) {
296                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
297                         if (node->surface == surface) {
298                                 if (out_type)
299                                         *out_type = NODE_LIST;
300
301                                 return node;
302                         }
303                 }
304         }
305
306         TBM_ERR("fail to get the queue_node.\n");
307
308         return NULL;
309 }
310
311 static void
312 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
313 {
314         if (node->surface) {
315                 if (surface_queue->free_cb) {
316                         surface_queue->free_cb(surface_queue,
317                                         surface_queue->alloc_cb_data,
318                                         node->surface);
319                 }
320
321                 tbm_surface_destroy(node->surface);
322         }
323
324         _queue_node_delete(node);
325 }
326
327 static void
328 _queue_init(queue *queue)
329 {
330         LIST_INITHEAD(&queue->head);
331
332         queue->count = 0;
333 }
334
335 static void
336 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
337             void *data)
338 {
339         TBM_RETURN_IF_FAIL(cb != NULL);
340
341         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
342
343         TBM_RETURN_IF_FAIL(item != NULL);
344
345         LIST_INITHEAD(&item->link);
346         item->cb = cb;
347         item->data = data;
348
349         LIST_ADDTAIL(&item->link, list);
350 }
351
352 static void
353 _notify_remove(struct list_head *list,
354                tbm_surface_queue_notify_cb cb, void *data)
355 {
356         queue_notify *item = NULL, *tmp;
357
358         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
359                 if (item->cb == cb && item->data == data) {
360                         LIST_DEL(&item->link);
361                         free(item);
362                         return;
363                 }
364         }
365
366         TBM_ERR("Cannot find notify callback\n");
367 }
368
369 static void
370 _notify_remove_all(struct list_head *list)
371 {
372         queue_notify *item = NULL, *tmp;
373
374         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
375                 LIST_DEL(&item->link);
376                 free(item);
377         }
378 }
379
380 static void
381 _notify_emit(tbm_surface_queue_h surface_queue,
382              struct list_head *list)
383 {
384         queue_notify *item = NULL, *tmp;
385
386         /*
387                 item->cb is a callback provided from outside libtbm.
388                 The tbm user may remove items from this list inside the callback,
389                 so we have to use LIST_FOR_EACH_ENTRY_SAFE.
390         */
391         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
392                 item->cb(surface_queue, item->data);
393 }
394
395 static void
396 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
397             void *data)
398 {
399         TBM_RETURN_IF_FAIL(cb != NULL);
400
401         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
402
403         TBM_RETURN_IF_FAIL(item != NULL);
404
405         LIST_INITHEAD(&item->link);
406         item->cb = cb;
407         item->data = data;
408
409         LIST_ADDTAIL(&item->link, list);
410 }
411
412 static void
413 _trace_remove(struct list_head *list,
414                tbm_surface_queue_trace_cb cb, void *data)
415 {
416         queue_trace *item = NULL, *tmp;
417
418         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
419                 if (item->cb == cb && item->data == data) {
420                         LIST_DEL(&item->link);
421                         free(item);
422                         return;
423                 }
424         }
425
426         TBM_ERR("Cannot find trace callback\n");
427 }
428
429 static void
430 _trace_remove_all(struct list_head *list)
431 {
432         queue_trace *item = NULL, *tmp;
433
434         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
435                 LIST_DEL(&item->link);
436                 free(item);
437         }
438 }
439
440 static void
441 _trace_emit(tbm_surface_queue_h surface_queue,
442              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
443 {
444         queue_trace *item = NULL, *tmp;
445
446         /*
447                 item->cb is a callback provided from outside libtbm.
448                 The tbm user may remove items from this list inside the callback,
449                 so we have to use LIST_FOR_EACH_ENTRY_SAFE.
450         */
451         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
452                 item->cb(surface_queue, surface, trace, item->data);
453 }
454
455 static int
456 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
457 {
458         queue_node *node = NULL;
459         int count = 0;
460
461         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
462                 if (node->type == type)
463                         count++;
464         }
465
466         return count;
467 }
468
469 static void
470 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
471                           tbm_surface_h surface)
472 {
473         queue_node *node;
474
475         node = _queue_node_create();
476         TBM_RETURN_IF_FAIL(node != NULL);
477
478         tbm_surface_internal_ref(surface);
479         node->surface = surface;
480
481         LIST_ADDTAIL(&node->link, &surface_queue->list);
482         surface_queue->num_attached++;
483         _queue_node_push_back(&surface_queue->free_queue, node);
484 }
485
486 static void
487 _tbm_surface_queue_need_attach(tbm_surface_queue_h surface_queue)
488 {
489         tbm_surface_h surface;
490
491         if (surface_queue->queue_size == surface_queue->num_attached)
492                 return;
493
494         if (surface_queue->alloc_cb) {
495                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
496
497                 /* silent return */
498                 if (!surface)
499                         return;
500
501                 tbm_surface_internal_ref(surface);
502         } else {
503                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
504                                 surface_queue->height,
505                                 surface_queue->format,
506                                 surface_queue->flags);
507                 TBM_RETURN_IF_FAIL(surface != NULL);
508         }
509
510         _tbm_surface_queue_attach(surface_queue, surface);
511         tbm_surface_internal_unref(surface);
512 }
513
514 static void
515 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
516                           tbm_surface_h surface)
517 {
518         queue_node *node;
519         int queue_type;
520
521         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
522         if (node) {
523                 _queue_delete_node(surface_queue, node);
524                 surface_queue->num_attached--;
525         }
526 }
527
528 static void
529 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
530                            queue_node *node, int push_back)
531 {
532         if (push_back)
533                 _queue_node_push_back(&surface_queue->dirty_queue, node);
534         else
535                 _queue_node_push_front(&surface_queue->dirty_queue, node);
536 }
537
538 static queue_node *
539 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
540 {
541         queue_node *node;
542
543         node = _queue_node_pop_front(&surface_queue->free_queue);
544
545         return node;
546 }
547
548 static queue_node *
549 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
550 {
551         queue_node *node;
552
553         if (_queue_is_empty(&surface_queue->dirty_queue))
554                 return NULL;
555
556         node = _queue_node_pop_front(&surface_queue->dirty_queue);
557
558         return node;
559 }
560
561 static void
562 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
563                            queue_node *node, int push_back)
564 {
565         if (push_back)
566                 _queue_node_push_back(&surface_queue->free_queue, node);
567         else
568                 _queue_node_push_front(&surface_queue->free_queue, node);
569 }
570
571 static void
572 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
573                         int queue_size,
574                         int width, int height, int format, int flags,
575                         const tbm_surface_queue_interface *impl, void *data)
576 {
577         pthread_condattr_t free_attr, dirty_attr;
578
579         TBM_RETURN_IF_FAIL(surface_queue != NULL);
580         TBM_RETURN_IF_FAIL(impl != NULL);
581
582         pthread_mutex_init(&surface_queue->lock, NULL);
583
584         pthread_condattr_init(&free_attr);
585         pthread_condattr_setclock(&free_attr, CLOCK_MONOTONIC);
586         pthread_cond_init(&surface_queue->free_cond, &free_attr);
587         pthread_condattr_destroy(&free_attr);
588
589         pthread_condattr_init(&dirty_attr);
590         pthread_condattr_setclock(&dirty_attr, CLOCK_MONOTONIC);
591         pthread_cond_init(&surface_queue->dirty_cond, &dirty_attr);
592         pthread_condattr_destroy(&dirty_attr);
593
594         surface_queue->magic = TBM_SURFACE_QUEUE_MAGIC;
595         surface_queue->queue_size = queue_size;
596         surface_queue->width = width;
597         surface_queue->height = height;
598         surface_queue->format = format;
599         surface_queue->flags = flags;
600         surface_queue->impl = impl;
601         surface_queue->impl_data = data;
602         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
603
604         _queue_init(&surface_queue->free_queue);
605         _queue_init(&surface_queue->dirty_queue);
606         LIST_INITHEAD(&surface_queue->list);
607
608         LIST_INITHEAD(&surface_queue->destory_noti);
609         LIST_INITHEAD(&surface_queue->dequeuable_noti);
610         LIST_INITHEAD(&surface_queue->dequeue_noti);
611         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
612         LIST_INITHEAD(&surface_queue->acquirable_noti);
613         LIST_INITHEAD(&surface_queue->reset_noti);
614         LIST_INITHEAD(&surface_queue->trace_noti);
615
616         if (surface_queue->impl && surface_queue->impl->init)
617                 surface_queue->impl->init(surface_queue);
618 }
619
620 tbm_surface_queue_error_e
621 tbm_surface_queue_add_destroy_cb(
622         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
623         void *data)
624 {
625         _tbm_surf_queue_mutex_lock();
626         _tbm_set_last_result(TBM_ERROR_NONE);
627
628         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
629                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
630         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
631                                TBM_ERROR_INVALID_PARAMETER);
632
633         pthread_mutex_lock(&surface_queue->lock);
634
635         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
636
637         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
638
639         pthread_mutex_unlock(&surface_queue->lock);
640
641         _tbm_surf_queue_mutex_unlock();
642
643         return TBM_SURFACE_QUEUE_ERROR_NONE;
644 }
645
646 tbm_surface_queue_error_e
647 tbm_surface_queue_remove_destroy_cb(
648         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
649         void *data)
650 {
651         _tbm_surf_queue_mutex_lock();
652         _tbm_set_last_result(TBM_ERROR_NONE);
653
654         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
655                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
656
657         pthread_mutex_lock(&surface_queue->lock);
658
659         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
660
661         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
662
663         pthread_mutex_unlock(&surface_queue->lock);
664
665         _tbm_surf_queue_mutex_unlock();
666
667         return TBM_SURFACE_QUEUE_ERROR_NONE;
668 }
669
670 tbm_surface_queue_error_e
671 tbm_surface_queue_add_dequeuable_cb(
672         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
673         void *data)
674 {
675         _tbm_surf_queue_mutex_lock();
676         _tbm_set_last_result(TBM_ERROR_NONE);
677
678         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
679                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
680         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
681                                TBM_ERROR_INVALID_PARAMETER);
682
683         pthread_mutex_lock(&surface_queue->lock);
684
685         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
686
687         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
688
689         pthread_mutex_unlock(&surface_queue->lock);
690
691         _tbm_surf_queue_mutex_unlock();
692
693         return TBM_SURFACE_QUEUE_ERROR_NONE;
694 }
695
696 tbm_surface_queue_error_e
697 tbm_surface_queue_remove_dequeuable_cb(
698         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
699         void *data)
700 {
701         _tbm_surf_queue_mutex_lock();
702         _tbm_set_last_result(TBM_ERROR_NONE);
703
704         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
705                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
706
707         pthread_mutex_lock(&surface_queue->lock);
708
709         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
710
711         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
712
713         pthread_mutex_unlock(&surface_queue->lock);
714
715         _tbm_surf_queue_mutex_unlock();
716
717         return TBM_SURFACE_QUEUE_ERROR_NONE;
718 }
719
720 tbm_surface_queue_error_e
721 tbm_surface_queue_add_dequeue_cb(
722         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
723         void *data)
724 {
725         _tbm_surf_queue_mutex_lock();
726         _tbm_set_last_result(TBM_ERROR_NONE);
727
728         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
729                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
730         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
731                                TBM_ERROR_INVALID_PARAMETER);
732
733         pthread_mutex_lock(&surface_queue->lock);
734
735         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
736
737         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
738
739         pthread_mutex_unlock(&surface_queue->lock);
740
741         _tbm_surf_queue_mutex_unlock();
742
743         return TBM_SURFACE_QUEUE_ERROR_NONE;
744 }
745
746 tbm_surface_queue_error_e
747 tbm_surface_queue_remove_dequeue_cb(
748         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
749         void *data)
750 {
751         _tbm_surf_queue_mutex_lock();
752         _tbm_set_last_result(TBM_ERROR_NONE);
753
754         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
755                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
756
757         pthread_mutex_lock(&surface_queue->lock);
758
759         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
760
761         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
762
763         pthread_mutex_unlock(&surface_queue->lock);
764
765         _tbm_surf_queue_mutex_unlock();
766
767         return TBM_SURFACE_QUEUE_ERROR_NONE;
768 }
769
770 tbm_surface_queue_error_e
771 tbm_surface_queue_add_can_dequeue_cb(
772         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
773         void *data)
774 {
775         _tbm_surf_queue_mutex_lock();
776         _tbm_set_last_result(TBM_ERROR_NONE);
777
778         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
779                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
780         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
781                                TBM_ERROR_INVALID_PARAMETER);
782
783         pthread_mutex_lock(&surface_queue->lock);
784
785         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
786
787         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
788
789         pthread_mutex_unlock(&surface_queue->lock);
790
791         _tbm_surf_queue_mutex_unlock();
792
793         return TBM_SURFACE_QUEUE_ERROR_NONE;
794 }
795
796 tbm_surface_queue_error_e
797 tbm_surface_queue_remove_can_dequeue_cb(
798         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
799         void *data)
800 {
801         _tbm_surf_queue_mutex_lock();
802         _tbm_set_last_result(TBM_ERROR_NONE);
803
804         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
805                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
806
807         pthread_mutex_lock(&surface_queue->lock);
808
809         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
810
811         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
812
813         pthread_mutex_unlock(&surface_queue->lock);
814
815         _tbm_surf_queue_mutex_unlock();
816
817         return TBM_SURFACE_QUEUE_ERROR_NONE;
818 }
819
820 tbm_surface_queue_error_e
821 tbm_surface_queue_add_acquirable_cb(
822         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
823         void *data)
824 {
825         _tbm_surf_queue_mutex_lock();
826         _tbm_set_last_result(TBM_ERROR_NONE);
827
828         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
829                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
830         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
831                                TBM_ERROR_INVALID_PARAMETER);
832
833         pthread_mutex_lock(&surface_queue->lock);
834
835         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
836
837         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
838
839         pthread_mutex_unlock(&surface_queue->lock);
840
841         _tbm_surf_queue_mutex_unlock();
842
843         return TBM_SURFACE_QUEUE_ERROR_NONE;
844 }
845
846 tbm_surface_queue_error_e
847 tbm_surface_queue_remove_acquirable_cb(
848         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
849         void *data)
850 {
851         _tbm_surf_queue_mutex_lock();
852         _tbm_set_last_result(TBM_ERROR_NONE);
853
854         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
855                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
856
857         pthread_mutex_lock(&surface_queue->lock);
858
859         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
860
861         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
862
863         pthread_mutex_unlock(&surface_queue->lock);
864
865         _tbm_surf_queue_mutex_unlock();
866
867         return TBM_SURFACE_QUEUE_ERROR_NONE;
868 }
869
870 tbm_surface_queue_error_e
871 tbm_surface_queue_add_trace_cb(
872         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
873         void *data)
874 {
875         _tbm_surf_queue_mutex_lock();
876         _tbm_set_last_result(TBM_ERROR_NONE);
877
878         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
879                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
880         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
881                                TBM_ERROR_INVALID_PARAMETER);
882
883         pthread_mutex_lock(&surface_queue->lock);
884
885         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
886
887         _trace_add(&surface_queue->trace_noti, trace_cb, data);
888
889         pthread_mutex_unlock(&surface_queue->lock);
890
891         _tbm_surf_queue_mutex_unlock();
892
893         return TBM_SURFACE_QUEUE_ERROR_NONE;
894 }
895
896 tbm_surface_queue_error_e
897 tbm_surface_queue_remove_trace_cb(
898         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
899         void *data)
900 {
901         _tbm_surf_queue_mutex_lock();
902         _tbm_set_last_result(TBM_ERROR_NONE);
903
904         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
905                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
906
907         pthread_mutex_lock(&surface_queue->lock);
908
909         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
910
911         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
912
913         pthread_mutex_unlock(&surface_queue->lock);
914
915         _tbm_surf_queue_mutex_unlock();
916
917         return TBM_SURFACE_QUEUE_ERROR_NONE;
918 }
919
920 tbm_surface_queue_error_e
921 tbm_surface_queue_set_alloc_cb(
922         tbm_surface_queue_h surface_queue,
923         tbm_surface_alloc_cb alloc_cb,
924         tbm_surface_free_cb free_cb,
925         void *data)
926 {
927         _tbm_surf_queue_mutex_lock();
928         _tbm_set_last_result(TBM_ERROR_NONE);
929
930         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
931                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
932
933         pthread_mutex_lock(&surface_queue->lock);
934
935         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
936
937         surface_queue->alloc_cb = alloc_cb;
938         surface_queue->free_cb = free_cb;
939         surface_queue->alloc_cb_data = data;
940
941         pthread_mutex_unlock(&surface_queue->lock);
942
943         _tbm_surf_queue_mutex_unlock();
944
945         return TBM_SURFACE_QUEUE_ERROR_NONE;
946 }
947
948 int
949 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
950 {
951         int width;
952
953         _tbm_surf_queue_mutex_lock();
954         _tbm_set_last_result(TBM_ERROR_NONE);
955
956         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
957
958         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
959
960         width = surface_queue->width;
961
962         _tbm_surf_queue_mutex_unlock();
963
964         return width;
965 }
966
967 int
968 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
969 {
970         int height;
971
972         _tbm_surf_queue_mutex_lock();
973         _tbm_set_last_result(TBM_ERROR_NONE);
974
975         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
976
977         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
978
979         height = surface_queue->height;
980
981         _tbm_surf_queue_mutex_unlock();
982
983         return height;
984 }
985
986 int
987 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
988 {
989         int format;
990
991         _tbm_surf_queue_mutex_lock();
992         _tbm_set_last_result(TBM_ERROR_NONE);
993
994         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
995
996         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
997
998         format = surface_queue->format;
999
1000         _tbm_surf_queue_mutex_unlock();
1001
1002         return format;
1003 }
1004
1005 int
1006 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1007 {
1008         int queue_size;
1009
1010         _tbm_surf_queue_mutex_lock();
1011         _tbm_set_last_result(TBM_ERROR_NONE);
1012
1013         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1014
1015         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1016
1017         queue_size = surface_queue->queue_size;
1018
1019         _tbm_surf_queue_mutex_unlock();
1020
1021         return queue_size;
1022 }
1023
1024 tbm_surface_queue_error_e
1025 tbm_surface_queue_add_reset_cb(
1026         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1027         void *data)
1028 {
1029         _tbm_surf_queue_mutex_lock();
1030         _tbm_set_last_result(TBM_ERROR_NONE);
1031
1032         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1033                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1034         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
1035                                TBM_ERROR_INVALID_PARAMETER);
1036
1037         pthread_mutex_lock(&surface_queue->lock);
1038
1039         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1040
1041         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1042
1043         pthread_mutex_unlock(&surface_queue->lock);
1044
1045         _tbm_surf_queue_mutex_unlock();
1046
1047         return TBM_SURFACE_QUEUE_ERROR_NONE;
1048 }
1049
1050 tbm_surface_queue_error_e
1051 tbm_surface_queue_remove_reset_cb(
1052         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1053         void *data)
1054 {
1055         _tbm_surf_queue_mutex_lock();
1056         _tbm_set_last_result(TBM_ERROR_NONE);
1057
1058         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1059                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1060
1061         pthread_mutex_lock(&surface_queue->lock);
1062
1063         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1064
1065         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1066
1067         pthread_mutex_unlock(&surface_queue->lock);
1068
1069         _tbm_surf_queue_mutex_unlock();
1070
1071         return TBM_SURFACE_QUEUE_ERROR_NONE;
1072 }
1073
1074 tbm_surface_queue_error_e
1075 tbm_surface_queue_enqueue(tbm_surface_queue_h
1076                           surface_queue, tbm_surface_h surface)
1077 {
1078         queue_node *node;
1079         int queue_type;
1080
1081         _tbm_surf_queue_mutex_lock();
1082         _tbm_set_last_result(TBM_ERROR_NONE);
1083
1084         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1085                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1086         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1087                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1088
1089         if (b_dump_queue)
1090                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1091
1092         pthread_mutex_lock(&surface_queue->lock);
1093
1094         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1095
1096         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1097         if (node == NULL || queue_type != NODE_LIST) {
1098                 TBM_ERR("tbm_surface_queue_enqueue::Surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1099                         node, queue_type);
1100                 pthread_mutex_unlock(&surface_queue->lock);
1101
1102                 _tbm_surf_queue_mutex_unlock();
1103
1104                 if (!node) {
1105                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1106                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1107                 } else {
1108                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1109                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1110                 }
1111         }
1112
1113         if (surface_queue->impl && surface_queue->impl->enqueue)
1114                 surface_queue->impl->enqueue(surface_queue, node);
1115         else
1116                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1117
1118         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1119                 TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1120                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
1121                 pthread_mutex_unlock(&surface_queue->lock);
1122
1123                 _tbm_surf_queue_mutex_unlock();
1124                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
1125         }
1126
1127         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1128
1129         if (surface_queue->enqueue_sync_count == 1) {
1130                 tbm_surface_info_s info;
1131                 int ret;
1132
1133                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1134                 if (ret == TBM_SURFACE_ERROR_NONE)
1135                         tbm_surface_unmap(surface);
1136         }
1137
1138         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1139
1140         pthread_mutex_unlock(&surface_queue->lock);
1141         pthread_cond_signal(&surface_queue->dirty_cond);
1142
1143         _tbm_surf_queue_mutex_unlock();
1144
1145         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1146
1147         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1148
1149         return TBM_SURFACE_QUEUE_ERROR_NONE;
1150 }
1151
1152 tbm_surface_queue_error_e
1153 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1154                           surface_queue, tbm_surface_h surface)
1155 {
1156         queue_node *node;
1157         int queue_type;
1158
1159         _tbm_surf_queue_mutex_lock();
1160         _tbm_set_last_result(TBM_ERROR_NONE);
1161
1162         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1163                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1164         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1165                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1166
1167         pthread_mutex_lock(&surface_queue->lock);
1168
1169         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1170
1171         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1172         if (node == NULL || queue_type != NODE_LIST) {
1173                 TBM_ERR("tbm_surface_queue_cancel_dequeue::Surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1174                         node, queue_type);
1175                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1176                 pthread_mutex_unlock(&surface_queue->lock);
1177
1178                 _tbm_surf_queue_mutex_unlock();
1179                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1180         }
1181
1182         if (node->delete_pending) {
1183                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1184
1185                 _queue_delete_node(surface_queue, node);
1186
1187                 pthread_mutex_unlock(&surface_queue->lock);
1188
1189                 _tbm_surf_queue_mutex_unlock();
1190
1191                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1192
1193                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1194         }
1195
1196         if (surface_queue->queue_size < surface_queue->num_attached) {
1197                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1198
1199                 if (surface_queue->impl && surface_queue->impl->need_detach)
1200                         surface_queue->impl->need_detach(surface_queue, node);
1201                 else
1202                         _tbm_surface_queue_detach(surface_queue, surface);
1203
1204                 pthread_mutex_unlock(&surface_queue->lock);
1205
1206                 _tbm_surf_queue_mutex_unlock();
1207
1208                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1209
1210                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1211         }
1212
1213         if (surface_queue->impl && surface_queue->impl->release)
1214                 surface_queue->impl->release(surface_queue, node);
1215         else
1216                 _tbm_surface_queue_release(surface_queue, node, 1);
1217
1218         if (_queue_is_empty(&surface_queue->free_queue)) {
1219                 TBM_ERR("surface_queue->free_queue is empty.\n");
1220                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1221                 pthread_mutex_unlock(&surface_queue->lock);
1222
1223                 _tbm_surf_queue_mutex_unlock();
1224                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1225         }
1226
1227         node->type = QUEUE_NODE_TYPE_RELEASE;
1228
1229         pthread_mutex_unlock(&surface_queue->lock);
1230         pthread_cond_signal(&surface_queue->free_cond);
1231
1232         _tbm_surf_queue_mutex_unlock();
1233
1234         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1235
1236         return TBM_SURFACE_QUEUE_ERROR_NONE;
1237 }
1238
1239 tbm_surface_queue_error_e
1240 tbm_surface_queue_dequeue(tbm_surface_queue_h
1241                           surface_queue, tbm_surface_h *surface)
1242 {
1243         queue_node *node;
1244
1245         _tbm_surf_queue_mutex_lock();
1246         _tbm_set_last_result(TBM_ERROR_NONE);
1247
1248         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1249                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1250         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1251                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1252
1253         *surface = NULL;
1254
1255         pthread_mutex_lock(&surface_queue->lock);
1256
1257         if (_queue_is_empty(&surface_queue->free_queue)) {
1258                 if (surface_queue->impl && surface_queue->impl->need_attach)
1259                         surface_queue->impl->need_attach(surface_queue);
1260                 else
1261                         _tbm_surface_queue_need_attach(surface_queue);
1262         }
1263
1264         if (surface_queue->impl && surface_queue->impl->dequeue)
1265                 node = surface_queue->impl->dequeue(surface_queue);
1266         else
1267                 node = _tbm_surface_queue_dequeue(surface_queue);
1268
1269         if (node == NULL || node->surface == NULL) {
1270                 TBM_ERR("_queue_node_pop_front failed\n");
1271                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1272                 pthread_mutex_unlock(&surface_queue->lock);
1273
1274                 _tbm_surf_queue_mutex_unlock();
1275                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1276         }
1277
1278         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1279         *surface = node->surface;
1280
1281         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1282
1283         pthread_mutex_unlock(&surface_queue->lock);
1284
1285         _tbm_surf_queue_mutex_unlock();
1286
1287         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1288
1289         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1290
1291         return TBM_SURFACE_QUEUE_ERROR_NONE;
1292 }
1293
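/* ms_timeout is converted below into an absolute CLOCK_MONOTONIC deadline; this
   matches the condition variables, which are created with
   pthread_condattr_setclock(CLOCK_MONOTONIC) in _tbm_surface_queue_init(). */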
1294 tbm_surface_queue_error_e
1295 tbm_surface_queue_can_dequeue_wait_timeout(tbm_surface_queue_h surface_queue, int ms_timeout)
1296 {
1297         int ret;
1298         struct timespec tp;
1299
1300         _tbm_surf_queue_mutex_lock();
1301         _tbm_set_last_result(TBM_ERROR_NONE);
1302
1303         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1304                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1305
1306         _tbm_surf_queue_mutex_unlock();
1307
1308         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1309
1310         _tbm_surf_queue_mutex_lock();
1311
1312         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1313                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1314
1315         pthread_mutex_lock(&surface_queue->lock);
1316
1317         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1318
1319         if (_queue_is_empty(&surface_queue->free_queue)) {
1320                 if (surface_queue->impl && surface_queue->impl->need_attach)
1321                         surface_queue->impl->need_attach(surface_queue);
1322                 else
1323                         _tbm_surface_queue_need_attach(surface_queue);
1324         }
1325
1326         if (!_queue_is_empty(&surface_queue->free_queue)) {
1327                 pthread_mutex_unlock(&surface_queue->lock);
1328                 _tbm_surf_queue_mutex_unlock();
1329                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1330         }
1331
1332         _tbm_surf_queue_mutex_unlock();
1333
1334         while (1) {
1335                 clock_gettime(CLOCK_MONOTONIC, &tp);
1336
1337                 if (ms_timeout >= 1000)
1338                         tp.tv_sec += ms_timeout / 1000;
1339
1340                 tp.tv_nsec += (ms_timeout % 1000) * 1000000;
1341
1342                 if (tp.tv_nsec >= 1000000000L) {
1343                         tp.tv_sec++;
1344                         tp.tv_nsec -= 1000000000L;
1345                 }
1346
1347                 ret = pthread_cond_timedwait(&surface_queue->free_cond, &surface_queue->lock, &tp);
1348                 if (ret) {
1349                         if (ret == ETIMEDOUT) {
1350                                 TBM_ERR("surface_queue:%p can dequeue wait timeout", surface_queue);
1351                                 pthread_mutex_unlock(&surface_queue->lock);
1352                                 return TBM_SURFACE_QUEUE_ERROR_TIMEOUT;
1353                         } else {
1354                                 TBM_INFO("surface_queue:%p timedwait error, retrying wait", surface_queue);
1355                         }
1356                 } else {
1357                         if (surface_queue->impl && surface_queue->impl->need_attach)
1358                                 surface_queue->impl->need_attach(surface_queue);
1359                         else
1360                                 _tbm_surface_queue_need_attach(surface_queue);
1361
1362                         if (!_queue_is_empty(&surface_queue->free_queue)) {
1363                                 pthread_mutex_unlock(&surface_queue->lock);
1364                                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1365                         }
1366                 }
1367         }
1368 }
1369
1370 int
1371 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1372 {
1373         _tbm_surf_queue_mutex_lock();
1374         _tbm_set_last_result(TBM_ERROR_NONE);
1375
1376         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1377
1378         _tbm_surf_queue_mutex_unlock();
1379
1380         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1381
1382         _tbm_surf_queue_mutex_lock();
1383
1384         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1385
1386         pthread_mutex_lock(&surface_queue->lock);
1387
1388         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1389
1390         if (_queue_is_empty(&surface_queue->free_queue)) {
1391                 if (surface_queue->impl && surface_queue->impl->need_attach)
1392                         surface_queue->impl->need_attach(surface_queue);
1393                 else
1394                         _tbm_surface_queue_need_attach(surface_queue);
1395         }
1396
1397         if (!_queue_is_empty(&surface_queue->free_queue)) {
1398                 pthread_mutex_unlock(&surface_queue->lock);
1399                 _tbm_surf_queue_mutex_unlock();
1400                 return 1;
1401         }
1402
1403         if (wait) {
1404                 _tbm_surf_queue_mutex_unlock();
1405                 while (1) {
1406                         pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
1407
1408                         if (surface_queue->impl && surface_queue->impl->need_attach)
1409                                 surface_queue->impl->need_attach(surface_queue);
1410                         else
1411                                 _tbm_surface_queue_need_attach(surface_queue);
1412
1413                         if (!_queue_is_empty(&surface_queue->free_queue)) {
1414                                 pthread_mutex_unlock(&surface_queue->lock);
1415                                 return 1;
1416                         }
1417                 }
1418         }
1419
1420         pthread_mutex_unlock(&surface_queue->lock);
1421         _tbm_surf_queue_mutex_unlock();
1422         return 0;
1423 }
1424
1425 tbm_surface_queue_error_e
1426 tbm_surface_queue_release(tbm_surface_queue_h
1427                           surface_queue, tbm_surface_h surface)
1428 {
1429         queue_node *node;
1430         int queue_type;
1431
1432         _tbm_surf_queue_mutex_lock();
1433         _tbm_set_last_result(TBM_ERROR_NONE);
1434
1435         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1436                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1437         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1438                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1439
1440         pthread_mutex_lock(&surface_queue->lock);
1441
1442         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1443
1444         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1445         if (node == NULL || queue_type != NODE_LIST) {
1446                 TBM_ERR("tbm_surface_queue_release::Surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1447                         node, queue_type);
1448                 pthread_mutex_unlock(&surface_queue->lock);
1449
1450                 _tbm_surf_queue_mutex_unlock();
1451
1452                 if (!node) {
1453                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1454                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1455                 } else {
1456                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1457                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1458                 }
1459         }
1460
1461         if (node->delete_pending) {
1462                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1463
1464                 _queue_delete_node(surface_queue, node);
1465
1466                 pthread_mutex_unlock(&surface_queue->lock);
1467
1468                 _tbm_surf_queue_mutex_unlock();
1469
1470                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1471
1472                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1473         }
1474
1475         if (surface_queue->queue_size < surface_queue->num_attached) {
1476                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1477
1478                 if (surface_queue->impl && surface_queue->impl->need_detach)
1479                         surface_queue->impl->need_detach(surface_queue, node);
1480                 else
1481                         _tbm_surface_queue_detach(surface_queue, surface);
1482
1483                 pthread_mutex_unlock(&surface_queue->lock);
1484
1485                 _tbm_surf_queue_mutex_unlock();
1486
1487                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1488
1489                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1490         }
1491
1492         if (surface_queue->impl && surface_queue->impl->release)
1493                 surface_queue->impl->release(surface_queue, node);
1494         else
1495                 _tbm_surface_queue_release(surface_queue, node, 1);
1496
1497         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1498                 TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
1499                 _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
1500                 pthread_mutex_unlock(&surface_queue->lock);
1501
1502                 _tbm_surf_queue_mutex_unlock();
1503                 return TBM_SURFACE_ERROR_INVALID_OPERATION;
1504         }
1505
1506         node->type = QUEUE_NODE_TYPE_RELEASE;
1507
1508         pthread_mutex_unlock(&surface_queue->lock);
1509         pthread_cond_signal(&surface_queue->free_cond);
1510
1511         _tbm_surf_queue_mutex_unlock();
1512
1513         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1514
1515         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1516
1517         return TBM_SURFACE_QUEUE_ERROR_NONE;
1518 }
1519
1520 tbm_surface_queue_error_e
1521 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1522                         surface_queue, tbm_surface_h surface)
1523 {
1524         queue_node *node;
1525         int queue_type;
1526
1527         _tbm_surf_queue_mutex_lock();
1528         _tbm_set_last_result(TBM_ERROR_NONE);
1529
1530         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1531                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1532         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1533                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1534
1535         pthread_mutex_lock(&surface_queue->lock);
1536
1537         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1538
1539         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1540         if (node == NULL || queue_type != NODE_LIST) {
1541                 TBM_ERR("tbm_surface_queue_cancel_acquire::Surface is unknown or already in the free_queue/dirty_queue. node:%p, type:%d\n",
1542                         node, queue_type);
1543                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1544                 pthread_mutex_unlock(&surface_queue->lock);
1545
1546                 _tbm_surf_queue_mutex_unlock();
1547                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1548         }
1549
1550         if (surface_queue->impl && surface_queue->impl->enqueue)
1551                 surface_queue->impl->enqueue(surface_queue, node);
1552         else
1553                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1554
1555         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1556                 TBM_ERR("enqueue surface but dirty_queue is empty. node:%p\n", node);
1557                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1558                 pthread_mutex_unlock(&surface_queue->lock);
1559
1560                 _tbm_surf_queue_mutex_unlock();
1561                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1562         }
1563
1564         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1565
1566         pthread_mutex_unlock(&surface_queue->lock);
1567         pthread_cond_signal(&surface_queue->dirty_cond);
1568
1569         _tbm_surf_queue_mutex_unlock();
1570
1571         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1572
1573         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1574
1575         return TBM_SURFACE_QUEUE_ERROR_NONE;
1576 }
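
/*
 * Note: cancelling an acquire re-runs the enqueue path above, so the surface
 * goes back to dirty_queue and can be acquired again later; it is not pushed
 * back to free_queue.
 */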
1577
1578 tbm_surface_queue_error_e
1579 tbm_surface_queue_acquire(tbm_surface_queue_h
1580                           surface_queue, tbm_surface_h *surface)
1581 {
1582         queue_node *node;
1583
1584         _tbm_surf_queue_mutex_lock();
1585         _tbm_set_last_result(TBM_ERROR_NONE);
1586
1587         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1588                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1589         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1590                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1591
1592         *surface = NULL;
1593
1594         pthread_mutex_lock(&surface_queue->lock);
1595
1596         if (surface_queue->impl && surface_queue->impl->acquire)
1597                 node = surface_queue->impl->acquire(surface_queue);
1598         else
1599                 node = _tbm_surface_queue_acquire(surface_queue);
1600
1601         if (node == NULL || node->surface == NULL) {
1602                 TBM_ERR("_queue_node_pop_front failed\n");
1603                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1604                 pthread_mutex_unlock(&surface_queue->lock);
1605
1606                 _tbm_surf_queue_mutex_unlock();
1607                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1608         }
1609
1610         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1611
1612         *surface = node->surface;
1613
1614         if (surface_queue->acquire_sync_count == 1) {
1615                 tbm_surface_info_s info;
1616                 int ret;
1617
1618                 TBM_ERR("start map surface:%p", *surface);
1619                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1620                 TBM_ERR("end map surface:%p", *surface);
1621                 if (ret == TBM_SURFACE_ERROR_NONE)
1622                         tbm_surface_unmap(*surface);
1623         }
1624
1625         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1626
1627         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1628
1629         pthread_mutex_unlock(&surface_queue->lock);
1630
1631         _tbm_surf_queue_mutex_unlock();
1632
1633         if (b_dump_queue)
1634                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1635
1636         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1637
1638         return TBM_SURFACE_QUEUE_ERROR_NONE;
1639 }
1640
1641 int
1642 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1643 {
1644         _tbm_surf_queue_mutex_lock();
1645         _tbm_set_last_result(TBM_ERROR_NONE);
1646
1647         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1648
1649         pthread_mutex_lock(&surface_queue->lock);
1650
1651         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1652
1653         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1654                 pthread_mutex_unlock(&surface_queue->lock);
1655                 _tbm_surf_queue_mutex_unlock();
1656                 return 1;
1657         }
1658
1659         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1660                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1661                 _tbm_surf_queue_mutex_unlock();
1662                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1663                 pthread_mutex_unlock(&surface_queue->lock);
1664                 return 1;
1665         }
1666
1667         pthread_mutex_unlock(&surface_queue->lock);
1668         _tbm_surf_queue_mutex_unlock();
1669         return 0;
1670 }
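
/*
 * A minimal consumer-side sketch (illustrative only, not part of this file);
 * it assumes the public API declared in tbm_surface_queue.h, a 'queue' handle
 * created elsewhere, and a producer that is already enqueuing buffers. Error
 * handling is reduced to the bare minimum:
 *
 *   tbm_surface_h surface = NULL;
 *   tbm_surface_info_s info;
 *
 *   if (tbm_surface_queue_can_acquire(queue, 1)) {
 *       if (tbm_surface_queue_acquire(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *           if (tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info) == TBM_SURFACE_ERROR_NONE) {
 *               read from info.planes[0].ptr here
 *               tbm_surface_unmap(surface);
 *           }
 *           tbm_surface_queue_release(queue, surface);
 *       }
 *   }
 */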
1671
1672 void
1673 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1674 {
1675         queue_node *node = NULL, *tmp;
1676
1677         _tbm_surf_queue_mutex_lock();
1678         _tbm_set_last_result(TBM_ERROR_NONE);
1679
1680         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1681
1682         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1683
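        /* Clearing the magic number first makes _tbm_surface_queue_is_valid()
         * fail for any API call that races with the teardown below. */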
1684         surface_queue->magic = 0;
1685
1686         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1687                 _queue_delete_node(surface_queue, node);
1688
1689         if (surface_queue->impl && surface_queue->impl->destroy)
1690                 surface_queue->impl->destroy(surface_queue);
1691
1692         _notify_emit(surface_queue, &surface_queue->destory_noti);
1693
1694         _notify_remove_all(&surface_queue->destory_noti);
1695         _notify_remove_all(&surface_queue->dequeuable_noti);
1696         _notify_remove_all(&surface_queue->dequeue_noti);
1697         _notify_remove_all(&surface_queue->can_dequeue_noti);
1698         _notify_remove_all(&surface_queue->acquirable_noti);
1699         _notify_remove_all(&surface_queue->reset_noti);
1700         _trace_remove_all(&surface_queue->trace_noti);
1701
1702         pthread_mutex_destroy(&surface_queue->lock);
1703
1704         free(surface_queue);
1705
1706         _tbm_surf_queue_mutex_unlock();
1707 }
1708
1709 tbm_surface_queue_error_e
1710 tbm_surface_queue_reset(tbm_surface_queue_h
1711                         surface_queue, int width, int height, int format)
1712 {
1713         queue_node *node = NULL, *tmp;
1714
1715         _tbm_surf_queue_mutex_lock();
1716         _tbm_set_last_result(TBM_ERROR_NONE);
1717
1718         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1719                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1720
1721         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1722
1723         if (width == surface_queue->width && height == surface_queue->height &&
1724                 format == surface_queue->format) {
1725                 _tbm_surf_queue_mutex_unlock();
1726                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1727         }
1728
1729         pthread_mutex_lock(&surface_queue->lock);
1730
1731         surface_queue->width = width;
1732         surface_queue->height = height;
1733         surface_queue->format = format;
1734
1735         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1736                 /* Destroy the surfaces in free_queue and mark the remaining nodes delete_pending */
1737                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1738                         _queue_delete_node(surface_queue, node);
1739
1740                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1741                         node->delete_pending = 1;
1742         } else {
1743                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1744                         _queue_delete_node(surface_queue, node);
1745
1746                 _queue_init(&surface_queue->dirty_queue);
1747                 LIST_INITHEAD(&surface_queue->list);
1748         }
1749
1750         /* Reset queue */
1751         _queue_init(&surface_queue->free_queue);
1752
1753         surface_queue->num_attached = 0;
1754
1755         if (surface_queue->impl && surface_queue->impl->reset)
1756                 surface_queue->impl->reset(surface_queue);
1757
1758         pthread_mutex_unlock(&surface_queue->lock);
1759         pthread_cond_signal(&surface_queue->free_cond);
1760
1761         _tbm_surf_queue_mutex_unlock();
1762
1763         _notify_emit(surface_queue, &surface_queue->reset_noti);
1764
1765         return TBM_SURFACE_QUEUE_ERROR_NONE;
1766 }
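
/*
 * Note: when TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE is set, a reset destroys
 * only the idle buffers sitting in free_queue; buffers still held outside the
 * queue are merely marked delete_pending (and are skipped by
 * tbm_surface_queue_get_surfaces) rather than being destroyed while in use.
 */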
1767
1768 tbm_surface_queue_error_e
1769 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1770 {
1771         _tbm_surf_queue_mutex_lock();
1772         _tbm_set_last_result(TBM_ERROR_NONE);
1773
1774         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1775                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1776
1777         _tbm_surf_queue_mutex_unlock();
1778
1779         _notify_emit(surface_queue, &surface_queue->reset_noti);
1780
1781         return TBM_SURFACE_QUEUE_ERROR_NONE;
1782 }
1783
1784 tbm_surface_queue_error_e
1785 tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
1786 {
1787         _tbm_surf_queue_mutex_lock();
1788         _tbm_set_last_result(TBM_ERROR_NONE);
1789
1790         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1791                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1792
1793         pthread_mutex_lock(&surface_queue->lock);
1794         pthread_mutex_unlock(&surface_queue->lock);
1795         pthread_cond_signal(&surface_queue->free_cond);
1796
1797         _tbm_surf_queue_mutex_unlock();
1798
1799         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1800
1801         return TBM_SURFACE_QUEUE_ERROR_NONE;
1802 }
1803
1804 tbm_surface_queue_error_e
1805 tbm_surface_queue_set_size(tbm_surface_queue_h
1806                         surface_queue, int queue_size, int flush)
1807 {
1808         queue_node *node = NULL, *tmp;
1809
1810         _tbm_surf_queue_mutex_lock();
1811         _tbm_set_last_result(TBM_ERROR_NONE);
1812
1813         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1814                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1815         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1816                                         TBM_ERROR_INVALID_PARAMETER);
1817
1818         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1819
1820         if ((surface_queue->queue_size == queue_size) && !flush) {
1821                 _tbm_surf_queue_mutex_unlock();
1822                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1823         }
1824
1825         pthread_mutex_lock(&surface_queue->lock);
1826
1827         if (flush) {
1828                 surface_queue->queue_size = queue_size;
1829
1830                 if (surface_queue->num_attached == 0) {
1831                         pthread_mutex_unlock(&surface_queue->lock);
1832                         _tbm_surf_queue_mutex_unlock();
1833                         return TBM_SURFACE_QUEUE_ERROR_NONE;
1834                 }
1835
1836                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1837                         /* Destroy the surfaces in free_queue and mark the remaining nodes delete_pending */
1838                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1839                                 _queue_delete_node(surface_queue, node);
1840
1841                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1842                                 node->delete_pending = 1;
1843                 } else {
1844                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1845                                 _queue_delete_node(surface_queue, node);
1846
1847                         _queue_init(&surface_queue->dirty_queue);
1848                         LIST_INITHEAD(&surface_queue->list);
1849                 }
1850
1851                 /* Reset queue */
1852                 _queue_init(&surface_queue->free_queue);
1853
1854                 surface_queue->num_attached = 0;
1855
1856                 if (surface_queue->impl && surface_queue->impl->reset)
1857                         surface_queue->impl->reset(surface_queue);
1858
1859                 pthread_mutex_unlock(&surface_queue->lock);
1860                 pthread_cond_signal(&surface_queue->free_cond);
1861
1862                 _tbm_surf_queue_mutex_unlock();
1863
1864                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1865
1866                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1867         } else {
1868                 if (surface_queue->queue_size > queue_size) {
1869                         int need_del = surface_queue->queue_size - queue_size;
1870
1871                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1872                                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1873
1874                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1875                                         surface_queue->impl->need_detach(surface_queue, node);
1876                                 else
1877                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1878
1879                                 need_del--;
1880                                 if (need_del == 0)
1881                                         break;
1882                         }
1883                 }
1884
1885                 surface_queue->queue_size = queue_size;
1886
1887                 pthread_mutex_unlock(&surface_queue->lock);
1888
1889                 _tbm_surf_queue_mutex_unlock();
1890
1891                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1892         }
1893 }
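
/*
 * The two branches above differ in how aggressively they act: with flush != 0
 * the queue is reset (all nodes are dropped or marked delete_pending and
 * reset_noti is emitted), while without flush only surplus idle buffers are
 * detached from free_queue when the queue shrinks. A minimal, illustrative
 * call for resizing without disturbing in-flight buffers might be:
 *
 *   tbm_surface_queue_set_size(queue, 4, 0);
 */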
1894
1895 tbm_surface_queue_error_e
1896 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1897 {
1898         queue_node *node = NULL;
1899         int is_guarantee_cycle = 0;
1900
1901         _tbm_surf_queue_mutex_lock();
1902         _tbm_set_last_result(TBM_ERROR_NONE);
1903
1904         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1905                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1906
1907         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1908
1909         if (surface_queue->num_attached == 0) {
1910                 _tbm_surf_queue_mutex_unlock();
1911                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1912         }
1913
1914         pthread_mutex_lock(&surface_queue->lock);
1915
1916         /* Destroy the surfaces in free_queue */
1917         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1918                 if (surface_queue->impl && surface_queue->impl->need_detach)
1919                         surface_queue->impl->need_detach(surface_queue, node);
1920                 else
1921                         _tbm_surface_queue_detach(surface_queue, node->surface);
1922         }
1923
1924         /* Reset queue */
1925         _queue_init(&surface_queue->free_queue);
1926
1927         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
1928                 is_guarantee_cycle = 1;
1929
1930         pthread_mutex_unlock(&surface_queue->lock);
1931         _tbm_surf_queue_mutex_unlock();
1932
1933         if (is_guarantee_cycle)
1934                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1935
1936         return TBM_SURFACE_QUEUE_ERROR_NONE;
1937 }
1938
1939 tbm_surface_queue_error_e
1940 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1941 {
1942         queue_node *node = NULL, *tmp;
1943
1944         _tbm_surf_queue_mutex_lock();
1945         _tbm_set_last_result(TBM_ERROR_NONE);
1946
1947         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1948                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1949
1950         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1951
1952         if (surface_queue->num_attached == 0) {
1953                 _tbm_surf_queue_mutex_unlock();
1954                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1955         }
1956
1957         pthread_mutex_lock(&surface_queue->lock);
1958
1959         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1960                 /* Destroy the surfaces in free_queue and mark the remaining nodes delete_pending */
1961                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1962                         _queue_delete_node(surface_queue, node);
1963
1964                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1965                         node->delete_pending = 1;
1966         } else {
1967                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1968                         _queue_delete_node(surface_queue, node);
1969
1970                 _queue_init(&surface_queue->dirty_queue);
1971                 LIST_INITHEAD(&surface_queue->list);
1972         }
1973
1974         /* Reset queue */
1975         _queue_init(&surface_queue->free_queue);
1976
1977         surface_queue->num_attached = 0;
1978
1979         if (surface_queue->impl && surface_queue->impl->reset)
1980                 surface_queue->impl->reset(surface_queue);
1981
1982         pthread_mutex_unlock(&surface_queue->lock);
1983         pthread_cond_signal(&surface_queue->free_cond);
1984
1985         _tbm_surf_queue_mutex_unlock();
1986
1987         _notify_emit(surface_queue, &surface_queue->reset_noti);
1988
1989         return TBM_SURFACE_QUEUE_ERROR_NONE;
1990 }
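
/*
 * Note: tbm_surface_queue_free_flush() above only empties free_queue (idle
 * buffers), while tbm_surface_queue_flush() tears down every node (or marks it
 * delete_pending under GUARANTEE_CYCLE), resets num_attached and emits
 * reset_noti, so callers observe it like a full reset.
 */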
1991
1992 tbm_surface_queue_error_e
1993 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
1994                         tbm_surface_h *surfaces, int *num)
1995 {
1996         queue_node *node = NULL;
1997
1998         _tbm_surf_queue_mutex_lock();
1999         _tbm_set_last_result(TBM_ERROR_NONE);
2000
2001         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2002                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2003         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2004                                TBM_ERROR_INVALID_PARAMETER);
2005
2006         *num = 0;
2007
2008         pthread_mutex_lock(&surface_queue->lock);
2009
2010         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
2011                 if (node->delete_pending) continue;
2012
2013                 if (surfaces)
2014                         surfaces[*num] = node->surface;
2015
2016                 *num = *num + 1;
2017         }
2018
2019         pthread_mutex_unlock(&surface_queue->lock);
2020
2021         _tbm_surf_queue_mutex_unlock();
2022
2023         return TBM_SURFACE_QUEUE_ERROR_NONE;
2024 }
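
/*
 * Because the copy into 'surfaces' is skipped when the array pointer is NULL,
 * callers can query the count first and then fetch the handles. An
 * illustrative (not authoritative) two-pass sketch:
 *
 *   int num = 0;
 *   tbm_surface_queue_get_surfaces(queue, NULL, &num);
 *   if (num > 0) {
 *       tbm_surface_h *surfaces = calloc(num, sizeof(tbm_surface_h));
 *       if (surfaces) {
 *           tbm_surface_queue_get_surfaces(queue, surfaces, &num);
 *           use surfaces[0..num-1] here
 *           free(surfaces);
 *       }
 *   }
 */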
2025
2026 tbm_surface_queue_error_e
2027 tbm_surface_queue_get_acquirable_surfaces(tbm_surface_queue_h surface_queue,
2028                         tbm_surface_h *surfaces, int *num)
2029 {
2030         queue_node *node = NULL;
2031
2032         _tbm_surf_queue_mutex_lock();
2033         _tbm_set_last_result(TBM_ERROR_NONE);
2034
2035         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2036                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2037         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2038                                TBM_ERROR_INVALID_PARAMETER);
2039
2040         *num = 0;
2041         pthread_mutex_lock(&surface_queue->lock);
2042
2043         LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head, item_link) {
2044                 if (surfaces)
2045                         surfaces[*num] = node->surface;
2046
2047                 *num = *num + 1;
2048         }
2049
2050         pthread_mutex_unlock(&surface_queue->lock);
2051
2052         _tbm_surf_queue_mutex_unlock();
2053
2054         return TBM_SURFACE_QUEUE_ERROR_NONE;
2055 }
2056
2057 tbm_surface_queue_error_e
2058 tbm_surface_queue_get_trace_surface_num(
2059                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
2060 {
2061         _tbm_surf_queue_mutex_lock();
2062         _tbm_set_last_result(TBM_ERROR_NONE);
2063
2064         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2065                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2066         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2067                                TBM_ERROR_INVALID_PARAMETER);
2068
2069         *num = 0;
2070
2071         pthread_mutex_lock(&surface_queue->lock);
2072
2073         switch (trace) {
2074         case TBM_SURFACE_QUEUE_TRACE_NONE:
2075                 *num = 0;
2076                 break;
2077         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
2078                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2079                 break;
2080         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
2081                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2082                 break;
2083         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
2084                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
2085                 break;
2086         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
2087                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
2088                 break;
2089         default:
2090                 break;
2091         }
2092
2093         pthread_mutex_unlock(&surface_queue->lock);
2094
2095         _tbm_surf_queue_mutex_unlock();
2096
2097         return TBM_SURFACE_QUEUE_ERROR_NONE;
2098 }
2099
2100 static const tbm_surface_queue_interface tbm_queue_default_impl = {
2101         NULL,                           /*__tbm_queue_default_init*/
2102         NULL,                           /*__tbm_queue_default_reset*/
2103         NULL,                           /*__tbm_queue_default_destroy*/
2104         NULL,                           /*__tbm_queue_default_need_attach*/
2105         NULL,                           /*__tbm_queue_default_enqueue*/
2106         NULL,                           /*__tbm_queue_default_release*/
2107         NULL,                           /*__tbm_queue_default_dequeue*/
2108         NULL,                           /*__tbm_queue_default_acquire*/
2109         NULL,                           /*__tbm_queue_default_need_detach*/
2110 };
2111
2112 tbm_surface_queue_h
2113 tbm_surface_queue_create(int queue_size, int width,
2114                          int height, int format, int flags)
2115 {
2116         _tbm_surf_queue_mutex_lock();
2117         _tbm_set_last_result(TBM_ERROR_NONE);
2118
2119         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2120         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2121         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2122         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2123
2124         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2125                                             sizeof(struct _tbm_surface_queue));
2126         if (!surface_queue) {
2127                 TBM_ERR("cannot allocate the surface_queue.\n");
2128                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2129                 _tbm_surf_queue_mutex_unlock();
2130                 return NULL;
2131         }
2132
2133         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2134
2135         _tbm_surface_queue_init(surface_queue,
2136                                 queue_size,
2137                                 width, height, format, flags,
2138                                 &tbm_queue_default_impl, NULL);
2139
2140         _tbm_surf_queue_mutex_unlock();
2141
2142         return surface_queue;
2143 }
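
/*
 * A minimal producer-side sketch (illustrative, with error handling omitted);
 * the queue size, resolution, format and bo flags below are arbitrary
 * examples, not values mandated by this file:
 *
 *   tbm_surface_queue_h queue =
 *       tbm_surface_queue_create(3, 720, 1280, TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *   tbm_surface_h surface = NULL;
 *   tbm_surface_info_s info;
 *
 *   if (queue && tbm_surface_queue_can_dequeue(queue, 1) &&
 *       tbm_surface_queue_dequeue(queue, &surface) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *       if (tbm_surface_map(surface, TBM_SURF_OPTION_WRITE, &info) == TBM_SURFACE_ERROR_NONE) {
 *           draw into info.planes[0].ptr here
 *           tbm_surface_unmap(surface);
 *       }
 *       tbm_surface_queue_enqueue(queue, surface);
 *   }
 *
 *   tbm_surface_queue_destroy(queue);
 */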
2144
2145 typedef struct {
2146         queue dequeue_list;
2147 } tbm_queue_sequence;
2148
2149 static void
2150 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2151 {
2152         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2153
2154         _queue_init(&data->dequeue_list);
2155 }
2156
2157 static void
2158 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2159 {
2160         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2161
2162         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2163                 return;
2164
2165         _queue_init(&data->dequeue_list);
2166 }
2167
2168 static void
2169 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2170 {
2171         free(surface_queue->impl_data);
2172 }
2173
2174 static void
2175 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2176                              queue_node *node)
2177 {
2178         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2179         queue_node *first = NULL;
2180
2181         first = container_of(data->dequeue_list.head.next, first, item_link);
2182         if (first != node) {
2183                 return;
2184         }
2185
2186         node->priv_flags = 0;
2187
2188         _queue_node_pop(&data->dequeue_list, node);
2189         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2190 }
2191
2192 static void
2193 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2194                                 queue_node *node)
2195 {
2196         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2197
2198         if (node->priv_flags) {
2199                 node->priv_flags = 0;
2200                 _queue_node_pop(&data->dequeue_list, node);
2201         }
2202
2203         _tbm_surface_queue_release(surface_queue, node, 1);
2204 }
2205
2206 static queue_node *
2207 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2208                              surface_queue)
2209 {
2210         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2211         queue_node *node;
2212
2213         node = _tbm_surface_queue_dequeue(surface_queue);
2214         if (node) {
2215                 _queue_node_push_back(&data->dequeue_list, node);
2216                 node->priv_flags = 1;
2217         }
2218
2219         return node;
2220 }
2221
2222 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2223         __tbm_queue_sequence_init,
2224         __tbm_queue_sequence_reset,
2225         __tbm_queue_sequence_destroy,
2226         NULL,
2227         __tbm_queue_sequence_enqueue,
2228         __tbm_queue_sequence_release,
2229         __tbm_queue_sequence_dequeue,
2230         NULL,                                   /*__tbm_queue_sequence_acquire*/
2231         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2232 };
2233
2234 tbm_surface_queue_h
2235 tbm_surface_queue_sequence_create(int queue_size, int width,
2236                                   int height, int format, int flags)
2237 {
2238         _tbm_surf_queue_mutex_lock();
2239         _tbm_set_last_result(TBM_ERROR_NONE);
2240
2241         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2242         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2243         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2244         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2245
2246         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2247                                             sizeof(struct _tbm_surface_queue));
2248         if (surface_queue == NULL) {
2249                 TBM_ERR("cannot allocate the surface_queue.\n");
2250                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2251                 _tbm_surf_queue_mutex_unlock();
2252                 return NULL;
2253         }
2254
2255         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2256
2257         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2258                                    sizeof(tbm_queue_sequence));
2259         if (data == NULL) {
2260                 TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
2261                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2262                 free(surface_queue);
2263                 _tbm_surf_queue_mutex_unlock();
2264                 return NULL;
2265         }
2266
2267         _tbm_surface_queue_init(surface_queue,
2268                                 queue_size,
2269                                 width, height, format, flags,
2270                                 &tbm_queue_sequence_impl, data);
2271
2272         _tbm_surf_queue_mutex_unlock();
2273
2274         return surface_queue;
2275 }
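
/*
 * Note: in this sequence variant the dequeue hook records every dequeued node
 * in dequeue_list, and the enqueue hook forwards a node to
 * _tbm_surface_queue_enqueue() only when it is the oldest entry of that list;
 * an out-of-order enqueue simply returns from the hook, and releasing a
 * pending node drops it from dequeue_list again. The external API is
 * otherwise the same as for tbm_surface_queue_create().
 */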
2276
2277 tbm_surface_queue_error_e
2278 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2279                                   int modes)
2280 {
2281         _tbm_surf_queue_mutex_lock();
2282         _tbm_set_last_result(TBM_ERROR_NONE);
2283
2284         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2285                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2286
2287         pthread_mutex_lock(&surface_queue->lock);
2288
2289         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2290                 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
2291         else
2292                 surface_queue->modes |= modes;
2293
2294         pthread_mutex_unlock(&surface_queue->lock);
2295
2296         _tbm_surf_queue_mutex_unlock();
2297
2298         return TBM_SURFACE_QUEUE_ERROR_NONE;
2299 }
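
/*
 * Note: mode bits are OR-ed into the existing set, and passing
 * TBM_SURFACE_QUEUE_MODE_NONE clears them again, e.g. (illustrative):
 *
 *   tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE);
 */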
2300
2301 tbm_surface_queue_error_e
2302 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2303                                   unsigned int sync_count)
2304 {
2305         int dequeue_num, enqueue_num;
2306
2307         _tbm_surf_queue_mutex_lock();
2308         _tbm_set_last_result(TBM_ERROR_NONE);
2309
2310         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2311                                    TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2312
2313         pthread_mutex_lock(&surface_queue->lock);
2314
2315         dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2316         enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2317
2318         if (dequeue_num + sync_count == 0)
2319                 surface_queue->acquire_sync_count = enqueue_num;
2320         else
2321                 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2322
2323         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2324                                 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2325
2326         pthread_mutex_unlock(&surface_queue->lock);
2327
2328         _tbm_surf_queue_mutex_unlock();
2329
2330         return TBM_SURFACE_QUEUE_ERROR_NONE;
2331 }