tbm_surface_queue: attach and check surface when free_cond is awakened
[platform/core/uifw/libtbm.git] / src / tbm_surface_queue.c
1 /**************************************************************************
2
3 libtbm
4
5 Copyright 2014 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8 Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
9
10 Permission is hereby granted, free of charge, to any person obtaining a
11 copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sub license, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial portions
20 of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
23 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
25 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
26 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 #include "config.h"
33
34 #include "tbm_bufmgr_int.h"
35 #include "list.h"
36 #include <time.h>
37
38 #define FREE_QUEUE      1
39 #define DIRTY_QUEUE     2
40 #define NODE_LIST       4
41
42 static tbm_bufmgr g_surf_queue_bufmgr;
43 static pthread_mutex_t tbm_surf_queue_lock;
44 void _tbm_surface_queue_mutex_unlock(void);
45
46 /* check condition */
47 #define TBM_SURF_QUEUE_RETURN_IF_FAIL(cond) {\
48         if (!(cond)) {\
49                 TBM_ERR("'%s' failed.\n", #cond);\
50                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
51                 _tbm_surf_queue_mutex_unlock();\
52                 return;\
53         } \
54 }
55
56 #define TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(cond, val) {\
57         if (!(cond)) {\
58                 TBM_ERR("'%s' failed.\n", #cond);\
59                 _tbm_set_last_result(TBM_ERROR_INVALID_PARAMETER);\
60                 _tbm_surf_queue_mutex_unlock();\
61                 return val;\
62         } \
63 }
64
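/* State of a buffer as it moves through the queue: the producer dequeues a free
 * node, enqueues it as dirty, the consumer acquires it and finally releases it
 * back to the free queue. */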
65 typedef enum _queue_node_type {
66         QUEUE_NODE_TYPE_NONE,
67         QUEUE_NODE_TYPE_DEQUEUE,
68         QUEUE_NODE_TYPE_ENQUEUE,
69         QUEUE_NODE_TYPE_ACQUIRE,
70         QUEUE_NODE_TYPE_RELEASE
71 } Queue_Node_Type;
72
73 typedef struct {
74         struct list_head head;
75         int count;
76 } queue;
77
78 typedef struct {
79         tbm_surface_h surface;
80
81         struct list_head item_link;
82         struct list_head link;
83
84         Queue_Node_Type type;
85
86         unsigned int priv_flags;        /*for each queue*/
87
88         int delete_pending;
89 } queue_node;
90
91 typedef struct {
92         struct list_head link;
93
94         tbm_surface_queue_notify_cb cb;
95         void *data;
96 } queue_notify;
97
98 typedef struct {
99         struct list_head link;
100
101         tbm_surface_queue_trace_cb cb;
102         void *data;
103 } queue_trace;
104
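/* Backend hooks that customize queue behavior. Each hook is optional: the call
 * sites fall back to the default _tbm_surface_queue_* helpers below whenever the
 * corresponding function pointer is NULL. */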
105 typedef struct _tbm_surface_queue_interface {
106         void (*init)(tbm_surface_queue_h queue);
107         void (*reset)(tbm_surface_queue_h queue);
108         void (*destroy)(tbm_surface_queue_h queue);
109         void (*need_attach)(tbm_surface_queue_h queue);
110
111         void (*enqueue)(tbm_surface_queue_h queue, queue_node *node);
112         void (*release)(tbm_surface_queue_h queue, queue_node *node);
113         queue_node *(*dequeue)(tbm_surface_queue_h queue);
114         queue_node *(*acquire)(tbm_surface_queue_h queue);
115         void (*need_detach)(tbm_surface_queue_h queue, queue_node *node);
116 } tbm_surface_queue_interface;
117
118 struct _tbm_surface_queue {
119         int width;
120         int height;
121         int format;
122         int queue_size;
123         int flags;
124         int num_attached;
125
126         queue free_queue;
127         queue dirty_queue;
128         struct list_head list;
129
130         struct list_head destory_noti;
131         struct list_head dequeuable_noti;
132         struct list_head dequeue_noti;
133         struct list_head can_dequeue_noti;
134         struct list_head acquirable_noti;
135         struct list_head reset_noti;
136         struct list_head trace_noti;
137
138         pthread_mutex_t lock;
139         pthread_cond_t free_cond;
140         pthread_cond_t dirty_cond;
141
142         const tbm_surface_queue_interface *impl;
143         void *impl_data;
144
145         //For external buffer allocation
146         tbm_surface_alloc_cb alloc_cb;
147         tbm_surface_free_cb free_cb;
148         void *alloc_cb_data;
149
150         struct list_head item_link; /* link of surface queue */
151
152         int modes;
153         unsigned int enqueue_sync_count;
154         unsigned int acquire_sync_count;
155 };
156
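/*
 * Illustrative producer/consumer flow built from the public entry points in this
 * file (a sketch only: error handling is omitted, and 'queue' plus the render and
 * display steps are placeholders, not part of libtbm):
 *
 *   tbm_surface_h surf;
 *
 *   if (tbm_surface_queue_can_dequeue(queue, 1)) {
 *       tbm_surface_queue_dequeue(queue, &surf);    // take a free buffer
 *       // ... render into surf ...
 *       tbm_surface_queue_enqueue(queue, surf);     // hand it to the consumer
 *   }
 *
 *   tbm_surface_queue_acquire(queue, &surf);        // consumer takes the dirty buffer
 *   // ... display surf ...
 *   tbm_surface_queue_release(queue, surf);         // return it to the free queue
 */
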
157 static bool
158 _tbm_surf_queue_mutex_init(void)
159 {
160         static bool tbm_surf_queue_mutex_init = false;
161
162         if (tbm_surf_queue_mutex_init)
163                 return true;
164
165         if (pthread_mutex_init(&tbm_surf_queue_lock, NULL)) {
166                 TBM_ERR("fail: pthread_mutex_init\n");
167                 return false;
168         }
169
170         tbm_surf_queue_mutex_init = true;
171
172         return true;
173 }
174
175 static void
176 _tbm_surf_queue_mutex_lock(void)
177 {
178         if (!_tbm_surf_queue_mutex_init()) {
179                 TBM_ERR("fail: _tbm_surf_queue_mutex_init\n");
180                 return;
181         }
182
183         pthread_mutex_lock(&tbm_surf_queue_lock);
184 }
185
186 static void
187 _tbm_surf_queue_mutex_unlock(void)
188 {
189         pthread_mutex_unlock(&tbm_surf_queue_lock);
190 }
191
192 static void
193 _init_tbm_surf_queue_bufmgr(void)
194 {
195         g_surf_queue_bufmgr = tbm_bufmgr_init(-1);
196 }
197
198 static void
199 _deinit_tbm_surf_queue_bufmgr(void)
200 {
201         if (!g_surf_queue_bufmgr)
202                 return;
203
204         tbm_bufmgr_deinit(g_surf_queue_bufmgr);
205         g_surf_queue_bufmgr = NULL;
206 }
207
208 static int
209 _tbm_surface_queue_is_valid(tbm_surface_queue_h surface_queue)
210 {
211         tbm_surface_queue_h old_data = NULL;
212
213         if (surface_queue == NULL) {
214                 TBM_ERR("error: surface_queue is NULL.\n");
215                 return 0;
216         }
217
218         if (g_surf_queue_bufmgr == NULL) {
219                 TBM_ERR("error: g_surf_queue_bufmgr is NULL.\n");
220                 return 0;
221         }
222
223         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list)) {
224                 TBM_ERR("error: surf_queue_list is empty\n");
225                 return 0;
226         }
227
228         LIST_FOR_EACH_ENTRY(old_data, &g_surf_queue_bufmgr->surf_queue_list,
229                                 item_link) {
230                 if (old_data == surface_queue)
231                         return 1;
232         }
233
234         TBM_ERR("error: Invalid tbm_surface_queue(%p)\n", surface_queue);
235
236         return 0;
237 }
238
239 static queue_node *
240 _queue_node_create(void)
241 {
242         queue_node *node = (queue_node *) calloc(1, sizeof(queue_node));
243
244         TBM_RETURN_VAL_IF_FAIL(node != NULL, NULL);
245
246         return node;
247 }
248
249 static void
250 _queue_node_delete(queue_node *node)
251 {
252         LIST_DEL(&node->item_link);
253         LIST_DEL(&node->link);
254         free(node);
255 }
256
257 static int
258 _queue_is_empty(queue *queue)
259 {
260         if (LIST_IS_EMPTY(&queue->head))
261                 return 1;
262
263         return 0;
264 }
265
266 static void
267 _queue_node_push_back(queue *queue, queue_node *node)
268 {
269         LIST_ADDTAIL(&node->item_link, &queue->head);
270         queue->count++;
271 }
272
273 static void
274 _queue_node_push_front(queue *queue, queue_node *node)
275 {
276         LIST_ADD(&node->item_link, &queue->head);
277         queue->count++;
278 }
279
280 static queue_node *
281 _queue_node_pop_front(queue *queue)
282 {
283         queue_node *node;
284
285         if (!queue->head.next) return NULL;
286         if (!queue->count) return NULL;
287
288         node = LIST_ENTRY(queue_node, queue->head.next, item_link);
289
290         LIST_DELINIT(&node->item_link);
291         queue->count--;
292
293         return node;
294 }
295
296 static queue_node *
297 _queue_node_pop(queue *queue, queue_node *node)
298 {
299         LIST_DELINIT(&node->item_link);
300         queue->count--;
301
302         return node;
303 }
304
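/* Find the node that owns 'surface'. 'type' is a bitmask of FREE_QUEUE,
 * DIRTY_QUEUE and NODE_LIST; passing 0 searches all three lists. The list in
 * which the node was found is reported through 'out_type'. */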
305 static queue_node *
306 _queue_get_node(tbm_surface_queue_h surface_queue, int type,
307                 tbm_surface_h surface, int *out_type)
308 {
309         queue_node *node = NULL;
310
311         if (type == 0)
312                 type = FREE_QUEUE | DIRTY_QUEUE | NODE_LIST;
313         if (out_type)
314                 *out_type = 0;
315
316         if (type & FREE_QUEUE) {
317                 LIST_FOR_EACH_ENTRY(node, &surface_queue->free_queue.head,
318                                          item_link) {
319                         if (node->surface == surface) {
320                                 if (out_type)
321                                         *out_type = FREE_QUEUE;
322
323                                 return node;
324                         }
325                 }
326         }
327
328         if (type & DIRTY_QUEUE) {
329                 LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head,
330                                          item_link) {
331                         if (node->surface == surface) {
332                                 if (out_type)
333                                         *out_type = DIRTY_QUEUE;
334
335                                 return node;
336                         }
337                 }
338         }
339
340         if (type & NODE_LIST) {
341                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
342                         if (node->surface == surface) {
343                                 if (out_type)
344                                         *out_type = NODE_LIST;
345
346                                 return node;
347                         }
348                 }
349         }
350
351         TBM_ERR("failed to get the queue_node.\n");
352
353         return NULL;
354 }
355
356 static void
357 _queue_delete_node(tbm_surface_queue_h surface_queue, queue_node *node)
358 {
359         if (node->surface) {
360                 if (surface_queue->free_cb) {
361                         surface_queue->free_cb(surface_queue,
362                                         surface_queue->alloc_cb_data,
363                                         node->surface);
364                 }
365
366                 tbm_surface_destroy(node->surface);
367         }
368
369         _queue_node_delete(node);
370 }
371
372 static void
373 _queue_init(queue *queue)
374 {
375         LIST_INITHEAD(&queue->head);
376
377         queue->count = 0;
378 }
379
380 static void
381 _notify_add(struct list_head *list, tbm_surface_queue_notify_cb cb,
382             void *data)
383 {
384         TBM_RETURN_IF_FAIL(cb != NULL);
385
386         queue_notify *item = (queue_notify *)calloc(1, sizeof(queue_notify));
387
388         TBM_RETURN_IF_FAIL(item != NULL);
389
390         LIST_INITHEAD(&item->link);
391         item->cb = cb;
392         item->data = data;
393
394         LIST_ADDTAIL(&item->link, list);
395 }
396
397 static void
398 _notify_remove(struct list_head *list,
399                tbm_surface_queue_notify_cb cb, void *data)
400 {
401         queue_notify *item = NULL, *tmp;
402
403         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
404                 if (item->cb == cb && item->data == data) {
405                         LIST_DEL(&item->link);
406                         free(item);
407                         return;
408                 }
409         }
410
411         TBM_ERR("Cannot find notify callback\n");
412 }
413
414 static void
415 _notify_remove_all(struct list_head *list)
416 {
417         queue_notify *item = NULL, *tmp;
418
419         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
420                 LIST_DEL(&item->link);
421                 free(item);
422         }
423 }
424
425 static void
426 _notify_emit(tbm_surface_queue_h surface_queue,
427              struct list_head *list)
428 {
429         queue_notify *item = NULL, *tmp;
430
431         /*
432                 The item->cb is the outside function of the libtbm.
433                 The tbm user may/can remove the item of the list,
434                 so we have to use the LIST_FOR_EACH_ENTRY_SAFE.
435         */
436         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
437                 item->cb(surface_queue, item->data);
438 }
439
440 static void
441 _trace_add(struct list_head *list, tbm_surface_queue_trace_cb cb,
442             void *data)
443 {
444         TBM_RETURN_IF_FAIL(cb != NULL);
445
446         queue_trace *item = (queue_trace *)calloc(1, sizeof(queue_trace));
447
448         TBM_RETURN_IF_FAIL(item != NULL);
449
450         LIST_INITHEAD(&item->link);
451         item->cb = cb;
452         item->data = data;
453
454         LIST_ADDTAIL(&item->link, list);
455 }
456
457 static void
458 _trace_remove(struct list_head *list,
459                tbm_surface_queue_trace_cb cb, void *data)
460 {
461         queue_trace *item = NULL, *tmp;
462
463         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
464                 if (item->cb == cb && item->data == data) {
465                         LIST_DEL(&item->link);
466                         free(item);
467                         return;
468                 }
469         }
470
471         TBM_ERR("Cannot find trace callback\n");
472 }
473
474 static void
475 _trace_remove_all(struct list_head *list)
476 {
477         queue_trace *item = NULL, *tmp;
478
479         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link) {
480                 LIST_DEL(&item->link);
481                 free(item);
482         }
483 }
484
485 static void
486 _trace_emit(tbm_surface_queue_h surface_queue,
487              struct list_head *list, tbm_surface_h surface, tbm_surface_queue_trace trace)
488 {
489         queue_trace *item = NULL, *tmp;
490
491         /*
492                 The item->cb is the outside function of the libtbm.
493                 The tbm user may/can remove the item of the list,
494                 so we have to use the LIST_FOR_EACH_ENTRY_SAFE.
495         */
496         LIST_FOR_EACH_ENTRY_SAFE(item, tmp, list, link)
497                 item->cb(surface_queue, surface, trace, item->data);
498 }
499
500 static int
501 _tbm_surface_queue_get_node_count(tbm_surface_queue_h surface_queue, Queue_Node_Type type)
502 {
503         queue_node *node = NULL;
504         int count = 0;
505
506         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
507                 if (node->type == type)
508                         count++;
509         }
510
511         return count;
512 }
513
514 static void
515 _tbm_surface_queue_attach(tbm_surface_queue_h surface_queue,
516                           tbm_surface_h surface)
517 {
518         queue_node *node;
519
520         node = _queue_node_create();
521         TBM_RETURN_IF_FAIL(node != NULL);
522
523         tbm_surface_internal_ref(surface);
524         node->surface = surface;
525
526         LIST_ADDTAIL(&node->link, &surface_queue->list);
527         surface_queue->num_attached++;
528         _queue_node_push_back(&surface_queue->free_queue, node);
529 }
530
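/* Attach one more surface while the queue is not yet full: use the caller-supplied
 * alloc_cb when it is set, otherwise allocate the surface with
 * tbm_surface_internal_create_with_flags(). */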
531 static void
532 _tbm_surface_queue_need_attach(tbm_surface_queue_h surface_queue)
533 {
534         tbm_surface_h surface;
535
536         if (surface_queue->queue_size == surface_queue->num_attached)
537                 return;
538
539         if (surface_queue->alloc_cb) {
540                 surface = surface_queue->alloc_cb(surface_queue, surface_queue->alloc_cb_data);
541
542                 /* silent return */
543                 if (!surface)
544                         return;
545
546                 tbm_surface_internal_ref(surface);
547         } else {
548                 surface = tbm_surface_internal_create_with_flags(surface_queue->width,
549                                 surface_queue->height,
550                                 surface_queue->format,
551                                 surface_queue->flags);
552                 TBM_RETURN_IF_FAIL(surface != NULL);
553         }
554
555         _tbm_surface_queue_attach(surface_queue, surface);
556         tbm_surface_internal_unref(surface);
557 }
558
559 static void
560 _tbm_surface_queue_detach(tbm_surface_queue_h surface_queue,
561                           tbm_surface_h surface)
562 {
563         queue_node *node;
564         int queue_type;
565
566         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
567         if (node) {
568                 _queue_delete_node(surface_queue, node);
569                 surface_queue->num_attached--;
570         }
571 }
572
573 static void
574 _tbm_surface_queue_enqueue(tbm_surface_queue_h surface_queue,
575                            queue_node *node, int push_back)
576 {
577         if (push_back)
578                 _queue_node_push_back(&surface_queue->dirty_queue, node);
579         else
580                 _queue_node_push_front(&surface_queue->dirty_queue, node);
581 }
582
583 static queue_node *
584 _tbm_surface_queue_dequeue(tbm_surface_queue_h surface_queue)
585 {
586         queue_node *node;
587
588         node = _queue_node_pop_front(&surface_queue->free_queue);
589
590         return node;
591 }
592
593 static queue_node *
594 _tbm_surface_queue_acquire(tbm_surface_queue_h surface_queue)
595 {
596         queue_node *node;
597
598         if (_queue_is_empty(&surface_queue->dirty_queue))
599                 return NULL;
600
601         node = _queue_node_pop_front(&surface_queue->dirty_queue);
602
603         return node;
604 }
605
606 static void
607 _tbm_surface_queue_release(tbm_surface_queue_h surface_queue,
608                            queue_node *node, int push_back)
609 {
610         if (push_back)
611                 _queue_node_push_back(&surface_queue->free_queue, node);
612         else
613                 _queue_node_push_front(&surface_queue->free_queue, node);
614 }
615
616 static void
617 _tbm_surface_queue_init(tbm_surface_queue_h surface_queue,
618                         int queue_size,
619                         int width, int height, int format, int flags,
620                         const tbm_surface_queue_interface *impl, void *data)
621 {
622         pthread_condattr_t free_attr, dirty_attr;
623
624         TBM_RETURN_IF_FAIL(surface_queue != NULL);
625         TBM_RETURN_IF_FAIL(impl != NULL);
626
627         if (!g_surf_queue_bufmgr)
628                 _init_tbm_surf_queue_bufmgr();
629
630         pthread_mutex_init(&surface_queue->lock, NULL);
631
632         pthread_condattr_init(&free_attr);
633         pthread_condattr_setclock(&free_attr, CLOCK_MONOTONIC);
634         pthread_cond_init(&surface_queue->free_cond, &free_attr);
635         pthread_condattr_destroy(&free_attr);
636
637         pthread_condattr_init(&dirty_attr);
638         pthread_condattr_setclock(&dirty_attr, CLOCK_MONOTONIC);
639         pthread_cond_init(&surface_queue->dirty_cond, &dirty_attr);
640         pthread_condattr_destroy(&dirty_attr);
641
642         surface_queue->queue_size = queue_size;
643         surface_queue->width = width;
644         surface_queue->height = height;
645         surface_queue->format = format;
646         surface_queue->flags = flags;
647         surface_queue->impl = impl;
648         surface_queue->impl_data = data;
649         surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE;
650
651         _queue_init(&surface_queue->free_queue);
652         _queue_init(&surface_queue->dirty_queue);
653         LIST_INITHEAD(&surface_queue->list);
654
655         LIST_INITHEAD(&surface_queue->destory_noti);
656         LIST_INITHEAD(&surface_queue->dequeuable_noti);
657         LIST_INITHEAD(&surface_queue->dequeue_noti);
658         LIST_INITHEAD(&surface_queue->can_dequeue_noti);
659         LIST_INITHEAD(&surface_queue->acquirable_noti);
660         LIST_INITHEAD(&surface_queue->reset_noti);
661         LIST_INITHEAD(&surface_queue->trace_noti);
662
663         if (surface_queue->impl && surface_queue->impl->init)
664                 surface_queue->impl->init(surface_queue);
665
666         LIST_ADD(&surface_queue->item_link, &g_surf_queue_bufmgr->surf_queue_list);
667 }
668
669 tbm_surface_queue_error_e
670 tbm_surface_queue_add_destroy_cb(
671         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
672         void *data)
673 {
674         _tbm_surf_queue_mutex_lock();
675         _tbm_set_last_result(TBM_ERROR_NONE);
676
677         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
678                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
679         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(destroy_cb,
680                                TBM_ERROR_INVALID_PARAMETER);
681
682         pthread_mutex_lock(&surface_queue->lock);
683
684         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
685
686         _notify_add(&surface_queue->destory_noti, destroy_cb, data);
687
688         pthread_mutex_unlock(&surface_queue->lock);
689
690         _tbm_surf_queue_mutex_unlock();
691
692         return TBM_SURFACE_QUEUE_ERROR_NONE;
693 }
694
695 tbm_surface_queue_error_e
696 tbm_surface_queue_remove_destroy_cb(
697         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb destroy_cb,
698         void *data)
699 {
700         _tbm_surf_queue_mutex_lock();
701         _tbm_set_last_result(TBM_ERROR_NONE);
702
703         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
704                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
705
706         pthread_mutex_lock(&surface_queue->lock);
707
708         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
709
710         _notify_remove(&surface_queue->destory_noti, destroy_cb, data);
711
712         pthread_mutex_unlock(&surface_queue->lock);
713
714         _tbm_surf_queue_mutex_unlock();
715
716         return TBM_SURFACE_QUEUE_ERROR_NONE;
717 }
718
719 tbm_surface_queue_error_e
720 tbm_surface_queue_add_dequeuable_cb(
721         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
722         void *data)
723 {
724         _tbm_surf_queue_mutex_lock();
725         _tbm_set_last_result(TBM_ERROR_NONE);
726
727         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
728                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
729         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeuable_cb,
730                                TBM_ERROR_INVALID_PARAMETER);
731
732         pthread_mutex_lock(&surface_queue->lock);
733
734         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
735
736         _notify_add(&surface_queue->dequeuable_noti, dequeuable_cb, data);
737
738         pthread_mutex_unlock(&surface_queue->lock);
739
740         _tbm_surf_queue_mutex_unlock();
741
742         return TBM_SURFACE_QUEUE_ERROR_NONE;
743 }
744
745 tbm_surface_queue_error_e
746 tbm_surface_queue_remove_dequeuable_cb(
747         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeuable_cb,
748         void *data)
749 {
750         _tbm_surf_queue_mutex_lock();
751         _tbm_set_last_result(TBM_ERROR_NONE);
752
753         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
754                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
755
756         pthread_mutex_lock(&surface_queue->lock);
757
758         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
759
760         _notify_remove(&surface_queue->dequeuable_noti, dequeuable_cb, data);
761
762         pthread_mutex_unlock(&surface_queue->lock);
763
764         _tbm_surf_queue_mutex_unlock();
765
766         return TBM_SURFACE_QUEUE_ERROR_NONE;
767 }
768
769 tbm_surface_queue_error_e
770 tbm_surface_queue_add_dequeue_cb(
771         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
772         void *data)
773 {
774         _tbm_surf_queue_mutex_lock();
775         _tbm_set_last_result(TBM_ERROR_NONE);
776
777         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
778                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
779         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(dequeue_cb,
780                                TBM_ERROR_INVALID_PARAMETER);
781
782         pthread_mutex_lock(&surface_queue->lock);
783
784         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
785
786         _notify_add(&surface_queue->dequeue_noti, dequeue_cb, data);
787
788         pthread_mutex_unlock(&surface_queue->lock);
789
790         _tbm_surf_queue_mutex_unlock();
791
792         return TBM_SURFACE_QUEUE_ERROR_NONE;
793 }
794
795 tbm_surface_queue_error_e
796 tbm_surface_queue_remove_dequeue_cb(
797         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb dequeue_cb,
798         void *data)
799 {
800         _tbm_surf_queue_mutex_lock();
801         _tbm_set_last_result(TBM_ERROR_NONE);
802
803         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
804                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
805
806         pthread_mutex_lock(&surface_queue->lock);
807
808         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
809
810         _notify_remove(&surface_queue->dequeue_noti, dequeue_cb, data);
811
812         pthread_mutex_unlock(&surface_queue->lock);
813
814         _tbm_surf_queue_mutex_unlock();
815
816         return TBM_SURFACE_QUEUE_ERROR_NONE;
817 }
818
819 tbm_surface_queue_error_e
820 tbm_surface_queue_add_can_dequeue_cb(
821         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
822         void *data)
823 {
824         _tbm_surf_queue_mutex_lock();
825         _tbm_set_last_result(TBM_ERROR_NONE);
826
827         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
828                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
829         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(can_dequeue_cb,
830                                TBM_ERROR_INVALID_PARAMETER);
831
832         pthread_mutex_lock(&surface_queue->lock);
833
834         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
835
836         _notify_add(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
837
838         pthread_mutex_unlock(&surface_queue->lock);
839
840         _tbm_surf_queue_mutex_unlock();
841
842         return TBM_SURFACE_QUEUE_ERROR_NONE;
843 }
844
845 tbm_surface_queue_error_e
846 tbm_surface_queue_remove_can_dequeue_cb(
847         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb can_dequeue_cb,
848         void *data)
849 {
850         _tbm_surf_queue_mutex_lock();
851         _tbm_set_last_result(TBM_ERROR_NONE);
852
853         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
854                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
855
856         pthread_mutex_lock(&surface_queue->lock);
857
858         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
859
860         _notify_remove(&surface_queue->can_dequeue_noti, can_dequeue_cb, data);
861
862         pthread_mutex_unlock(&surface_queue->lock);
863
864         _tbm_surf_queue_mutex_unlock();
865
866         return TBM_SURFACE_QUEUE_ERROR_NONE;
867 }
868
869 tbm_surface_queue_error_e
870 tbm_surface_queue_add_acquirable_cb(
871         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
872         void *data)
873 {
874         _tbm_surf_queue_mutex_lock();
875         _tbm_set_last_result(TBM_ERROR_NONE);
876
877         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
878                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
879         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(acquirable_cb,
880                                TBM_ERROR_INVALID_PARAMETER);
881
882         pthread_mutex_lock(&surface_queue->lock);
883
884         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
885
886         _notify_add(&surface_queue->acquirable_noti, acquirable_cb, data);
887
888         pthread_mutex_unlock(&surface_queue->lock);
889
890         _tbm_surf_queue_mutex_unlock();
891
892         return TBM_SURFACE_QUEUE_ERROR_NONE;
893 }
894
895 tbm_surface_queue_error_e
896 tbm_surface_queue_remove_acquirable_cb(
897         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb acquirable_cb,
898         void *data)
899 {
900         _tbm_surf_queue_mutex_lock();
901         _tbm_set_last_result(TBM_ERROR_NONE);
902
903         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
904                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
905
906         pthread_mutex_lock(&surface_queue->lock);
907
908         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
909
910         _notify_remove(&surface_queue->acquirable_noti, acquirable_cb, data);
911
912         pthread_mutex_unlock(&surface_queue->lock);
913
914         _tbm_surf_queue_mutex_unlock();
915
916         return TBM_SURFACE_QUEUE_ERROR_NONE;
917 }
918
919 tbm_surface_queue_error_e
920 tbm_surface_queue_add_trace_cb(
921         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
922         void *data)
923 {
924         _tbm_surf_queue_mutex_lock();
925         _tbm_set_last_result(TBM_ERROR_NONE);
926
927         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
928                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
929         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(trace_cb,
930                                TBM_ERROR_INVALID_PARAMETER);
931
932         pthread_mutex_lock(&surface_queue->lock);
933
934         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
935
936         _trace_add(&surface_queue->trace_noti, trace_cb, data);
937
938         pthread_mutex_unlock(&surface_queue->lock);
939
940         _tbm_surf_queue_mutex_unlock();
941
942         return TBM_SURFACE_QUEUE_ERROR_NONE;
943 }
944
945 tbm_surface_queue_error_e
946 tbm_surface_queue_remove_trace_cb(
947         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace_cb trace_cb,
948         void *data)
949 {
950         _tbm_surf_queue_mutex_lock();
951         _tbm_set_last_result(TBM_ERROR_NONE);
952
953         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
954                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
955
956         pthread_mutex_lock(&surface_queue->lock);
957
958         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
959
960         _trace_remove(&surface_queue->trace_noti, trace_cb, data);
961
962         pthread_mutex_unlock(&surface_queue->lock);
963
964         _tbm_surf_queue_mutex_unlock();
965
966         return TBM_SURFACE_QUEUE_ERROR_NONE;
967 }
968
969 tbm_surface_queue_error_e
970 tbm_surface_queue_set_alloc_cb(
971         tbm_surface_queue_h surface_queue,
972         tbm_surface_alloc_cb alloc_cb,
973         tbm_surface_free_cb free_cb,
974         void *data)
975 {
976         _tbm_surf_queue_mutex_lock();
977         _tbm_set_last_result(TBM_ERROR_NONE);
978
979         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
980                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
981
982         pthread_mutex_lock(&surface_queue->lock);
983
984         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
985
986         surface_queue->alloc_cb = alloc_cb;
987         surface_queue->free_cb = free_cb;
988         surface_queue->alloc_cb_data = data;
989
990         pthread_mutex_unlock(&surface_queue->lock);
991
992         _tbm_surf_queue_mutex_unlock();
993
994         return TBM_SURFACE_QUEUE_ERROR_NONE;
995 }
996
997 int
998 tbm_surface_queue_get_width(tbm_surface_queue_h surface_queue)
999 {
1000         int width;
1001
1002         _tbm_surf_queue_mutex_lock();
1003         _tbm_set_last_result(TBM_ERROR_NONE);
1004
1005         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1006
1007         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1008
1009         width = surface_queue->width;
1010
1011         _tbm_surf_queue_mutex_unlock();
1012
1013         return width;
1014 }
1015
1016 int
1017 tbm_surface_queue_get_height(tbm_surface_queue_h surface_queue)
1018 {
1019         int height;
1020
1021         _tbm_surf_queue_mutex_lock();
1022         _tbm_set_last_result(TBM_ERROR_NONE);
1023
1024         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1025
1026         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1027
1028         height = surface_queue->height;
1029
1030         _tbm_surf_queue_mutex_unlock();
1031
1032         return height;
1033 }
1034
1035 int
1036 tbm_surface_queue_get_format(tbm_surface_queue_h surface_queue)
1037 {
1038         int format;
1039
1040         _tbm_surf_queue_mutex_lock();
1041         _tbm_set_last_result(TBM_ERROR_NONE);
1042
1043         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1044
1045         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1046
1047         format = surface_queue->format;
1048
1049         _tbm_surf_queue_mutex_unlock();
1050
1051         return format;
1052 }
1053
1054 int
1055 tbm_surface_queue_get_size(tbm_surface_queue_h surface_queue)
1056 {
1057         int queue_size;
1058
1059         _tbm_surf_queue_mutex_lock();
1060         _tbm_set_last_result(TBM_ERROR_NONE);
1061
1062         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1063
1064         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1065
1066         queue_size = surface_queue->queue_size;
1067
1068         _tbm_surf_queue_mutex_unlock();
1069
1070         return queue_size;
1071 }
1072
1073 tbm_surface_queue_error_e
1074 tbm_surface_queue_add_reset_cb(
1075         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1076         void *data)
1077 {
1078         _tbm_surf_queue_mutex_lock();
1079         _tbm_set_last_result(TBM_ERROR_NONE);
1080
1081         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1082                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1083         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(reset_cb,
1084                                TBM_ERROR_INVALID_PARAMETER);
1085
1086         pthread_mutex_lock(&surface_queue->lock);
1087
1088         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1089
1090         _notify_add(&surface_queue->reset_noti, reset_cb, data);
1091
1092         pthread_mutex_unlock(&surface_queue->lock);
1093
1094         _tbm_surf_queue_mutex_unlock();
1095
1096         return TBM_SURFACE_QUEUE_ERROR_NONE;
1097 }
1098
1099 tbm_surface_queue_error_e
1100 tbm_surface_queue_remove_reset_cb(
1101         tbm_surface_queue_h surface_queue, tbm_surface_queue_notify_cb reset_cb,
1102         void *data)
1103 {
1104         _tbm_surf_queue_mutex_lock();
1105         _tbm_set_last_result(TBM_ERROR_NONE);
1106
1107         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1108                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1109
1110         pthread_mutex_lock(&surface_queue->lock);
1111
1112         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1113
1114         _notify_remove(&surface_queue->reset_noti, reset_cb, data);
1115
1116         pthread_mutex_unlock(&surface_queue->lock);
1117
1118         _tbm_surf_queue_mutex_unlock();
1119
1120         return TBM_SURFACE_QUEUE_ERROR_NONE;
1121 }
1122
1123 tbm_surface_queue_error_e
1124 tbm_surface_queue_enqueue(tbm_surface_queue_h
1125                           surface_queue, tbm_surface_h surface)
1126 {
1127         queue_node *node;
1128         int queue_type;
1129
1130         _tbm_surf_queue_mutex_lock();
1131         _tbm_set_last_result(TBM_ERROR_NONE);
1132
1133         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1134                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1135         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1136                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1137
1138         if (b_dump_queue)
1139                 tbm_surface_internal_dump_buffer(surface, "enqueue");
1140
1141         pthread_mutex_lock(&surface_queue->lock);
1142
1143         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1144
1145         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1146         if (node == NULL || queue_type != NODE_LIST) {
1147                 TBM_ERR("tbm_surface_queue_enqueue::Surface is unknown or already exists in the free_queue or dirty_queue. node:%p, type:%d\n",
1148                         node, queue_type);
1149                 pthread_mutex_unlock(&surface_queue->lock);
1150
1151                 _tbm_surf_queue_mutex_unlock();
1152
1153                 if (!node) {
1154                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1155                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1156                 } else {
1157                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1158                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1159                 }
1160         }
1161
1162         if (surface_queue->impl && surface_queue->impl->enqueue)
1163                 surface_queue->impl->enqueue(surface_queue, node);
1164         else
1165                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1166
1167         if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
1168                 TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
1169                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
1170                 pthread_mutex_unlock(&surface_queue->lock);
1171
1172                 _tbm_surf_queue_mutex_unlock();
1173                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
1174         }
1175
1176         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1177
1178         if (surface_queue->enqueue_sync_count == 1) {
1179                 tbm_surface_info_s info;
1180                 int ret;
1181
1182                 ret = tbm_surface_map(surface, TBM_SURF_OPTION_READ, &info);
1183                 if (ret == TBM_SURFACE_ERROR_NONE)
1184                         tbm_surface_unmap(surface);
1185         }
1186
1187         if (surface_queue->enqueue_sync_count > 0) surface_queue->enqueue_sync_count--;
1188
1189         pthread_mutex_unlock(&surface_queue->lock);
1190         pthread_cond_signal(&surface_queue->dirty_cond);
1191
1192         _tbm_surf_queue_mutex_unlock();
1193
1194         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_ENQUEUE);
1195
1196         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1197
1198         return TBM_SURFACE_QUEUE_ERROR_NONE;
1199 }
1200
1201 tbm_surface_queue_error_e
1202 tbm_surface_queue_cancel_dequeue(tbm_surface_queue_h
1203                           surface_queue, tbm_surface_h surface)
1204 {
1205         queue_node *node;
1206         int queue_type;
1207
1208         _tbm_surf_queue_mutex_lock();
1209         _tbm_set_last_result(TBM_ERROR_NONE);
1210
1211         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1212                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1213         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1214                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1215
1216         pthread_mutex_lock(&surface_queue->lock);
1217
1218         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1219
1220         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1221         if (node == NULL || queue_type != NODE_LIST) {
1222                 TBM_ERR("tbm_surface_queue_cancel_dequeue::Surface is unknown or already exists in the free_queue or dirty_queue. node:%p, type:%d\n",
1223                         node, queue_type);
1224                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1225                 pthread_mutex_unlock(&surface_queue->lock);
1226
1227                 _tbm_surf_queue_mutex_unlock();
1228                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1229         }
1230
1231         if (node->delete_pending) {
1232                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1233
1234                 _queue_delete_node(surface_queue, node);
1235
1236                 pthread_mutex_unlock(&surface_queue->lock);
1237
1238                 _tbm_surf_queue_mutex_unlock();
1239
1240                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1241
1242                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1243         }
1244
1245         if (surface_queue->queue_size < surface_queue->num_attached) {
1246                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1247
1248                 if (surface_queue->impl && surface_queue->impl->need_detach)
1249                         surface_queue->impl->need_detach(surface_queue, node);
1250                 else
1251                         _tbm_surface_queue_detach(surface_queue, surface);
1252
1253                 pthread_mutex_unlock(&surface_queue->lock);
1254
1255                 _tbm_surf_queue_mutex_unlock();
1256
1257                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1258
1259                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1260         }
1261
1262         if (surface_queue->impl && surface_queue->impl->release)
1263                 surface_queue->impl->release(surface_queue, node);
1264         else
1265                 _tbm_surface_queue_release(surface_queue, node, 1);
1266
1267         if (_queue_is_empty(&surface_queue->free_queue)) {
1268                 TBM_ERR("surface_queue->free_queue is empty.\n");
1269                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1270                 pthread_mutex_unlock(&surface_queue->lock);
1271
1272                 _tbm_surf_queue_mutex_unlock();
1273                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
1274         }
1275
1276         node->type = QUEUE_NODE_TYPE_RELEASE;
1277
1278         pthread_mutex_unlock(&surface_queue->lock);
1279         pthread_cond_signal(&surface_queue->free_cond);
1280
1281         _tbm_surf_queue_mutex_unlock();
1282
1283         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
1284
1285         return TBM_SURFACE_QUEUE_ERROR_NONE;
1286 }
1287
1288 tbm_surface_queue_error_e
1289 tbm_surface_queue_dequeue(tbm_surface_queue_h
1290                           surface_queue, tbm_surface_h *surface)
1291 {
1292         queue_node *node;
1293
1294         _tbm_surf_queue_mutex_lock();
1295         _tbm_set_last_result(TBM_ERROR_NONE);
1296
1297         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1298                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1299         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1300                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1301
1302         *surface = NULL;
1303
1304         pthread_mutex_lock(&surface_queue->lock);
1305
1306         if (_queue_is_empty(&surface_queue->free_queue)) {
1307                 if (surface_queue->impl && surface_queue->impl->need_attach)
1308                         surface_queue->impl->need_attach(surface_queue);
1309                 else
1310                         _tbm_surface_queue_need_attach(surface_queue);
1311         }
1312
1313         if (surface_queue->impl && surface_queue->impl->dequeue)
1314                 node = surface_queue->impl->dequeue(surface_queue);
1315         else
1316                 node = _tbm_surface_queue_dequeue(surface_queue);
1317
1318         if (node == NULL || node->surface == NULL) {
1319                 TBM_ERR("_queue_node_pop_front failed\n");
1320                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1321                 pthread_mutex_unlock(&surface_queue->lock);
1322
1323                 _tbm_surf_queue_mutex_unlock();
1324                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1325         }
1326
1327         node->type = QUEUE_NODE_TYPE_DEQUEUE;
1328         *surface = node->surface;
1329
1330         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1331
1332         pthread_mutex_unlock(&surface_queue->lock);
1333
1334         _tbm_surf_queue_mutex_unlock();
1335
1336         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_DEQUEUE);
1337
1338         _notify_emit(surface_queue, &surface_queue->dequeue_noti);
1339
1340         return TBM_SURFACE_QUEUE_ERROR_NONE;
1341 }
1342
1343 tbm_surface_queue_error_e
1344 tbm_surface_queue_can_dequeue_wait_timeout(tbm_surface_queue_h surface_queue, int ms_timeout)
1345 {
1346         int ret;
1347         struct timespec tp;
1348
1349         _tbm_surf_queue_mutex_lock();
1350         _tbm_set_last_result(TBM_ERROR_NONE);
1351
1352         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1353                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1354
1355         _tbm_surf_queue_mutex_unlock();
1356
1357         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1358
1359         _tbm_surf_queue_mutex_lock();
1360
1361         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1362                                                                           TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1363
1364         pthread_mutex_lock(&surface_queue->lock);
1365
1366         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1367
1368         if (_queue_is_empty(&surface_queue->free_queue)) {
1369                 if (surface_queue->impl && surface_queue->impl->need_attach)
1370                         surface_queue->impl->need_attach(surface_queue);
1371                 else
1372                         _tbm_surface_queue_need_attach(surface_queue);
1373         }
1374
1375         if (!_queue_is_empty(&surface_queue->free_queue)) {
1376                 pthread_mutex_unlock(&surface_queue->lock);
1377                 _tbm_surf_queue_mutex_unlock();
1378                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1379         }
1380
1381         _tbm_surf_queue_mutex_unlock();
1382
1383         while (1) {
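                /* build an absolute CLOCK_MONOTONIC deadline ms_timeout milliseconds
                 * from now for pthread_cond_timedwait() */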
1384                 clock_gettime(CLOCK_MONOTONIC, &tp);
1385
1386                 if (ms_timeout >= 1000)
1387                         tp.tv_sec += ms_timeout / 1000;
1388
1389                 tp.tv_nsec += (ms_timeout % 1000) * 1000000;
1390
1391                 if (tp.tv_nsec >= 1000000000L) {
1392                         tp.tv_sec++;
1393                         tp.tv_nsec -= 1000000000L;
1394                 }
1395
1396                 ret = pthread_cond_timedwait(&surface_queue->free_cond, &surface_queue->lock, &tp);
1397                 if (ret) {
1398                         if (ret == ETIMEDOUT) {
1399                                 TBM_ERR("surface_queue:%p can_dequeue wait timed out", surface_queue);
1400                                 pthread_mutex_unlock(&surface_queue->lock);
1401                                 return TBM_SURFACE_QUEUE_ERROR_TIMEOUT;
1402                         } else {
1403                                 TBM_INFO("surface_queue:%p timedwait error, retry wait", surface_queue);
1404                         }
1405                 } else {
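                        /* free_cond was signaled: attach a surface if the queue is not
                         * yet full and re-check the free queue before returning */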
1406                         if (surface_queue->impl && surface_queue->impl->need_attach)
1407                                 surface_queue->impl->need_attach(surface_queue);
1408                         else
1409                                 _tbm_surface_queue_need_attach(surface_queue);
1410
1411                         if (!_queue_is_empty(&surface_queue->free_queue)) {
1412                                 pthread_mutex_unlock(&surface_queue->lock);
1413                                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1414                         }
1415                 }
1416         }
1417 }
1418
1419 int
1420 tbm_surface_queue_can_dequeue(tbm_surface_queue_h surface_queue, int wait)
1421 {
1422         _tbm_surf_queue_mutex_lock();
1423         _tbm_set_last_result(TBM_ERROR_NONE);
1424
1425         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1426
1427         _tbm_surf_queue_mutex_unlock();
1428
1429         _notify_emit(surface_queue, &surface_queue->can_dequeue_noti);
1430
1431         _tbm_surf_queue_mutex_lock();
1432
1433         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1434
1435         pthread_mutex_lock(&surface_queue->lock);
1436
1437         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1438
1439         if (_queue_is_empty(&surface_queue->free_queue)) {
1440                 if (surface_queue->impl && surface_queue->impl->need_attach)
1441                         surface_queue->impl->need_attach(surface_queue);
1442                 else
1443                         _tbm_surface_queue_need_attach(surface_queue);
1444         }
1445
1446         if (!_queue_is_empty(&surface_queue->free_queue)) {
1447                 pthread_mutex_unlock(&surface_queue->lock);
1448                 _tbm_surf_queue_mutex_unlock();
1449                 return 1;
1450         }
1451
1452         if (wait) {
1453                 _tbm_surf_queue_mutex_unlock();
1454                 while (1) {
1455                         pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
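                        /* woken up on free_cond: attach a surface if needed and
                         * re-check the free queue before reporting dequeuability */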
1456
1457                         if (surface_queue->impl && surface_queue->impl->need_attach)
1458                                 surface_queue->impl->need_attach(surface_queue);
1459                         else
1460                                 _tbm_surface_queue_need_attach(surface_queue);
1461
1462                         if (!_queue_is_empty(&surface_queue->free_queue)) {
1463                                 pthread_mutex_unlock(&surface_queue->lock);
1464                                 return 1;
1465                         }
1466                 }
1467         }
1468
1469         pthread_mutex_unlock(&surface_queue->lock);
1470         _tbm_surf_queue_mutex_unlock();
1471         return 0;
1472 }
1473
1474 tbm_surface_queue_error_e
1475 tbm_surface_queue_release(tbm_surface_queue_h
1476                           surface_queue, tbm_surface_h surface)
1477 {
1478         queue_node *node;
1479         int queue_type;
1480
1481         _tbm_surf_queue_mutex_lock();
1482         _tbm_set_last_result(TBM_ERROR_NONE);
1483
1484         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1485                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1486         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1487                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1488
1489         pthread_mutex_lock(&surface_queue->lock);
1490
1491         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1492
1493         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1494         if (node == NULL || queue_type != NODE_LIST) {
1495                 TBM_ERR("tbm_surface_queue_release::Surface is unknown or already exists in the free_queue or dirty_queue. node:%p, type:%d\n",
1496                         node, queue_type);
1497                 pthread_mutex_unlock(&surface_queue->lock);
1498
1499                 _tbm_surf_queue_mutex_unlock();
1500
1501                 if (!node) {
1502                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1503                         return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1504                 } else {
1505                         _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1506                         return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1507                 }
1508         }
1509
1510         if (node->delete_pending) {
1511                 TBM_TRACE_SURFACE_QUEUE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1512
1513                 _queue_delete_node(surface_queue, node);
1514
1515                 pthread_mutex_unlock(&surface_queue->lock);
1516
1517                 _tbm_surf_queue_mutex_unlock();
1518
1519                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1520
1521                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1522         }
1523
1524         if (surface_queue->queue_size < surface_queue->num_attached) {
1525                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1526
1527                 if (surface_queue->impl && surface_queue->impl->need_detach)
1528                         surface_queue->impl->need_detach(surface_queue, node);
1529                 else
1530                         _tbm_surface_queue_detach(surface_queue, surface);
1531
1532                 pthread_mutex_unlock(&surface_queue->lock);
1533
1534                 _tbm_surf_queue_mutex_unlock();
1535
1536                 _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1537
1538                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1539         }
1540
1541         if (surface_queue->impl && surface_queue->impl->release)
1542                 surface_queue->impl->release(surface_queue, node);
1543         else
1544                 _tbm_surface_queue_release(surface_queue, node, 1);
1545
1546         if (!_queue_get_node(surface_queue, FREE_QUEUE, surface, NULL)) {
1547                 TBM_ERR("release surface(%p) but surface isn't present in the free_queue\n", surface);
1548                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
1549                 pthread_mutex_unlock(&surface_queue->lock);
1550
1551                 _tbm_surf_queue_mutex_unlock();
1552                 return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
1553         }
1554
1555         node->type = QUEUE_NODE_TYPE_RELEASE;
1556
1557         pthread_mutex_unlock(&surface_queue->lock);
1558         pthread_cond_signal(&surface_queue->free_cond);
1559
1560         _tbm_surf_queue_mutex_unlock();
1561
1562         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
1563
1564         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1565
1566         return TBM_SURFACE_QUEUE_ERROR_NONE;
1567 }
1568
1569 tbm_surface_queue_error_e
1570 tbm_surface_queue_cancel_acquire(tbm_surface_queue_h
1571                         surface_queue, tbm_surface_h surface)
1572 {
1573         queue_node *node;
1574         int queue_type;
1575
1576         _tbm_surf_queue_mutex_lock();
1577         _tbm_set_last_result(TBM_ERROR_NONE);
1578
1579         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1580                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1581         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1582                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1583
1584         pthread_mutex_lock(&surface_queue->lock);
1585
1586         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, surface);
1587
1588         node = _queue_get_node(surface_queue, 0, surface, &queue_type);
1589         if (node == NULL || queue_type != NODE_LIST) {
1590                 TBM_ERR("tbm_surface_queue_cancel_acquire::surface is unknown or already exists in the free_queue/dirty_queue node:%p, type:%d\n",
1591                         node, queue_type);
1592                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST);
1593                 pthread_mutex_unlock(&surface_queue->lock);
1594
1595                 _tbm_surf_queue_mutex_unlock();
1596                 return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
1597         }
1598
1599         if (surface_queue->impl && surface_queue->impl->enqueue)
1600                 surface_queue->impl->enqueue(surface_queue, node);
1601         else
1602                 _tbm_surface_queue_enqueue(surface_queue, node, 1);
1603
1604         if (_queue_is_empty(&surface_queue->dirty_queue)) {
1605                 TBM_ERR("enqueue surface but the dirty_queue is empty node:%p\n", node);
1606                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE);
1607                 pthread_mutex_unlock(&surface_queue->lock);
1608
1609                 _tbm_surf_queue_mutex_unlock();
1610                 return TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE;
1611         }
1612
1613         node->type = QUEUE_NODE_TYPE_ENQUEUE;
1614
1615         pthread_mutex_unlock(&surface_queue->lock);
1616         pthread_cond_signal(&surface_queue->dirty_cond);
1617
1618         _tbm_surf_queue_mutex_unlock();
1619
1620         _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_ACQUIRE);
1621
1622         _notify_emit(surface_queue, &surface_queue->acquirable_noti);
1623
1624         return TBM_SURFACE_QUEUE_ERROR_NONE;
1625 }
1626
1627 tbm_surface_queue_error_e
1628 tbm_surface_queue_acquire(tbm_surface_queue_h
1629                           surface_queue, tbm_surface_h *surface)
1630 {
1631         queue_node *node;
1632
1633         _tbm_surf_queue_mutex_lock();
1634         _tbm_set_last_result(TBM_ERROR_NONE);
1635
1636         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1637                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1638         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(surface != NULL,
1639                                TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE);
1640
1641         *surface = NULL;        /* clear the out parameter only after it has been validated */
1642
1643         pthread_mutex_lock(&surface_queue->lock);
1644
1645         if (surface_queue->impl && surface_queue->impl->acquire)
1646                 node = surface_queue->impl->acquire(surface_queue);
1647         else
1648                 node = _tbm_surface_queue_acquire(surface_queue);
1649
1650         if (node == NULL || node->surface == NULL) {
1651                 TBM_ERR("_queue_node_pop_front failed\n");
1652                 _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_EMPTY);
1653                 pthread_mutex_unlock(&surface_queue->lock);
1654
1655                 _tbm_surf_queue_mutex_unlock();
1656                 return TBM_SURFACE_QUEUE_ERROR_EMPTY;
1657         }
1658
1659         node->type = QUEUE_NODE_TYPE_ACQUIRE;
1660
1661         *surface = node->surface;
1662
1663         if (surface_queue->acquire_sync_count == 1) {
1664                 tbm_surface_info_s info;
1665                 int ret;
1666
1667                 TBM_ERR("start map surface:%p", *surface);
1668                 ret = tbm_surface_map(*surface, TBM_SURF_OPTION_READ, &info);
1669                 TBM_ERR("end map surface:%p", *surface);
1670                 if (ret == TBM_SURFACE_ERROR_NONE)
1671                         tbm_surface_unmap(*surface);
1672         }
1673
1674         if (surface_queue->acquire_sync_count > 0) surface_queue->acquire_sync_count--;
1675
1676         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) tbm_surface(%p)\n", surface_queue, *surface);
1677
1678         pthread_mutex_unlock(&surface_queue->lock);
1679
1680         _tbm_surf_queue_mutex_unlock();
1681
1682         if (b_dump_queue)
1683                 tbm_surface_internal_dump_buffer(*surface, "acquire");
1684
1685         _trace_emit(surface_queue, &surface_queue->trace_noti, *surface, TBM_SURFACE_QUEUE_TRACE_ACQUIRE);
1686
1687         return TBM_SURFACE_QUEUE_ERROR_NONE;
1688 }
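/* Illustrative usage sketch (not part of the original source): a consumer typically
 * waits until a buffer is acquirable, acquires it, reads it, and then releases it so
 * the node returns to the free_queue and the producer can dequeue it again.  The API
 * names are the public ones from tbm_surface_queue.h; "queue" is an existing queue
 * handle and show_on_screen() is a hypothetical placeholder.
 *
 *     tbm_surface_h surf;
 *
 *     if (tbm_surface_queue_can_acquire(queue, 1) &&
 *         tbm_surface_queue_acquire(queue, &surf) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *             show_on_screen(surf);                    // hypothetical consumer work
 *             tbm_surface_queue_release(queue, surf);  // node goes back to the free_queue
 *     }
 */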
1689
1690 int
1691 tbm_surface_queue_can_acquire(tbm_surface_queue_h surface_queue, int wait)
1692 {
1693         _tbm_surf_queue_mutex_lock();
1694         _tbm_set_last_result(TBM_ERROR_NONE);
1695
1696         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue), 0);
1697
1698         pthread_mutex_lock(&surface_queue->lock);
1699
1700         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1701
1702         if (!_queue_is_empty(&surface_queue->dirty_queue)) {
1703                 pthread_mutex_unlock(&surface_queue->lock);
1704                 _tbm_surf_queue_mutex_unlock();
1705                 return 1;
1706         }
1707
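        /* a producer still holds a dequeued buffer: wait on dirty_cond for the
         * matching enqueue and then report the queue as acquirable */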
1708         if (wait && _tbm_surface_queue_get_node_count(surface_queue,
1709                                                 QUEUE_NODE_TYPE_DEQUEUE)) {
1710                 _tbm_surf_queue_mutex_unlock();
1711                 pthread_cond_wait(&surface_queue->dirty_cond, &surface_queue->lock);
1712                 pthread_mutex_unlock(&surface_queue->lock);
1713                 return 1;
1714         }
1715
1716         pthread_mutex_unlock(&surface_queue->lock);
1717         _tbm_surf_queue_mutex_unlock();
1718         return 0;
1719 }
1720
1721 void
1722 tbm_surface_queue_destroy(tbm_surface_queue_h surface_queue)
1723 {
1724         queue_node *node = NULL, *tmp;
1725
1726         _tbm_surf_queue_mutex_lock();
1727         _tbm_set_last_result(TBM_ERROR_NONE);
1728
1729         TBM_SURF_QUEUE_RETURN_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue));
1730
1731         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1732
1733         LIST_DEL(&surface_queue->item_link);
1734
1735         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1736                 _queue_delete_node(surface_queue, node);
1737
1738         if (surface_queue->impl && surface_queue->impl->destroy)
1739                 surface_queue->impl->destroy(surface_queue);
1740
1741         _notify_emit(surface_queue, &surface_queue->destory_noti);
1742
1743         _notify_remove_all(&surface_queue->destory_noti);
1744         _notify_remove_all(&surface_queue->dequeuable_noti);
1745         _notify_remove_all(&surface_queue->dequeue_noti);
1746         _notify_remove_all(&surface_queue->can_dequeue_noti);
1747         _notify_remove_all(&surface_queue->acquirable_noti);
1748         _notify_remove_all(&surface_queue->reset_noti);
1749         _trace_remove_all(&surface_queue->trace_noti);
1750
1751         pthread_mutex_destroy(&surface_queue->lock);
1752
1753         free(surface_queue);
1754
1755         if (LIST_IS_EMPTY(&g_surf_queue_bufmgr->surf_queue_list))
1756                 _deinit_tbm_surf_queue_bufmgr();
1757
1758         _tbm_surf_queue_mutex_unlock();
1759 }
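/* Illustrative usage sketch (not part of the original source): the owner destroys the
 * queue once no thread still dequeues from or acquires it; destroy deletes every
 * attached node, calls the backend destroy hook and emits destory_noti before the
 * handle is freed.  "queue" is an existing queue handle.
 *
 *     tbm_surface_queue_destroy(queue);   // the queue handle must not be used afterwards
 */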
1760
1761 tbm_surface_queue_error_e
1762 tbm_surface_queue_reset(tbm_surface_queue_h
1763                         surface_queue, int width, int height, int format)
1764 {
1765         queue_node *node = NULL, *tmp;
1766
1767         _tbm_surf_queue_mutex_lock();
1768         _tbm_set_last_result(TBM_ERROR_NONE);
1769
1770         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1771                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1772
1773         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1774
1775         if (width == surface_queue->width && height == surface_queue->height &&
1776                 format == surface_queue->format) {
1777                 _tbm_surf_queue_mutex_unlock();
1778                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1779         }
1780
1781         pthread_mutex_lock(&surface_queue->lock);
1782
1783         surface_queue->width = width;
1784         surface_queue->height = height;
1785         surface_queue->format = format;
1786
1787         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1788                 /* Destroy the surfaces in the free_queue and mark the attached surfaces delete-pending */
1789                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1790                         _queue_delete_node(surface_queue, node);
1791
1792                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1793                         node->delete_pending = 1;
1794         } else {
1795                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1796                         _queue_delete_node(surface_queue, node);
1797
1798                 _queue_init(&surface_queue->dirty_queue);
1799                 LIST_INITHEAD(&surface_queue->list);
1800         }
1801
1802         /* Reset queue */
1803         _queue_init(&surface_queue->free_queue);
1804
1805         surface_queue->num_attached = 0;
1806
1807         if (surface_queue->impl && surface_queue->impl->reset)
1808                 surface_queue->impl->reset(surface_queue);
1809
1810         pthread_mutex_unlock(&surface_queue->lock);
1811         pthread_cond_signal(&surface_queue->free_cond);
1812
1813         _tbm_surf_queue_mutex_unlock();
1814
1815         _notify_emit(surface_queue, &surface_queue->reset_noti);
1816
1817         return TBM_SURFACE_QUEUE_ERROR_NONE;
1818 }
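/* Illustrative usage sketch (not part of the original source): reset is typically
 * called when the target size or format changes, e.g. on a window resize.  Buffers in
 * the free_queue are destroyed immediately; with GUARANTEE_CYCLE set, buffers still in
 * flight are only marked delete-pending.  TBM_FORMAT_ARGB8888 is just an example
 * format from tbm_surface.h and new_width/new_height are placeholders.
 *
 *     tbm_surface_queue_reset(queue, new_width, new_height, TBM_FORMAT_ARGB8888);
 */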
1819
1820 tbm_surface_queue_error_e
1821 tbm_surface_queue_notify_reset(tbm_surface_queue_h surface_queue)
1822 {
1823         _tbm_surf_queue_mutex_lock();
1824         _tbm_set_last_result(TBM_ERROR_NONE);
1825
1826         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1827                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1828
1829         _tbm_surf_queue_mutex_unlock();
1830
1831         _notify_emit(surface_queue, &surface_queue->reset_noti);
1832
1833         return TBM_SURFACE_QUEUE_ERROR_NONE;
1834 }
1835
1836 tbm_surface_queue_error_e
1837 tbm_surface_queue_notify_dequeuable(tbm_surface_queue_h surface_queue)
1838 {
1839         _tbm_surf_queue_mutex_lock();
1840         _tbm_set_last_result(TBM_ERROR_NONE);
1841
1842         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1843                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1844
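        /* take and drop the queue lock so that this serializes with any thread that is
         * between checking its condition and entering pthread_cond_wait(), then signal */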
1845         pthread_mutex_lock(&surface_queue->lock);
1846         pthread_mutex_unlock(&surface_queue->lock);
1847         pthread_cond_signal(&surface_queue->free_cond);
1848
1849         _tbm_surf_queue_mutex_unlock();
1850
1851         _notify_emit(surface_queue, &surface_queue->dequeuable_noti);
1852
1853         return TBM_SURFACE_QUEUE_ERROR_NONE;
1854 }
1855
1856 tbm_surface_queue_error_e
1857 tbm_surface_queue_set_size(tbm_surface_queue_h
1858                         surface_queue, int queue_size, int flush)
1859 {
1860         queue_node *node = NULL, *tmp;
1861
1862         _tbm_surf_queue_mutex_lock();
1863         _tbm_set_last_result(TBM_ERROR_NONE);
1864
1865         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1866                                         TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1867         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0,
1868                                         TBM_ERROR_INVALID_PARAMETER);
1869
1870         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1871
1872         if ((surface_queue->queue_size == queue_size) && !flush) {
1873                 _tbm_surf_queue_mutex_unlock();
1874                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1875         }
1876
1877         pthread_mutex_lock(&surface_queue->lock);
1878
1879         if (flush) {
1880                 surface_queue->queue_size = queue_size;
1881
1882                 if (surface_queue->num_attached == 0) {
1883                         pthread_mutex_unlock(&surface_queue->lock);
1884                         _tbm_surf_queue_mutex_unlock();
1885                         return TBM_SURFACE_QUEUE_ERROR_NONE;
1886                 }
1887
1888                 if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
1889                         /* Destroy the surfaces in the free_queue and mark the attached surfaces delete-pending */
1890                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
1891                                 _queue_delete_node(surface_queue, node);
1892
1893                         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
1894                                 node->delete_pending = 1;
1895                 } else {
1896                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
1897                                 _queue_delete_node(surface_queue, node);
1898
1899                         _queue_init(&surface_queue->dirty_queue);
1900                         LIST_INITHEAD(&surface_queue->list);
1901                 }
1902
1903                 /* Reset queue */
1904                 _queue_init(&surface_queue->free_queue);
1905
1906                 surface_queue->num_attached = 0;
1907
1908                 if (surface_queue->impl && surface_queue->impl->reset)
1909                         surface_queue->impl->reset(surface_queue);
1910
1911                 pthread_mutex_unlock(&surface_queue->lock);
1912                 pthread_cond_signal(&surface_queue->free_cond);
1913
1914                 _tbm_surf_queue_mutex_unlock();
1915
1916                 _notify_emit(surface_queue, &surface_queue->reset_noti);
1917
1918                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1919         } else {
1920                 if (surface_queue->queue_size > queue_size) {
1921                         int need_del = surface_queue->queue_size - queue_size;
1922
1923                         LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link) {
1924                                 TBM_TRACE_SURFACE_QUEUE("detach tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
1925
1926                                 if (surface_queue->impl && surface_queue->impl->need_detach)
1927                                         surface_queue->impl->need_detach(surface_queue, node);
1928                                 else
1929                                         _tbm_surface_queue_detach(surface_queue, node->surface);
1930
1931                                 need_del--;
1932                                 if (need_del == 0)
1933                                         break;
1934                         }
1935                 }
1936
1937                 surface_queue->queue_size = queue_size;
1938
1939                 pthread_mutex_unlock(&surface_queue->lock);
1940
1941                 _tbm_surf_queue_mutex_unlock();
1942
1943                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1944         }
1945 }
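/* Illustrative usage sketch (not part of the original source): set_size with flush=1
 * behaves like a reset to the new queue_size, while flush=0 only trims surplus buffers
 * from the free_queue and leaves in-flight buffers untouched.
 *
 *     tbm_surface_queue_set_size(queue, 2, 0);   // shrink lazily, keep in-flight buffers
 *     tbm_surface_queue_set_size(queue, 4, 1);   // resize and flush everything attached
 */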
1946
1947 tbm_surface_queue_error_e
1948 tbm_surface_queue_free_flush(tbm_surface_queue_h surface_queue)
1949 {
1950         queue_node *node = NULL;
1951
1952         _tbm_surf_queue_mutex_lock();
1953         _tbm_set_last_result(TBM_ERROR_NONE);
1954
1955         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1956                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1957
1958         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1959
1960         if (surface_queue->num_attached == 0) {
1961                 _tbm_surf_queue_mutex_unlock();
1962                 return TBM_SURFACE_QUEUE_ERROR_NONE;
1963         }
1964
1965         pthread_mutex_lock(&surface_queue->lock);
1966
1967         /* Destroy the surfaces in the free_queue */
1968         while ((node = _queue_node_pop_front(&surface_queue->free_queue))) {
1969                 if (surface_queue->impl && surface_queue->impl->need_detach)
1970                         surface_queue->impl->need_detach(surface_queue, node);
1971                 else
1972                         _tbm_surface_queue_detach(surface_queue, node->surface);
1973         }
1974
1975         /* Reset queue */
1976         _queue_init(&surface_queue->free_queue);
1977
1978         pthread_mutex_unlock(&surface_queue->lock);
1979         _tbm_surf_queue_mutex_unlock();
1980
1981         return TBM_SURFACE_QUEUE_ERROR_NONE;
1982 }
1983
1984 tbm_surface_queue_error_e
1985 tbm_surface_queue_flush(tbm_surface_queue_h surface_queue)
1986 {
1987         queue_node *node = NULL, *tmp;
1988
1989         _tbm_surf_queue_mutex_lock();
1990         _tbm_set_last_result(TBM_ERROR_NONE);
1991
1992         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
1993                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
1994
1995         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
1996
1997         if (surface_queue->num_attached == 0) {
1998                 _tbm_surf_queue_mutex_unlock();
1999                 return TBM_SURFACE_QUEUE_ERROR_NONE;
2000         }
2001
2002         pthread_mutex_lock(&surface_queue->lock);
2003
2004         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
2005                 /* Destroy the surfaces in the free_queue and mark the attached surfaces delete-pending */
2006                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
2007                         _queue_delete_node(surface_queue, node);
2008
2009                 LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
2010                         node->delete_pending = 1;
2011         } else {
2012                 LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
2013                         _queue_delete_node(surface_queue, node);
2014
2015                 _queue_init(&surface_queue->dirty_queue);
2016                 LIST_INITHEAD(&surface_queue->list);
2017         }
2018
2019         /* Reset queue */
2020         _queue_init(&surface_queue->free_queue);
2021
2022         surface_queue->num_attached = 0;
2023
2024         if (surface_queue->impl && surface_queue->impl->reset)
2025                 surface_queue->impl->reset(surface_queue);
2026
2027         pthread_mutex_unlock(&surface_queue->lock);
2028         pthread_cond_signal(&surface_queue->free_cond);
2029
2030         _tbm_surf_queue_mutex_unlock();
2031
2032         _notify_emit(surface_queue, &surface_queue->reset_noti);
2033
2034         return TBM_SURFACE_QUEUE_ERROR_NONE;
2035 }
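/* Illustrative usage sketch (not part of the original source): free_flush() only
 * destroys the buffers currently sitting in the free_queue, whereas flush() detaches
 * everything (or marks it delete-pending under GUARANTEE_CYCLE) and emits reset_noti.
 *
 *     tbm_surface_queue_free_flush(queue);   // drop idle buffers, keep in-flight ones
 *     tbm_surface_queue_flush(queue);        // full flush, queue behaves as freshly reset
 */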
2036
2037 tbm_surface_queue_error_e
2038 tbm_surface_queue_get_surfaces(tbm_surface_queue_h surface_queue,
2039                         tbm_surface_h *surfaces, int *num)
2040 {
2041         queue_node *node = NULL;
2042
2043         _tbm_surf_queue_mutex_lock();
2044         _tbm_set_last_result(TBM_ERROR_NONE);
2045
2046         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2047                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2048         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2049                                TBM_ERROR_INVALID_PARAMETER);
2050
2051         *num = 0;
2052
2053         pthread_mutex_lock(&surface_queue->lock);
2054
2055         LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link) {
2056                 if (node->delete_pending) continue;
2057
2058                 if (surfaces)
2059                         surfaces[*num] = node->surface;
2060
2061                 *num = *num + 1;
2062         }
2063
2064         pthread_mutex_unlock(&surface_queue->lock);
2065
2066         _tbm_surf_queue_mutex_unlock();
2067
2068         return TBM_SURFACE_QUEUE_ERROR_NONE;
2069 }
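/* Illustrative usage sketch (not part of the original source): the surfaces argument
 * may be NULL, so the usual pattern is to query the count first and then fetch the
 * handles into a caller-allocated array (the calloc() call here is hypothetical).
 *
 *     int num = 0;
 *     tbm_surface_queue_get_surfaces(queue, NULL, &num);          // count only
 *     tbm_surface_h *surfs = calloc(num, sizeof(tbm_surface_h));
 *     if (surfs)
 *             tbm_surface_queue_get_surfaces(queue, surfs, &num); // fill the array
 */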
2070
2071 tbm_surface_queue_error_e
2072 tbm_surface_queue_get_acquirable_surfaces(tbm_surface_queue_h surface_queue,
2073                         tbm_surface_h *surfaces, int *num)
2074 {
2075         queue_node *node = NULL;
2076
2077         _tbm_surf_queue_mutex_lock();
2078
2079         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2080                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2081         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2082                                TBM_ERROR_INVALID_PARAMETER);
2083
2084         *num = 0;       /* clear the out parameter only after it has been validated */
2085
2086         pthread_mutex_lock(&surface_queue->lock);
2087
2088         LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head, item_link) {
2089                 if (surfaces)
2090                         surfaces[*num] = node->surface;
2091
2092                 *num = *num + 1;
2093         }
2094
2095         pthread_mutex_unlock(&surface_queue->lock);
2096
2097         _tbm_surf_queue_mutex_unlock();
2098
2099         return TBM_SURFACE_QUEUE_ERROR_NONE;
2100 }
2101
2102 tbm_surface_queue_error_e
2103 tbm_surface_queue_get_trace_surface_num(
2104                         tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
2105 {
2106         _tbm_surf_queue_mutex_lock();
2107         _tbm_set_last_result(TBM_ERROR_NONE);
2108
2109         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2110                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2111         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
2112                                TBM_ERROR_INVALID_PARAMETER);
2113
2114         *num = 0;
2115
2116         pthread_mutex_lock(&surface_queue->lock);
2117
2118         switch (trace) {
2119         case TBM_SURFACE_QUEUE_TRACE_NONE:
2120                 *num = 0;
2121                 break;
2122         case TBM_SURFACE_QUEUE_TRACE_DEQUEUE:
2123                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2124                 break;
2125         case TBM_SURFACE_QUEUE_TRACE_ENQUEUE:
2126                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2127                 break;
2128         case TBM_SURFACE_QUEUE_TRACE_ACQUIRE:
2129                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ACQUIRE);
2130                 break;
2131         case TBM_SURFACE_QUEUE_TRACE_RELEASE:
2132                 *num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_RELEASE);
2133                 break;
2134         default:
2135                 break;
2136         }
2137
2138         pthread_mutex_unlock(&surface_queue->lock);
2139
2140         _tbm_surf_queue_mutex_unlock();
2141
2142         return TBM_SURFACE_QUEUE_ERROR_NONE;
2143 }
2144
2145 static const tbm_surface_queue_interface tbm_queue_default_impl = {
2146         NULL,                           /*__tbm_queue_default_init*/
2147         NULL,                           /*__tbm_queue_default_reset*/
2148         NULL,                           /*__tbm_queue_default_destroy*/
2149         NULL,                           /*__tbm_queue_default_need_attach*/
2150         NULL,                           /*__tbm_queue_default_enqueue*/
2151         NULL,                           /*__tbm_queue_default_release*/
2152         NULL,                           /*__tbm_queue_default_dequeue*/
2153         NULL,                           /*__tbm_queue_default_acquire*/
2154         NULL,                           /*__tbm_queue_default_need_detach*/
2155 };
2156
2157 tbm_surface_queue_h
2158 tbm_surface_queue_create(int queue_size, int width,
2159                          int height, int format, int flags)
2160 {
2161         _tbm_surf_queue_mutex_lock();
2162         _tbm_set_last_result(TBM_ERROR_NONE);
2163
2164         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2165         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2166         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2167         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2168
2169         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2170                                             sizeof(struct _tbm_surface_queue));
2171         if (!surface_queue) {
2172                 TBM_ERR("cannot allocate the surface_queue.\n");
2173                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2174                 _tbm_surf_queue_mutex_unlock();
2175                 return NULL;
2176         }
2177
2178         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2179
2180         _tbm_surface_queue_init(surface_queue,
2181                                 queue_size,
2182                                 width, height, format, flags,
2183                                 &tbm_queue_default_impl, NULL);
2184
2185         _tbm_surf_queue_mutex_unlock();
2186
2187         return surface_queue;
2188 }
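/* Illustrative usage sketch (not part of the original source): a minimal producer loop
 * built on the default queue.  TBM_FORMAT_ARGB8888 and TBM_BO_DEFAULT are example
 * values and draw_frame() is a hypothetical placeholder.
 *
 *     tbm_surface_queue_h queue = tbm_surface_queue_create(3, 720, 1280,
 *                                                          TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 *     tbm_surface_h surf;
 *
 *     if (tbm_surface_queue_can_dequeue(queue, 1) &&
 *         tbm_surface_queue_dequeue(queue, &surf) == TBM_SURFACE_QUEUE_ERROR_NONE) {
 *             draw_frame(surf);                        // hypothetical producer work
 *             tbm_surface_queue_enqueue(queue, surf);  // hand the dirty buffer to the consumer
 *     }
 */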
2189
2190 typedef struct {
2191         queue dequeue_list;
2192 } tbm_queue_sequence;
2193
2194 static void
2195 __tbm_queue_sequence_init(tbm_surface_queue_h surface_queue)
2196 {
2197         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2198
2199         _queue_init(&data->dequeue_list);
2200 }
2201
2202 static void
2203 __tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
2204 {
2205         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2206
2207         if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE)
2208                 return;
2209
2210         _queue_init(&data->dequeue_list);
2211 }
2212
2213 static void
2214 __tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
2215 {
2216         free(surface_queue->impl_data);
2217 }
2218
2219 static void
2220 __tbm_queue_sequence_enqueue(tbm_surface_queue_h surface_queue,
2221                              queue_node *node)
2222 {
2223         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2224         queue_node *first = NULL;
2225
2226         first = container_of(data->dequeue_list.head.next, first, item_link);
2227         if (first != node) {
2228                 return;
2229         }
2230
2231         node->priv_flags = 0;
2232
2233         _queue_node_pop(&data->dequeue_list, node);
2234         _tbm_surface_queue_enqueue(surface_queue, node, 1);
2235 }
2236
2237 static void
2238 __tbm_queue_sequence_release(tbm_surface_queue_h surface_queue,
2239                                 queue_node *node)
2240 {
2241         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2242
2243         if (node->priv_flags) {
2244                 node->priv_flags = 0;
2245                 _queue_node_pop(&data->dequeue_list, node);
2246         }
2247
2248         _tbm_surface_queue_release(surface_queue, node, 1);
2249 }
2250
2251 static queue_node *
2252 __tbm_queue_sequence_dequeue(tbm_surface_queue_h
2253                              surface_queue)
2254 {
2255         tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
2256         queue_node *node;
2257
2258         node = _tbm_surface_queue_dequeue(surface_queue);
2259         if (node) {
2260                 _queue_node_push_back(&data->dequeue_list, node);
2261                 node->priv_flags = 1;
2262         }
2263
2264         return node;
2265 }
2266
2267 static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
2268         __tbm_queue_sequence_init,
2269         __tbm_queue_sequence_reset,
2270         __tbm_queue_sequence_destroy,
2271         NULL,
2272         __tbm_queue_sequence_enqueue,
2273         __tbm_queue_sequence_release,
2274         __tbm_queue_sequence_dequeue,
2275         NULL,                                   /*__tbm_queue_sequence_acquire*/
2276         NULL,                                   /*__tbm_queue_sequence_need_detach*/
2277 };
2278
2279 tbm_surface_queue_h
2280 tbm_surface_queue_sequence_create(int queue_size, int width,
2281                                   int height, int format, int flags)
2282 {
2283         _tbm_surf_queue_mutex_lock();
2284         _tbm_set_last_result(TBM_ERROR_NONE);
2285
2286         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(queue_size > 0, NULL);
2287         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(width > 0, NULL);
2288         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(height > 0, NULL);
2289         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(format > 0, NULL);
2290
2291         tbm_surface_queue_h surface_queue = (tbm_surface_queue_h) calloc(1,
2292                                             sizeof(struct _tbm_surface_queue));
2293         if (surface_queue == NULL) {
2294                 TBM_ERR("cannot allocate the surface_queue.\n");
2295                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2296                 _tbm_surf_queue_mutex_unlock();
2297                 return NULL;
2298         }
2299
2300         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p)\n", surface_queue);
2301
2302         tbm_queue_sequence *data = (tbm_queue_sequence *) calloc(1,
2303                                    sizeof(tbm_queue_sequence));
2304         if (data == NULL) {
2305                 TBM_ERR("cannot allocate the tbm_queue_sequence.\n");
2306                 _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
2307                 free(surface_queue);
2308                 _tbm_surf_queue_mutex_unlock();
2309                 return NULL;
2310         }
2311
2312         _tbm_surface_queue_init(surface_queue,
2313                                 queue_size,
2314                                 width, height, format, flags,
2315                                 &tbm_queue_sequence_impl, data);
2316
2317         _tbm_surf_queue_mutex_unlock();
2318
2319         return surface_queue;
2320 }
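/* Illustrative usage sketch (not part of the original source): a sequence queue keeps
 * its own dequeue_list, and __tbm_queue_sequence_enqueue only moves a surface to the
 * dirty_queue when it is the oldest dequeued one, i.e. buffers must be enqueued in
 * dequeue order.  Creation mirrors the default queue; the format/flags values below
 * are just examples.
 *
 *     tbm_surface_queue_h queue = tbm_surface_queue_sequence_create(3, 720, 1280,
 *                                                                   TBM_FORMAT_ARGB8888, TBM_BO_DEFAULT);
 */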
2321
2322 tbm_surface_queue_error_e
2323 tbm_surface_queue_set_modes(tbm_surface_queue_h surface_queue,
2324                                   int modes)
2325 {
2326         _tbm_surf_queue_mutex_lock();
2327         _tbm_set_last_result(TBM_ERROR_NONE);
2328
2329         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2330                                TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2331
2332         pthread_mutex_lock(&surface_queue->lock);
2333
2334         if (modes == TBM_SURFACE_QUEUE_MODE_NONE)
2335                 surface_queue->modes = TBM_SURFACE_QUEUE_MODE_NONE; /* reset the stored modes; assigning to the local parameter was a no-op */
2336         else
2337                 surface_queue->modes |= modes;
2338
2339         pthread_mutex_unlock(&surface_queue->lock);
2340
2341         _tbm_surf_queue_mutex_unlock();
2342
2343         return TBM_SURFACE_QUEUE_ERROR_NONE;
2344 }
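/* Illustrative usage sketch (not part of the original source): set_modes ORs extra
 * mode bits into the queue; GUARANTEE_CYCLE makes reset/flush keep in-flight buffers
 * alive (marked delete-pending) instead of detaching them immediately.
 *
 *     tbm_surface_queue_set_modes(queue, TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE);
 */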
2345
2346 tbm_surface_queue_error_e
2347 tbm_surface_queue_set_sync_count(tbm_surface_queue_h surface_queue,
2348                                   unsigned int sync_count)
2349 {
2350         int dequeue_num, enqueue_num;
2351
2352         _tbm_surf_queue_mutex_lock();
2353         _tbm_set_last_result(TBM_ERROR_NONE);
2354
2355         TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
2356                                    TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
2357
2358         pthread_mutex_lock(&surface_queue->lock);
2359
2360         dequeue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_DEQUEUE);
2361         enqueue_num = _tbm_surface_queue_get_node_count(surface_queue, QUEUE_NODE_TYPE_ENQUEUE);
2362
2363         if (dequeue_num + sync_count == 0)
2364                 surface_queue->acquire_sync_count = enqueue_num;
2365         else
2366                 surface_queue->enqueue_sync_count = dequeue_num + sync_count;
2367
2368         TBM_TRACE_SURFACE_QUEUE("tbm_surface_queue(%p) enqueue_sync_count:(%d) acquire_sync_count:(%d)\n",
2369                                 surface_queue, surface_queue->enqueue_sync_count, surface_queue->acquire_sync_count);
2370
2371         pthread_mutex_unlock(&surface_queue->lock);
2372
2373         _tbm_surf_queue_mutex_unlock();
2374
2375         return TBM_SURFACE_QUEUE_ERROR_NONE;
2376 }