Queue_Node_Type type;
unsigned int priv_flags; /*for each queue*/
+
+ int delete_pending;
} queue_node;
typedef struct {
return TBM_SURFACE_QUEUE_ERROR_INVALID_SURFACE;
}
+ if (node->delete_pending) {
+ TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+
+ _queue_delete_node(surface_queue, node);
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_CANCEL_DEQUEUE);
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+ }
+
if (surface_queue->queue_size < surface_queue->num_attached) {
TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
return TBM_SURFACE_QUEUE_ERROR_ALREADY_EXIST;
}
+ if (node->delete_pending) {
+ TBM_QUEUE_TRACE("delete pending tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
+
+ _queue_delete_node(surface_queue, node);
+
+ pthread_mutex_unlock(&surface_queue->lock);
+
+ _tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+ }
+
if (surface_queue->queue_size < surface_queue->num_attached) {
TBM_QUEUE_TRACE("deatch tbm_surface_queue(%p) surface(%p)\n", surface_queue, node->surface);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
+
+ _trace_emit(surface_queue, &surface_queue->trace_noti, surface, TBM_SURFACE_QUEUE_TRACE_RELEASE);
+
return TBM_SURFACE_QUEUE_ERROR_NONE;
}
surface_queue->format = format;
/* Destory surface and Push to free_queue */
- LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
_queue_delete_node(surface_queue, node);
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+ node->delete_pending = 1;
+
/* Reset queue */
_queue_init(&surface_queue->free_queue);
- _queue_init(&surface_queue->dirty_queue);
- LIST_INITHEAD(&surface_queue->list);
surface_queue->num_attached = 0;
if (flush) {
/* Destory surface and Push to free_queue */
- LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
_queue_delete_node(surface_queue, node);
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+ node->delete_pending = 1;
+
/* Reset queue */
_queue_init(&surface_queue->free_queue);
- _queue_init(&surface_queue->dirty_queue);
- LIST_INITHEAD(&surface_queue->list);
surface_queue->num_attached = 0;
surface_queue->queue_size = queue_size;
pthread_mutex_lock(&surface_queue->lock);
/* Destory surface and Push to free_queue */
- LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->list, link)
+ LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
_queue_delete_node(surface_queue, node);
+ LIST_FOR_EACH_ENTRY(node, &surface_queue->list, link)
+ node->delete_pending = 1;
+
/* Reset queue */
_queue_init(&surface_queue->free_queue);
- _queue_init(&surface_queue->dirty_queue);
- LIST_INITHEAD(&surface_queue->list);
surface_queue->num_attached = 0;
}
static void
-__tbm_queue_sequence_reset(tbm_surface_queue_h surface_queue)
-{
- tbm_queue_sequence *data = (tbm_queue_sequence *)surface_queue->impl_data;
-
- _queue_init(&data->dequeue_list);
-}
-
-static void
__tbm_queue_sequence_destroy(tbm_surface_queue_h surface_queue)
{
free(surface_queue->impl_data);
static const tbm_surface_queue_interface tbm_queue_sequence_impl = {
__tbm_queue_sequence_init,
- __tbm_queue_sequence_reset,
+ NULL,
__tbm_queue_sequence_destroy,
__tbm_queue_sequence_need_attach,
__tbm_queue_sequence_enqueue,