if (!_queue_get_node(surface_queue, DIRTY_QUEUE, surface, NULL)) {
TBM_ERR("enqueue surface(%p) but surface isn't present in the dirty_queue\n", surface);
- _tbm_set_last_result(TBM_SURFACE_ERROR_INVALID_OPERATION);
+ _tbm_set_last_result(TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE);
pthread_mutex_unlock(&surface_queue->lock);
_tbm_surf_queue_mutex_unlock();
- return TBM_SURFACE_ERROR_INVALID_OPERATION;
+ return TBM_SURFACE_QUEUE_ERROR_INVALID_SEQUENCE;
}
node->type = QUEUE_NODE_TYPE_ENQUEUE;
return 1;
}
- if (wait && _tbm_surface_queue_get_node_count(surface_queue,
- QUEUE_NODE_TYPE_ACQUIRE)) {
+ if (wait) {
_tbm_surf_queue_mutex_unlock();
pthread_cond_wait(&surface_queue->free_cond, &surface_queue->lock);
pthread_mutex_unlock(&surface_queue->lock);
TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+ pthread_mutex_lock(&surface_queue->lock);
+ pthread_mutex_unlock(&surface_queue->lock);
+ pthread_cond_signal(&surface_queue->free_cond);
+
_tbm_surf_queue_mutex_unlock();
_notify_emit(surface_queue, &surface_queue->dequeuable_noti);
pthread_mutex_lock(&surface_queue->lock);
if (flush) {
+ surface_queue->queue_size = queue_size;
+
+ if (surface_queue->num_attached == 0) {
+ pthread_mutex_unlock(&surface_queue->lock);
+ _tbm_surf_queue_mutex_unlock();
+ return TBM_SURFACE_QUEUE_ERROR_NONE;
+ }
+
if (surface_queue->modes & TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) {
/* Destory surface and Push to free_queue */
LIST_FOR_EACH_ENTRY_SAFE(node, tmp, &surface_queue->free_queue.head, item_link)
_queue_init(&surface_queue->free_queue);
surface_queue->num_attached = 0;
- surface_queue->queue_size = queue_size;
if (surface_queue->impl && surface_queue->impl->reset)
surface_queue->impl->reset(surface_queue);
}
tbm_surface_queue_error_e
+/* List the surfaces that are currently acquirable, i.e. the ones sitting in
+ * the dirty_queue (enqueued but not yet acquired).  If surfaces is non-NULL
+ * it is filled with those handles; *num always receives the count, so the
+ * caller may pass surfaces == NULL to query the count alone.  num must not
+ * be NULL.  Returns TBM_SURFACE_QUEUE_ERROR_NONE on success. */
+tbm_surface_queue_get_acquirable_surfaces(tbm_surface_queue_h surface_queue,
+		tbm_surface_h *surfaces, int *num)
+{
+	queue_node *node = NULL;
+
+	_tbm_surf_queue_mutex_lock();
+
+	/* Validate before touching *num: dereferencing num ahead of the
+	 * num != NULL check would crash exactly when the check should fire.
+	 * NOTE(review): assumes TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL releases
+	 * the surf_queue mutex on the failure path — confirm the macro. */
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(_tbm_surface_queue_is_valid(surface_queue),
+			TBM_SURFACE_QUEUE_ERROR_INVALID_QUEUE);
+	/* Use the queue's own error enum, matching the declared return type
+	 * (TBM_ERROR_INVALID_PARAMETER belongs to a different enum). */
+	TBM_SURF_QUEUE_RETURN_VAL_IF_FAIL(num != NULL,
+			TBM_SURFACE_QUEUE_ERROR_INVALID_PARAMETER);
+
+	*num = 0;
+
+	pthread_mutex_lock(&surface_queue->lock);
+
+	LIST_FOR_EACH_ENTRY(node, &surface_queue->dirty_queue.head, item_link) {
+		if (surfaces)
+			surfaces[*num] = node->surface;
+
+		*num = *num + 1;
+	}
+
+	pthread_mutex_unlock(&surface_queue->lock);
+
+	_tbm_surf_queue_mutex_unlock();
+
+	return TBM_SURFACE_QUEUE_ERROR_NONE;
+}
+
+tbm_surface_queue_error_e
tbm_surface_queue_get_trace_surface_num(
tbm_surface_queue_h surface_queue, tbm_surface_queue_trace trace, int *num)
{