int is_active;
int active_flush;
+ int active_flush_done;
+ int active_flush_size;
+ int active_flush_invalid;
int usage;
struct wl_list attach_bufs;
- int active_flush_invalid;
- int active_flush_reset;
-
tbm_surface_queue_h tbm_queue;
struct wl_tbm *wl_tbm;
struct wl_list link;
pthread_mutex_t lock;
+
+ int wait_usable;
};
#define DEBUG_TRACE
return 1;
}
+
+/* Predicate: may the client satisfy allocations from the server-attached
+ * buffer list? True only when the queue is active, an active flush was
+ * requested (active_flush) AND has actually completed (active_flush_done),
+ * and the flush state has not since been invalidated.
+ * Caller must hold queue_info->lock. */
+static int
+_wayland_tbm_client_use_attach_bufs(struct wayland_tbm_surface_queue *queue_info)
+{
+	if (!queue_info->is_active) return 0;
+	if (!queue_info->active_flush) return 0;
+	if (!queue_info->active_flush_done) return 0;
+	if (queue_info->active_flush_invalid) return 0;
+
+	return 1;
+}
+
+/* Walks attach_bufs in reverse list order and returns the first buffer
+ * that is not already handed out (allocated), is flagged usable, and has
+ * not expired. Returns NULL when no such buffer exists.
+ * Caller must hold queue_info->lock. */
+static struct wayland_tbm_buffer *
+_wayland_tbm_client_find_usable_attach_buf(struct wayland_tbm_surface_queue *queue_info)
+{
+	struct wayland_tbm_buffer *buffer;
+
+	wl_list_for_each_reverse(buffer, &queue_info->attach_bufs, link) {
+		if (!buffer->allocated && buffer->usable && !buffer->expire)
+			return buffer;
+	}
+
+	return NULL;
+}
+
+/* Tries to hand out one of the server-attached buffers as the next
+ * tbm_surface for the queue.
+ *
+ * Returns the buffer's tbm_surface on success (the buffer is marked
+ * allocated; presumably released again in the surface free callback —
+ * TODO confirm against __wayland_tbm_client_surface_free_cb).
+ * Returns NULL in two distinct situations:
+ *  - no attached buffer is currently usable: the caller should wait
+ *    for the server (active_flush_invalid stays clear);
+ *  - the queue state is broken (no wl_tbm user data, or the candidate
+ *    buffer fails validation): active_flush_invalid is set so the
+ *    caller falls back to normal allocation.
+ * Caller must hold queue_info->lock. */
+static tbm_surface_h
+_wayland_tbm_client_get_usable_attach_buf(struct wayland_tbm_surface_queue *queue_info)
+{
+	struct wayland_tbm_buffer *buffer = NULL;
+	struct wayland_tbm_client *tbm_client = NULL;
+	tbm_surface_h surface = NULL;
+
+	tbm_client = wl_tbm_get_user_data(queue_info->wl_tbm);
+	if (!tbm_client) goto allocate;
+
+	/* Ask the server once per activation cycle (wait_usable is reset in
+	 * the active/deactive handlers) to notify when a buffer becomes
+	 * usable, and flush so the request goes out immediately. */
+	if (!queue_info->wait_usable) {
+		wl_tbm_queue_wait_usable(queue_info->wl_tbm_queue);
+		queue_info->wait_usable = 1;
+		wl_display_flush(tbm_client->dpy);
+	}
+
+	buffer = _wayland_tbm_client_find_usable_attach_buf(queue_info);
+	if (!buffer) return NULL;
+
+	if (!_wayland_tbm_client_is_valid_attach_buf(queue_info, buffer))
+		goto allocate;
+
+	buffer->allocated = 1;
+	surface = buffer->tbm_surface;
+
+	WL_TBM_TRACE("wl_buffer:%p tbm_surface:%p surface_queue:%p ACTIVE",
+		buffer->wl_buffer, buffer->tbm_surface, queue_info->tbm_queue);
+
+	return surface;
+
+allocate:
+	/* Mark reuse of attached buffers impossible; callers will create a
+	 * fresh tbm_surface instead. */
+	queue_info->active_flush_invalid = 1;
+	return NULL;
+}
+
static tbm_surface_h
__wayland_tbm_client_surface_alloc_cb(tbm_surface_queue_h surface_queue, void *data)
{
struct wayland_tbm_surface_queue *queue_info =
(struct wayland_tbm_surface_queue *)data;
tbm_surface_h surface = NULL;
- struct wayland_tbm_buffer *buffer;
- int alloc = 0;
pthread_mutex_lock(&queue_info->lock);
- if (queue_info->is_active && queue_info->active_flush && !queue_info->active_flush_invalid) {
- wl_list_for_each_reverse(buffer, &queue_info->attach_bufs, link) {
- if (!buffer->allocated && buffer->usable && !buffer->expire) {
- if (_wayland_tbm_client_is_valid_attach_buf(queue_info, buffer)) {
- surface = buffer->tbm_surface;
- /* ref.. pair of __wayland_tbm_client_surface_free_cb */
- buffer->allocated = 1;
-
- WL_TBM_TRACE("wl_buffer:%p tbm_surface:%p surface_queue:%p ACTIVE",
- buffer->wl_buffer, buffer->tbm_surface, surface_queue);
- } else {
- alloc = 1;
- }
-
- break;
+ if (_wayland_tbm_client_use_attach_bufs(queue_info)) {
+ surface = _wayland_tbm_client_get_usable_attach_buf(queue_info);
+ /* ref.. pair of __wayland_tbm_client_surface_free_cb */
+ if (surface) {
+ pthread_mutex_unlock(&queue_info->lock);
+ tbm_surface_internal_ref(surface);
+ return surface;
+ } else {
+ if (_wayland_tbm_client_use_attach_bufs(queue_info)) {
+ pthread_mutex_unlock(&queue_info->lock);
+ return NULL;
}
}
- } else {
- alloc = 1;
}
pthread_mutex_unlock(&queue_info->lock);
- if (surface) {
- /* ref.. pair of __wayland_tbm_client_surface_free_cb */
- tbm_surface_internal_ref(surface);
- } else if (!surface && alloc) {
+ if (!surface) {
/* ref.. pair of __wayland_tbm_client_surface_free_cb */
surface = tbm_surface_internal_create_with_flags(queue_info->width,
queue_info->height,
wl_buffer_destroy(wl_buffer);
}
-static int
-_wayland_tbm_client_is_active_flush_valid(struct wayland_tbm_surface_queue *queue_info)
-{
- struct wayland_tbm_buffer *buffer;
-
- if (wl_list_empty(&queue_info->attach_bufs)) return 0;
-
- wl_list_for_each_reverse(buffer, &queue_info->attach_bufs, link) {
- if (buffer->expire) continue;
-
- if (_wayland_tbm_client_is_valid_attach_bufs(queue_info, buffer))
- return 1;
- }
-
- return 0;
-}
-
static void
handle_tbm_queue_active(void *data,
struct wl_tbm_queue *wl_tbm_queue,
{
struct wayland_tbm_surface_queue *queue_info =
(struct wayland_tbm_surface_queue *)data;
- tbm_surface_queue_h tbm_queue = NULL;
WL_TBM_LOG("active surface_queue:%p", queue_info->tbm_queue);
if (need_flush) {
queue_info->active_flush = need_flush;
-
- if (!_wayland_tbm_client_is_active_flush_valid(queue_info)) {
- WL_TBM_LOG("active_flush invalid queue");
- queue_info->active_flush_invalid = 1;
- pthread_mutex_unlock(&queue_info->lock);
- return;
- }
-
- queue_info->active_flush_reset = 1;
+ queue_info->active_flush_size = queue_size;
+ queue_info->active_flush_done = 0;
+ queue_info->wait_usable = 0;
}
- tbm_queue = queue_info->tbm_queue;
-
pthread_mutex_unlock(&queue_info->lock);
-
- /* flush the allocated surfaces at the client */
- if (need_flush)
- tbm_surface_queue_set_size(tbm_queue, queue_size, 1);
}
static void
/* flush the attached surfaces */
_wayland_tbm_client_queue_destroy_unused_attach_bufs(queue_info, &flush);
+ if (queue_info->active_flush_done)
+ flush = 1;
if (!queue_info->active_flush_invalid)
need_flush = 1;
}
queue_info->is_active = 0;
queue_info->active_flush_invalid = 0;
+ queue_info->active_flush_done = 0;
+ queue_info->active_flush_size = 0;
+ queue_info->wait_usable = 0;
tbm_queue = queue_info->tbm_queue;
queue_size = queue_info->queue_size;
int format;
int queue_size;
- WL_TBM_TRACE("surface_queue:%p", surface_queue);
-
width = tbm_surface_queue_get_width(surface_queue);
height = tbm_surface_queue_get_height(surface_queue);
format = tbm_surface_queue_get_format(surface_queue);
queue_size = tbm_surface_queue_get_size(surface_queue);
+ WL_TBM_TRACE("surface_queue:%p (%dx%d) size:%d fmt:%c%c%c%c",
+ surface_queue, width, height, queue_size, FOURCC_STR(format));
+
pthread_mutex_lock(&queue_info->lock);
queue_info->width = width;
queue_info->height = height;
queue_info->format = format;
- if (queue_info->is_active && queue_info->active_flush) {
- if (queue_info->active_flush_reset) {
- queue_info->active_flush_reset = 0;
- } else {
- WL_TBM_LOG("active_flush invalid queue");
- queue_info->active_flush_invalid = 1;
- if (queue_size != queue_info->queue_size)
- recover_size = queue_info->queue_size;
- }
+ if (!queue_info->is_active ||
+ !queue_info->active_flush ||
+ !queue_info->active_flush_done) {
+ pthread_mutex_unlock(&queue_info->lock);
+ return;
}
+ WL_TBM_LOG("active_flush invalid queue");
+ queue_info->active_flush_invalid = 1;
+ if (queue_size != queue_info->queue_size)
+ recover_size = queue_info->queue_size;
+
pthread_mutex_unlock(&queue_info->lock);
if (recover_size)
tbm_surface_queue_set_size(surface_queue, recover_size, 0);
+
+ tbm_surface_queue_notify_dequeuable(surface_queue);
}
static void
{
struct wayland_tbm_surface_queue *queue_info = data;
struct wayland_tbm_client *tbm_client = NULL;
+ int queue_size = 0;
WL_TBM_RETURN_IF_FAIL(queue_info != NULL);
pthread_mutex_unlock(&queue_info->lock);
return;
}
+
+ if (!queue_info->active_flush_done) {
+ queue_size = queue_info->active_flush_size;
+ pthread_mutex_unlock(&queue_info->lock);
+ tbm_surface_queue_set_size(surface_queue, queue_size, 1);
+ pthread_mutex_lock(&queue_info->lock);
+ queue_info->active_flush_done = 1;
+ }
+
pthread_mutex_unlock(&queue_info->lock);
tbm_client = wl_tbm_get_user_data(queue_info->wl_tbm);