simple_mtx_t table_lock = SIMPLE_MTX_INITIALIZER;
void bo_del(struct fd_bo *bo);
+void bo_del_flush(struct fd_device *dev);
/* set buffer name, and add to table, call w/ table_lock held: */
static void
if (!p_atomic_dec_zero(&bo->refcnt))
return;
+ /* bo_del_or_recycle() can free the bo, so remember the device first: */
+ struct fd_device *dev = bo->dev;
+
bo_del_or_recycle(bo);
+ bo_del_flush(dev);
}
void
if (!p_atomic_dec_zero(&bo->refcnt))
return;
+ struct fd_device *dev = bo->dev;
+
simple_mtx_lock(&table_lock);
bo_del_or_recycle(bo);
+ bo_del_flush(dev);
simple_mtx_unlock(&table_lock);
}
void
fd_bo_del_array(struct fd_bo **bos, unsigned count)
{
+ if (!count)
+ return;
+
+ /* All bos in the array are assumed to share the same device: */
+ struct fd_device *dev = bos[0]->dev;
+
simple_mtx_lock(&table_lock);
- for (unsigned i = 0; i < count; i++)
- fd_bo_del_locked(bos[i]);
+ for (unsigned i = 0; i < count; i++) {
+ if (!p_atomic_dec_zero(&bos[i]->refcnt))
+ continue;
+ bo_del_or_recycle(bos[i]);
+ }
+ bo_del_flush(dev);
simple_mtx_unlock(&table_lock);
}
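
/* Illustrative usage (not part of the patch; the caller name is
 * hypothetical): releasing many buffers through the array variant means
 * one table_lock hold and batched GEM_CLOSE, instead of a lock/ioctl
 * round-trip per buffer:
 *
 *    static void
 *    retire_submit_bos(struct fd_bo **bos, unsigned nr_bos)
 *    {
 *       fd_bo_del_array(bos, nr_bos);
 *    }
 */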
}
}
-/* Called under table_lock */
+/* Called under table_lock.  bo_del_flush() *must* be called before
+ * table_lock is released (bo_del() can be called multiple times before
+ * a single bo_del_flush(), as long as table_lock is held the entire
+ * time).
+ */
void
bo_del(struct fd_bo *bo)
{
bo->funcs->destroy(bo);
if (handle) {
+ /* If the deferred-close table is full, flush first to make room: */
+ if (dev->num_deferred_handles == ARRAY_SIZE(dev->deferred_handles))
+    bo_del_flush(dev);
+ dev->deferred_handles[dev->num_deferred_handles++] = handle;
+ }
+}
+
+/* Called under table_lock */
+void
+bo_del_flush(struct fd_device *dev)
+{
+ if (!dev->num_deferred_handles)
+ return;
+
+ if (dev->funcs->flush)
+ dev->funcs->flush(dev);
+
+ for (unsigned i = 0; i < dev->num_deferred_handles; i++) {
struct drm_gem_close req = {
- .handle = handle,
+ .handle = dev->deferred_handles[i],
};
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
+
+ dev->num_deferred_handles = 0;
}
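
/* Usage sketch (illustrative, not part of the patch): several bo_del()
 * calls can be batched under a single table_lock hold, with one
 * bo_del_flush() before the lock is dropped:
 *
 *    simple_mtx_lock(&table_lock);
 *    bo_del(bo_a);
 *    bo_del(bo_b);
 *    bo_del(bo_c);
 *    bo_del_flush(dev);   // one flush closes all three GEM handles
 *    simple_mtx_unlock(&table_lock);
 */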
static void
#include "freedreno_priv.h"
void bo_del(struct fd_bo *bo);
+void bo_del_flush(struct fd_device *dev);
extern simple_mtx_t table_lock;
static void
void
fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
{
+ struct fd_device *dev = NULL;
int i;
simple_mtx_assert_locked(&table_lock);
if (time && ((time - bo->free_time) <= 1))
break;
+ /* Remember the device so the deferred handle closes can be flushed
+  * once, after the loop:
+  */
+ dev = bo->dev;
+
VG_BO_OBTAIN(bo);
list_del(&bo->list);
bo_del(bo);
}
}
+ if (dev)
+ bo_del_flush(dev);
+
cache->time = time;
}
VG_BO_OBTAIN(bo);
if (bo->funcs->madvise(bo, true) <= 0) {
/* we've lost the backing pages, delete and try again: */
+      /* bo_del() frees the bo, so remember the device for the flush: */
+      struct fd_device *dev = bo->dev;
simple_mtx_lock(&table_lock);
bo_del(bo);
+ bo_del_flush(dev);
simple_mtx_unlock(&table_lock);
goto retry;
}
struct fd_pipe *(*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
unsigned prio);
+ /* Optional backend hook, called from bo_del_flush() before the
+  * deferred GEM handles are closed:
+  */
+ int (*flush)(struct fd_device *dev);
void (*destroy)(struct fd_device *dev);
};
simple_mtx_t suballoc_lock;
struct util_queue submit_queue;
+
+ /**
+  * GEM handles can be queued/batched for deferred close in cases where
+  * many buffers are freed together under table_lock.  This enables the
+  * virtio backend to batch the GEM_CLOSE messages sent to the host,
+  * avoiding quickly depleting the virtqueue ring buffer slots.  (An
+  * illustrative backend flush sketch follows this struct.)
+  */
+ uint32_t deferred_handles[64];
+ uint32_t num_deferred_handles;
};
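
/* Sketch of how a backend might implement the new flush hook
 * (illustrative only; virtio_execbuf_flush() is an assumed name for the
 * virtio backend's "push queued guest->host messages" helper, and the
 * funcs table is assumed to be struct fd_device_funcs -- neither is
 * shown in this patch):
 *
 *    static int
 *    virtio_flush(struct fd_device *dev)
 *    {
 *       // Push any queued guest->host messages to the host in one
 *       // batch, rather than one virtqueue slot per buffer:
 *       return virtio_execbuf_flush(dev);
 *    }
 *
 *    static const struct fd_device_funcs funcs = {
 *       ...
 *       .flush = virtio_flush,
 *    };
 */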
#define foreach_submit(name, list) \