freedreno: refactor bo-cache API
author     Rob Clark <robclark@freedesktop.org>  Mon, 30 May 2016 16:45:33 +0000 (12:45 -0400)
committer  Rob Clark <robclark@freedesktop.org>  Wed, 20 Jul 2016 23:42:21 +0000 (19:42 -0400)
Split out an interface to allocate from and release to the bo-cache, and
get rid of direct usage of the bucket-level API from fd_bo/etc.
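
The resulting call pattern in fd_bo_new()/fd_bo_del() then looks roughly
like this (a simplified sketch of the hunks below, not additional code in
the patch):

	/* allocation path (fd_bo_new): try the cache first; 'size' may
	 * come back rounded up to the bucket size:
	 */
	bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
	if (bo)
		return bo;
	/* ... otherwise fall through and create a fresh handle ... */

	/* release path (fd_bo_del, under table_lock): try to put the
	 * buffer back into the cache, and only really delete it if that
	 * fails:
	 */
	if (bo->bo_reuse && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;
	bo_del(bo);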

Signed-off-by: Rob Clark <robclark@freedesktop.org>
freedreno/freedreno_bo.c
freedreno/freedreno_device.c
freedreno/freedreno_priv.h

freedreno/freedreno_bo.c
index 7b3e51f..da56398 100644
@@ -84,7 +84,8 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
 }
 
 /* Frees older cached buffers.  Called under table_lock */
-drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time)
+drm_private void
+fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
 {
        int i;
 
@@ -168,21 +169,19 @@ static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
        return bo;
 }
 
-
-struct fd_bo *
-fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
+/* NOTE: size is potentially rounded up to bucket size: */
+drm_private struct fd_bo *
+fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
 {
        struct fd_bo *bo = NULL;
        struct fd_bo_bucket *bucket;
-       uint32_t handle;
-       int ret;
 
-       size = ALIGN(size, 4096);
-       bucket = get_bucket(&dev->bo_cache, size);
+       *size = ALIGN(*size, 4096);
+       bucket = get_bucket(cache, *size);
 
        /* see if we can be green and recycle: */
        if (bucket) {
-               size = bucket->size;
+               *size = bucket->size;
                bo = find_in_bucket(bucket, flags);
                if (bo) {
                        atomic_set(&bo->refcnt, 1);
@@ -191,6 +190,20 @@ fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
                }
        }
 
+       return NULL;
+}
+
+struct fd_bo *
+fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
+{
+       struct fd_bo *bo = NULL;
+       uint32_t handle;
+       int ret;
+
+       bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
+       if (bo)
+               return bo;
+
        ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
        if (ret)
                return NULL;
@@ -290,39 +303,47 @@ struct fd_bo * fd_bo_ref(struct fd_bo *bo)
        return bo;
 }
 
-void fd_bo_del(struct fd_bo *bo)
+drm_private int
+fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
 {
-       struct fd_device *dev = bo->dev;
+       struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);
 
-       if (!atomic_dec_and_test(&bo->refcnt))
-               return;
+       /* see if we can be green and recycle: */
+       if (bucket) {
+               struct timespec time;
 
-       pthread_mutex_lock(&table_lock);
+               clock_gettime(CLOCK_MONOTONIC, &time);
 
-       if (bo->bo_reuse) {
-               struct fd_bo_bucket *bucket = get_bucket(&dev->bo_cache, bo->size);
+               bo->free_time = time.tv_sec;
+               list_addtail(&bo->list, &bucket->list);
+               fd_bo_cache_cleanup(cache, time.tv_sec);
 
-               /* see if we can be green and recycle: */
-               if (bucket) {
-                       struct timespec time;
+               /* bo's in the bucket cache don't have a ref and
+                * don't hold a ref to the dev:
+                */
+               fd_device_del_locked(bo->dev);
 
-                       clock_gettime(CLOCK_MONOTONIC, &time);
+               return 0;
+       }
 
-                       bo->free_time = time.tv_sec;
-                       list_addtail(&bo->list, &bucket->list);
-                       fd_cleanup_bo_cache(&dev->bo_cache, time.tv_sec);
+       return -1;
+}
 
-                       /* bo's in the bucket cache don't have a ref and
-                        * don't hold a ref to the dev:
-                        */
+void fd_bo_del(struct fd_bo *bo)
+{
+       struct fd_device *dev = bo->dev;
 
-                       goto out;
-               }
-       }
+       if (!atomic_dec_and_test(&bo->refcnt))
+               return;
+
+       pthread_mutex_lock(&table_lock);
+
+       if (bo->bo_reuse && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
+               goto out;
 
        bo_del(bo);
-out:
        fd_device_del_locked(dev);
+out:
        pthread_mutex_unlock(&table_lock);
 }
 
freedreno/freedreno_device.c
index 0e20332..bd57c24 100644
@@ -54,7 +54,7 @@ add_bucket(struct fd_bo_cache *cache, int size)
        cache->num_buckets++;
 }
 
-static void
+drm_private void
 fd_bo_cache_init(struct fd_bo_cache *cache)
 {
        unsigned long size, cache_max_size = 64 * 1024 * 1024;
@@ -137,7 +137,7 @@ struct fd_device * fd_device_ref(struct fd_device *dev)
 
 static void fd_device_del_impl(struct fd_device *dev)
 {
-       fd_cleanup_bo_cache(&dev->bo_cache, 0);
+       fd_bo_cache_cleanup(&dev->bo_cache, 0);
        drmHashDestroy(dev->handle_table);
        drmHashDestroy(dev->name_table);
        if (dev->closefd)
freedreno/freedreno_priv.h
index 5880dc2..4159e52 100644
@@ -96,7 +96,11 @@ struct fd_device {
        int closefd;        /* call close(fd) upon destruction */
 };
 
-drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time);
+drm_private void fd_bo_cache_init(struct fd_bo_cache *cache);
+drm_private void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
+drm_private struct fd_bo * fd_bo_cache_alloc(struct fd_bo_cache *cache,
+               uint32_t *size, uint32_t flags);
+drm_private int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);
 
 /* for where @table_lock is already held: */
 drm_private void fd_device_del_locked(struct fd_device *dev);