drm/amdgpu: add slab cache for sync objects as well
author Christian König <christian.koenig@amd.com>
Tue, 16 Feb 2016 10:24:58 +0000 (11:24 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 8 Mar 2016 16:01:47 +0000 (11:01 -0500)
We allocate and free these small, fixed-size entries all the time, so use a dedicated slab cache instead of kmalloc.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index fab6ddb..3e4ec56 100644
@@ -634,6 +634,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
 
 /*
  * GART structures, functions & helpers
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ce79a8b..875333b 100644
@@ -539,6 +539,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
 
 static int __init amdgpu_init(void)
 {
+       amdgpu_sync_init();
 #ifdef CONFIG_VGA_CONSOLE
        if (vgacon_text_force()) {
                DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
@@ -563,6 +564,7 @@ static void __exit amdgpu_exit(void)
        amdgpu_amdkfd_fini();
        drm_pci_exit(driver, pdriver);
        amdgpu_unregister_atpx_handler();
+       amdgpu_sync_fini();
 }
 
 module_init(amdgpu_init);
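
Note that amdgpu_init() ignores the return value of amdgpu_sync_init(): if kmem_cache_create() ever failed, amdgpu_sync_slab would stay NULL and the first kmem_cache_alloc() against it would oops. A minimal sketch of how the error could be propagated instead (illustrative only, not part of this commit; the drm_pci_init() tail call is as in this era's amdgpu_drv.c):

static int __init amdgpu_init(void)
{
        int r;

        r = amdgpu_sync_init();
        if (r)
                return r;       /* fail module load instead of oopsing later */

        /* ... VGACON check, driver setup and atpx registration as before ... */
        return drm_pci_init(driver, pdriver);
}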
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index e367342..c48b4fc 100644
@@ -37,6 +37,8 @@ struct amdgpu_sync_entry {
        struct fence            *fence;
 };
 
+static struct kmem_cache *amdgpu_sync_slab;
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -133,7 +135,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                return 0;
        }
 
-       e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+       e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;
 
@@ -214,7 +216,7 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
                f = e->fence;
 
                hash_del(&e->node);
-               kfree(e);
+               kmem_cache_free(amdgpu_sync_slab, e);
 
                if (!fence_is_signaled(f))
                        return f;
@@ -237,7 +239,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
 
                hash_del(&e->node);
                fence_put(e->fence);
-               kfree(e);
+               kmem_cache_free(amdgpu_sync_slab, e);
        }
 
        return 0;
@@ -259,8 +261,34 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                hash_del(&e->node);
                fence_put(e->fence);
-               kfree(e);
+               kmem_cache_free(amdgpu_sync_slab, e);
        }
 
        fence_put(sync->last_vm_update);
 }
+
+/**
+ * amdgpu_sync_init - init sync object subsystem
+ *
+ * Allocate the slab allocator.
+ */
+int amdgpu_sync_init(void)
+{
+       amdgpu_sync_slab = kmem_cache_create(
+               "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
+               SLAB_HWCACHE_ALIGN, NULL);
+       if (!amdgpu_sync_slab)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/**
+ * amdgpu_sync_fini - fini sync object subsystem
+ *
+ * Free the slab allocator.
+ */
+void amdgpu_sync_fini(void)
+{
+       kmem_cache_destroy(amdgpu_sync_slab);
+}
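
The functions above follow the standard kmem_cache lifecycle: create the cache once at module load, allocate and free fixed-size entries from it on the hot path, and destroy it at module unload. A self-contained sketch of the same pattern, with hypothetical demo_* identifiers standing in for the amdgpu ones:

#include <linux/module.h>
#include <linux/slab.h>

/* stand-in for struct amdgpu_sync_entry: small, fixed-size, allocated often */
struct demo_entry {
        struct hlist_node       node;
        void                    *payload;
};

static struct kmem_cache *demo_slab;

static int __init demo_init(void)
{
        struct demo_entry *e;

        /* one dedicated, cacheline-aligned cache for all entries */
        demo_slab = kmem_cache_create("demo_entry", sizeof(struct demo_entry),
                                      0, SLAB_HWCACHE_ALIGN, NULL);
        if (!demo_slab)
                return -ENOMEM;

        e = kmem_cache_alloc(demo_slab, GFP_KERNEL);    /* replaces kmalloc() */
        if (e)
                kmem_cache_free(demo_slab, e);          /* replaces kfree() */
        return 0;
}

static void __exit demo_exit(void)
{
        /* every object must be freed before the cache is destroyed */
        kmem_cache_destroy(demo_slab);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Compared with kmalloc(), a dedicated cache avoids rounding each allocation up to the next kmalloc size bucket and gives the entries their own line in /proc/slabinfo (unless the allocator merges compatible caches).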