amdgpu: make vamgr per device v2
author    Jammy Zhou <Jammy.Zhou@amd.com>
          Mon, 17 Aug 2015 03:09:09 +0000 (11:09 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
          Mon, 17 Aug 2015 20:29:26 +0000 (16:29 -0400)
Each device can have its own vamgr, so make it per-device now. This
fixes the failure seen when multiple GPUs are used in a single
process (see the sketch below).

v2: rebase

Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
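
For illustration only (not part of this commit): a minimal sketch of the
multi-GPU scenario the patch fixes, using only the public libdrm amdgpu API.
The render-node paths are placeholders and error handling is kept minimal.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <amdgpu.h>   /* public libdrm_amdgpu header */

int main(void)
{
	uint32_t major, minor;
	amdgpu_device_handle dev0, dev1;

	/* Two different GPUs opened in the same process (paths are examples). */
	int fd0 = open("/dev/dri/renderD128", O_RDWR);
	int fd1 = open("/dev/dri/renderD129", O_RDWR);
	if (fd0 < 0 || fd1 < 0)
		return 1;

	/* With this patch each call sets up that device's own VA manager,
	 * sized from its own virtual_address_offset/max/alignment, instead
	 * of both devices sharing one global vamgr. */
	if (amdgpu_device_initialize(fd0, &major, &minor, &dev0))
		return 1;
	if (amdgpu_device_initialize(fd1, &major, &minor, &dev1))
		return 1;

	printf("initialized two amdgpu devices, interface %u.%u\n", major, minor);

	/* Teardown deinitializes and frees each device's own VA manager. */
	amdgpu_device_deinitialize(dev1);
	amdgpu_device_deinitialize(dev0);
	return 0;
}

Before this change the second amdgpu_device_initialize() reused the global
vamgr that had already been sized for the first GPU, which is the failure
the commit message refers to.
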
amdgpu/amdgpu_device.c
amdgpu/amdgpu_internal.h
amdgpu/amdgpu_vamgr.c

diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index e16cd24..75b12e2 100644
@@ -130,7 +130,8 @@ static int amdgpu_get_auth(int fd, int *auth)
 
 static void amdgpu_device_free_internal(amdgpu_device_handle dev)
 {
-       amdgpu_vamgr_reference(&dev->vamgr, NULL);
+       amdgpu_vamgr_deinit(dev->vamgr);
+       free(dev->vamgr);
        util_hash_table_destroy(dev->bo_flink_names);
        util_hash_table_destroy(dev->bo_handles);
        pthread_mutex_destroy(&dev->bo_table_mutex);
@@ -251,7 +252,13 @@ int amdgpu_device_initialize(int fd,
        if (r)
                goto cleanup;
 
-       dev->vamgr = amdgpu_vamgr_get_global(dev);
+       dev->vamgr = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
+       if (dev->vamgr == NULL)
+               goto cleanup;
+
+       amdgpu_vamgr_init(dev->vamgr, dev->dev_info.virtual_address_offset,
+                         dev->dev_info.virtual_address_max,
+                         dev->dev_info.virtual_address_alignment);
 
        max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
        start = amdgpu_vamgr_find_va(dev->vamgr,
@@ -278,6 +285,8 @@ free_va:
        r = -ENOMEM;
        amdgpu_vamgr_free_va(dev->vamgr, start,
                             max - dev->dev_info.virtual_address_offset);
+       amdgpu_vamgr_deinit(dev->vamgr);
+       free(dev->vamgr);
 
 cleanup:
        if (dev->fd >= 0)
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 3ce0969..5d86603 100644
@@ -52,7 +52,6 @@ struct amdgpu_bo_va_hole {
 };
 
 struct amdgpu_bo_va_mgr {
-       atomic_t refcount;
        /* the start virtual address */
        uint64_t va_offset;
        uint64_t va_max;
@@ -125,13 +124,6 @@ struct amdgpu_context {
 
 drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo);
 
-drm_private struct amdgpu_bo_va_mgr*
-amdgpu_vamgr_get_global(struct amdgpu_device *dev);
-
-drm_private void
-amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
-                      struct amdgpu_bo_va_mgr *src);
-
 drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
                       uint64_t max, uint64_t alignment);
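
With the refcount and the global accessor gone, ownership of the manager is
1:1 with its device. An informal sketch of its post-patch shape follows
(reconstructed from the hunks above; members not visible in this diff, such
as the alignment field and the exact list type, are assumptions, and the
real definition stays in amdgpu/amdgpu_internal.h):

#include <stdint.h>
#include <pthread.h>

struct sketch_list_head {                  /* stand-in for libdrm's list_head */
	struct sketch_list_head *prev, *next;
};

struct sketch_bo_va_mgr {
	/* no refcount: the owning amdgpu_device allocates and frees this */
	uint64_t va_offset;                /* the start virtual address */
	uint64_t va_max;                   /* end of the managed range */
	uint64_t va_alignment;             /* assumed name; set by amdgpu_vamgr_init() */
	struct sketch_list_head va_holes;  /* free ranges, iterated as mgr->va_holes */
	pthread_mutex_t bo_va_mutex;       /* protects the hole list */
};
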
 
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 507a73a..04d2881 100644
@@ -33,8 +33,6 @@
 #include "amdgpu_internal.h"
 #include "util_math.h"
 
-static struct amdgpu_bo_va_mgr vamgr = {{0}};
-
 int amdgpu_va_range_query(amdgpu_device_handle dev,
                          enum amdgpu_gpu_va_range type, uint64_t *start, uint64_t *end)
 {
@@ -67,28 +65,6 @@ drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
        pthread_mutex_destroy(&mgr->bo_va_mutex);
 }
 
-drm_private struct amdgpu_bo_va_mgr *
-amdgpu_vamgr_get_global(struct amdgpu_device *dev)
-{
-       int ref;
-       ref = atomic_inc_return(&vamgr.refcount);
-
-       if (ref == 1)
-               amdgpu_vamgr_init(&vamgr, dev->dev_info.virtual_address_offset,
-                                 dev->dev_info.virtual_address_max,
-                                 dev->dev_info.virtual_address_alignment);
-       return &vamgr;
-}
-
-drm_private void
-amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
-                      struct amdgpu_bo_va_mgr *src)
-{
-       if (update_references(&(*dst)->refcount, NULL))
-               amdgpu_vamgr_deinit(*dst);
-       *dst = src;
-}
-
 drm_private uint64_t
 amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
                     uint64_t alignment, uint64_t base_required)
@@ -105,7 +81,7 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
        pthread_mutex_lock(&mgr->bo_va_mutex);
        /* TODO: using more appropriate way to track the holes */
        /* first look for a hole */
-       LIST_FOR_EACH_ENTRY_SAFE(hole, n, &vamgr.va_holes, list) {
+       LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
                if (base_required) {
                        if(hole->offset > base_required ||
                                (hole->offset + hole->size) < (base_required + size))