Revert "amdgpu: clean up non list code path for vamgr"
author     Michel Dänzer <michel.daenzer@amd.com>   Thu, 8 Feb 2018 08:50:53 +0000 (09:50 +0100)
committer  Michel Dänzer <michel@daenzer.net>       Thu, 8 Feb 2018 08:50:53 +0000 (09:50 +0100)
This reverts commit 41b94a3fb6e87d057fad78568d920d29489e5060.

It caused crashes with radeonsi in at least glxgears and Xorg.
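For readers new to this allocator: the reverted cleanup had made the hole list the only allocation path. What the revert below restores is the hybrid scheme, in which a frontier cursor (va_offset) hands out fresh address space at the top of the managed range and the hole list only recycles previously freed ranges below it. The following is a minimal sketch of the restored bump path with simplified types, not the libdrm source itself:

#include <stdint.h>

struct va_mgr {
	uint64_t va_offset;	/* frontier: lowest never-allocated address */
	uint64_t va_max;	/* absolute end of the managed range */
	/* hole list and mutex omitted for brevity */
};

/* Fallback used when no hole in the list can satisfy the request;
 * 'alignment' is assumed to be nonzero. */
static uint64_t bump_alloc(struct va_mgr *mgr, uint64_t size, uint64_t alignment)
{
	uint64_t offset = mgr->va_offset;
	uint64_t waste = offset % alignment;

	waste = waste ? alignment - waste : 0;
	if (offset + waste + size > mgr->va_max)
		return UINT64_MAX;	/* address space exhausted */

	/* The real code also records a nonzero 'waste' as a hole so the
	 * skipped alignment padding can be reused later. */
	mgr->va_offset += size + waste;
	return offset + waste;
}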

amdgpu/amdgpu_internal.h
amdgpu/amdgpu_vamgr.c

diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 75276a9..3e044f1 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -53,6 +53,8 @@ struct amdgpu_bo_va_hole {
 };
 
 struct amdgpu_bo_va_mgr {
+       /* the start virtual address */
+       uint64_t va_offset;
        uint64_t va_max;
        struct list_head va_holes;
        pthread_mutex_t bo_va_mutex;
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 2311e5e..a2852b5 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -48,19 +48,12 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
 drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
                                   uint64_t max, uint64_t alignment)
 {
-       struct amdgpu_bo_va_hole *n;
-
+       mgr->va_offset = start;
        mgr->va_max = max;
        mgr->va_alignment = alignment;
 
        list_inithead(&mgr->va_holes);
        pthread_mutex_init(&mgr->bo_va_mutex, NULL);
-       pthread_mutex_lock(&mgr->bo_va_mutex);
-       n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-       n->size = mgr->va_max;
-       n->offset = start;
-       list_add(&n->list, &mgr->va_holes);
-       pthread_mutex_unlock(&mgr->bo_va_mutex);
 }
 
 drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
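Note what the init hunk above implies: after the revert, amdgpu_vamgr_init leaves the hole list empty and fresh allocations come from advancing va_offset, whereas the removed variant pre-seeded a single hole covering the managed range so that every allocation went through the hole search. In terms of the simplified types sketched after the commit message, the restored initialization amounts to:

static void vamgr_init(struct va_mgr *mgr, uint64_t start, uint64_t max)
{
	mgr->va_offset = start;	/* frontier starts at the bottom of the range */
	mgr->va_max = max;	/* absolute end, as used by the bump check */
	/* hole list starts empty; mutex setup omitted */
}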
@@ -129,14 +122,41 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
                }
        }
 
+       if (base_required) {
+               if (base_required < mgr->va_offset) {
+                       pthread_mutex_unlock(&mgr->bo_va_mutex);
+                       return AMDGPU_INVALID_VA_ADDRESS;
+               }
+               offset = mgr->va_offset;
+               waste = base_required - mgr->va_offset;
+       } else {
+               offset = mgr->va_offset;
+               waste = offset % alignment;
+               waste = waste ? alignment - waste : 0;
+       }
+
+       if (offset + waste + size > mgr->va_max) {
+               pthread_mutex_unlock(&mgr->bo_va_mutex);
+               return AMDGPU_INVALID_VA_ADDRESS;
+       }
+
+       if (waste) {
+               n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+               n->size = waste;
+               n->offset = offset;
+               list_add(&n->list, &mgr->va_holes);
+       }
+
+       offset += waste;
+       mgr->va_offset += size + waste;
        pthread_mutex_unlock(&mgr->bo_va_mutex);
-       return AMDGPU_INVALID_VA_ADDRESS;
+       return offset;
 }
 
 static drm_private void
 amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 {
-       struct amdgpu_bo_va_hole *hole, *next;
+       struct amdgpu_bo_va_hole *hole;
 
        if (va == AMDGPU_INVALID_VA_ADDRESS)
                return;
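To make the restored amdgpu_vamgr_find_va concrete, here is a worked example with hypothetical numbers (start = 0x1000, va_max = 0x100000, alignment = 0x1000); the hole-search loop at the top of the function, not shown in this hunk, is unchanged by the revert:

/*
 * find_va(size=0x2000)    -> 0x1000; va_offset becomes 0x3000
 * find_va(size=0x1000)    -> 0x3000; va_offset becomes 0x4000
 * free_va(0x1000, 0x2000) -> hole [0x1000, 0x3000) enters the list
 * find_va(size=0x2000)    -> exact fit in that hole: returns 0x1000,
 *                            hole removed; va_offset stays 0x4000
 * free_va(0x3000, 0x1000) -> va + size == va_offset, so the frontier
 *                            simply drops back to 0x3000
 */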
@@ -144,46 +164,61 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
        size = ALIGN(size, mgr->va_alignment);
 
        pthread_mutex_lock(&mgr->bo_va_mutex);
-       hole = container_of(&mgr->va_holes, hole, list);
-       LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
-               if (next->offset < va)
-                       break;
-               hole = next;
-       }
-
-       if (&hole->list != &mgr->va_holes) {
-               /* Grow upper hole if it's adjacent */
-               if (hole->offset == (va + size)) {
-                       hole->offset = va;
-                       hole->size += size;
-                       /* Merge lower hole if it's adjacent */
-                       if (next != hole &&
-                           &next->list != &mgr->va_holes &&
-                           (next->offset + next->size) == va) {
-                               next->size += hole->size;
+       if ((va + size) == mgr->va_offset) {
+               mgr->va_offset = va;
+               /* Delete uppermost hole if it reaches the new top */
+               if (!LIST_IS_EMPTY(&mgr->va_holes)) {
+                       hole = container_of(mgr->va_holes.next, hole, list);
+                       if ((hole->offset + hole->size) == va) {
+                               mgr->va_offset = hole->offset;
                                list_del(&hole->list);
                                free(hole);
                        }
                }
-       }
+       } else {
+               struct amdgpu_bo_va_hole *next;
 
-       /* Grow lower hole if it's adjacent */
-       if (next != hole && &next->list != &mgr->va_holes &&
-           (next->offset + next->size) == va) {
-               next->size += size;
-               goto out;
-       }
+               hole = container_of(&mgr->va_holes, hole, list);
+               LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
+                       if (next->offset < va)
+                               break;
+                       hole = next;
+               }
 
-       /* FIXME on allocation failure we just lose virtual address space
-        * maybe print a warning
-        */
-       next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-       if (next) {
-               next->size = size;
-               next->offset = va;
-               list_add(&next->list, &hole->list);
-       }
+               if (&hole->list != &mgr->va_holes) {
+                       /* Grow upper hole if it's adjacent */
+                       if (hole->offset == (va + size)) {
+                               hole->offset = va;
+                               hole->size += size;
+                               /* Merge lower hole if it's adjacent */
+                               if (next != hole &&
+                                   &next->list != &mgr->va_holes &&
+                                   (next->offset + next->size) == va) {
+                                       next->size += hole->size;
+                                       list_del(&hole->list);
+                                       free(hole);
+                               }
+                               goto out;
+                       }
+               }
+
+               /* Grow lower hole if it's adjacent */
+               if (next != hole && &next->list != &mgr->va_holes &&
+                               (next->offset + next->size) == va) {
+                       next->size += size;
+                       goto out;
+               }
 
+               /* FIXME on allocation failure we just lose virtual address space
+                * maybe print a warning
+                */
+               next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+               if (next) {
+                       next->size = size;
+                       next->offset = va;
+                       list_add(&next->list, &hole->list);
+               }
+       }
 out:
        pthread_mutex_unlock(&mgr->bo_va_mutex);
 }
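The notable part of the restored amdgpu_vamgr_free_va is the fast path for freeing the topmost allocation. Below is a self-contained sketch of just that path, using a hypothetical singly linked hole list (sorted by decreasing offset, as in the code above) in place of libdrm's list_head:

#include <stdint.h>
#include <stdlib.h>

struct hole { uint64_t offset, size; struct hole *next; };
struct mgr { uint64_t va_offset; struct hole *holes; };

static void free_at_top(struct mgr *m, uint64_t va, uint64_t size)
{
	if (va + size != m->va_offset)
		return;	/* not topmost: the hole-list path handles it */

	/* Drop the frontier to the start of the freed block. */
	m->va_offset = va;

	/* If the uppermost hole now ends exactly at the frontier, it and
	 * the freed block form one contiguous free region: absorb it. */
	if (m->holes && m->holes->offset + m->holes->size == va) {
		struct hole *h = m->holes;
		m->va_offset = h->offset;
		m->holes = h->next;
		free(h);
	}
}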