drm/ttm: Make LRU removal optional v2
Author: Christian König <christian.koenig@amd.com>
Fri, 10 May 2019 12:15:08 +0000 (14:15 +0200)
Committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 31 May 2019 15:39:34 +0000 (10:39 -0500)
We are already doing this for DMA-buf imports and also for
amdgpu VM BOs for quite a while now.

If this doesn't run into any problems we are probably going
to stop removing BOs from the LRU altogether.

v2: drop BUG_ON from ttm_bo_add_to_lru

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
14 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_execbuf_util.h

index e304271..81e0e75 100644 (file)
@@ -585,7 +585,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
        amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
 
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-                                    false, &ctx->duplicates);
+                                    false, &ctx->duplicates, true);
        if (!ret)
                ctx->reserved = true;
        else {
@@ -658,7 +658,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
        }
 
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-                                    false, &ctx->duplicates);
+                                    false, &ctx->duplicates, true);
        if (!ret)
                ctx->reserved = true;
        else
@@ -1808,7 +1808,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
        }
 
        /* Reserve all BOs and page tables for validation */
-       ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
+       ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
+                                    true);
        WARN(!list_empty(&duplicates), "Duplicates should be empty");
        if (ret)
                goto out_free;
@@ -2014,7 +2015,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
        }
 
        ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
-                                    false, &duplicate_save);
+                                    false, &duplicate_save, true);
        if (ret) {
                pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
                goto ttm_reserve_fail;
index d72cc58..fff558c 100644 (file)
@@ -648,7 +648,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        }
 
        r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
-                                  &duplicates);
+                                  &duplicates, true);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
index 54dd02a..06f83ca 100644 (file)
@@ -79,7 +79,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
        if (r) {
                DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
                return r;
index 7b84036..d513a5a 100644 (file)
@@ -171,7 +171,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, true);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
@@ -608,7 +608,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);
        if (r)
                goto error_unref;
 
index 30f85f0..49f9a93 100644 (file)
@@ -256,7 +256,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
                return 0;
 
        ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
-                                    !no_intr, NULL);
+                                    !no_intr, NULL, true);
        if (ret)
                return ret;
 
index 44617de..7411e69 100644 (file)
@@ -559,7 +559,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
        if (!vm_bos)
                return;
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
        if (r)
                goto error_free;
 
index 833e909..36683de 100644 (file)
@@ -539,7 +539,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
        INIT_LIST_HEAD(&duplicates);
-       r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
+       r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
        if (unlikely(r != 0)) {
                return r;
        }
index 2845fce..06bbcd2 100644 (file)
@@ -173,19 +173,20 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 
        reservation_object_assert_held(bo->resv);
 
-       if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-               BUG_ON(!list_empty(&bo->lru));
+       if (!list_empty(&bo->lru))
+               return;
 
-               man = &bdev->man[bo->mem.mem_type];
-               list_add_tail(&bo->lru, &man->lru[bo->priority]);
-               kref_get(&bo->list_kref);
+       if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+               return;
 
-               if (bo->ttm && !(bo->ttm->page_flags &
-                                (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
-                       list_add_tail(&bo->swap,
-                                     &bdev->glob->swap_lru[bo->priority]);
-                       kref_get(&bo->list_kref);
-               }
+       man = &bdev->man[bo->mem.mem_type];
+       list_add_tail(&bo->lru, &man->lru[bo->priority]);
+       kref_get(&bo->list_kref);
+
+       if (bo->ttm && !(bo->ttm->page_flags &
+                        (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+               list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+               kref_get(&bo->list_kref);
        }
 }
 EXPORT_SYMBOL(ttm_bo_add_to_lru);
index 0075eb9..957ec37 100644 (file)
@@ -69,7 +69,8 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
 
-               ttm_bo_add_to_lru(bo);
+               if (list_empty(&bo->lru))
+                       ttm_bo_add_to_lru(bo);
                reservation_object_unlock(bo->resv);
        }
        spin_unlock(&glob->lru_lock);
@@ -93,7 +94,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list, bool intr,
-                          struct list_head *dups)
+                          struct list_head *dups, bool del_lru)
 {
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
@@ -172,11 +173,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                list_add(&entry->head, list);
        }
 
-       if (ticket)
-               ww_acquire_done(ticket);
-       spin_lock(&glob->lru_lock);
-       ttm_eu_del_from_lru_locked(list);
-       spin_unlock(&glob->lru_lock);
+       if (del_lru) {
+               spin_lock(&glob->lru_lock);
+               ttm_eu_del_from_lru_locked(list);
+               spin_unlock(&glob->lru_lock);
+       }
        return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -203,7 +204,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                        reservation_object_add_shared_fence(bo->resv, fence);
                else
                        reservation_object_add_excl_fence(bo->resv, fence);
-               ttm_bo_add_to_lru(bo);
+               if (list_empty(&bo->lru))
+                       ttm_bo_add_to_lru(bo);
+               else
+                       ttm_bo_move_to_lru_tail(bo, NULL);
                reservation_object_unlock(bo->resv);
        }
        spin_unlock(&glob->lru_lock);
index b7f9dfe..fe0bd62 100644 (file)
@@ -63,7 +63,7 @@ int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
        struct virtio_gpu_object *qobj;
        int ret;
 
-       ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
+       ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
        if (ret != 0)
                return ret;
 
index 711f8fd..1d38a8b 100644 (file)
@@ -464,7 +464,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
        val_buf->bo = &res->backup->base;
        val_buf->num_shared = 0;
        list_add_tail(&val_buf->head, &val_list);
-       ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
+       ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
+                                    true);
        if (unlikely(ret != 0))
                goto out_no_reserve;
 
index 523f6ac..1d2322a 100644 (file)
@@ -169,7 +169,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
                          bool intr)
 {
        return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
-                                     NULL);
+                                     NULL, true);
 }
 
 /**
index 129dabb..9f54cf9 100644 (file)
@@ -769,7 +769,10 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                spin_lock(&bo->bdev->glob->lru_lock);
-               ttm_bo_add_to_lru(bo);
+               if (list_empty(&bo->lru))
+                       ttm_bo_add_to_lru(bo);
+               else
+                       ttm_bo_move_to_lru_tail(bo, NULL);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }
        reservation_object_unlock(bo->resv);
index 621615f..7e46cc6 100644 (file)
@@ -70,6 +70,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  * @list:    thread private list of ttm_validate_buffer structs.
  * @intr:    should the wait be interruptible
  * @dups:    [out] optional list of duplicates.
+ * @del_lru: true if BOs should be removed from the LRU.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -98,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                                  struct list_head *list, bool intr,
-                                 struct list_head *dups);
+                                 struct list_head *dups, bool del_lru);
 
 /**
  * function ttm_eu_fence_buffer_objects.