drm/ttm: stop always moving BOs on the LRU on page fault
author Christian König <christian.koenig@amd.com>
Fri, 11 Jan 2019 13:12:58 +0000 (14:12 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 25 Jan 2019 21:15:34 +0000 (16:15 -0500)
Move the BO on the LRU only when it is actually moved by a DMA
operation. Instead of going through ttm_bo_reserve()/ttm_bo_unreserve(),
which put the BO back on the LRU tail on every fault, lock the
reservation object directly and bump the BO on the LRU only when
fault_reserve_notify() has installed a new moving fence.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Tested-And-Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/ttm/ttm_bo_vm.c

index a1d977fbade5502c8edad616e537702d75dc1673..e86a29a1e51f2ca225ad0ba8e6edd2eecf5430f1 100644
@@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                ttm_bo_get(bo);
                up_read(&vmf->vma->vm_mm->mmap_sem);
                (void) dma_fence_wait(bo->moving, true);
-               ttm_bo_unreserve(bo);
+               reservation_object_unlock(bo->resv);
                ttm_bo_put(bo);
                goto out_unlock;
        }
@@ -131,11 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
-       err = ttm_bo_reserve(bo, true, true, NULL);
-       if (unlikely(err != 0)) {
-               if (err != -EBUSY)
-                       return VM_FAULT_NOPAGE;
-
+       if (unlikely(!reservation_object_trylock(bo->resv))) {
                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                ttm_bo_get(bo);
@@ -165,6 +161,8 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
        }
 
        if (bdev->driver->fault_reserve_notify) {
+               struct dma_fence *moving = dma_fence_get(bo->moving);
+
                err = bdev->driver->fault_reserve_notify(bo);
                switch (err) {
                case 0:
@@ -177,6 +175,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                        ret = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
+
+               if (bo->moving != moving) {
+                       spin_lock(&bdev->glob->lru_lock);
+                       ttm_bo_move_to_lru_tail(bo, NULL);
+                       spin_unlock(&bdev->glob->lru_lock);
+               }
+               dma_fence_put(moving);
        }
 
        /*
@@ -291,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 out_io_unlock:
        ttm_mem_io_unlock(man);
 out_unlock:
-       ttm_bo_unreserve(bo);
+       reservation_object_unlock(bo->resv);
        return ret;
 }
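
For reference, a minimal sketch of the pattern the last two hunks introduce: remember bo->moving before the driver's fault_reserve_notify() callback runs and bump the BO on the LRU only if that fence changed, i.e. the callback actually started a DMA move. The helper function and its name are hypothetical; in the patch the logic is inlined in ttm_bo_vm_fault().

#include <drm/ttm/ttm_bo_driver.h>
#include <linux/dma-fence.h>
#include <linux/spinlock.h>

/*
 * Hypothetical helper, illustration only: run the driver's
 * fault_reserve_notify() callback with bo->resv already locked and
 * bump the BO on the LRU only if the callback installed a new moving
 * fence, i.e. it really scheduled a DMA move of the BO.
 */
static int ttm_bo_fault_notify_sketch(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct dma_fence *moving;
	int err;

	if (!bdev->driver->fault_reserve_notify)
		return 0;

	/* Snapshot the moving fence before the callback can replace it. */
	moving = dma_fence_get(bo->moving);

	err = bdev->driver->fault_reserve_notify(bo);
	if (err) {
		/*
		 * The real fault handler maps -EBUSY/-ERESTARTSYS to
		 * VM_FAULT_NOPAGE and anything else to VM_FAULT_SIGBUS;
		 * simplified here.
		 */
		dma_fence_put(moving);
		return err;
	}

	/* A new fence means the BO was actually moved, so bump it now. */
	if (bo->moving != moving) {
		spin_lock(&bdev->glob->lru_lock);
		ttm_bo_move_to_lru_tail(bo, NULL);
		spin_unlock(&bdev->glob->lru_lock);
	}
	dma_fence_put(moving);

	return 0;
}

Because the ttm_bo_reserve()/ttm_bo_unreserve() pair puts the BO back on the LRU tail on every cycle, the fault path now takes and drops bo->resv directly via reservation_object_trylock()/reservation_object_unlock(), so a CPU fault alone no longer makes a BO look recently used to the eviction logic.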