Merge v5.8-rc1 into drm-misc-fixes
author Thomas Zimmermann <tzimmermann@suse.de>
Tue, 16 Jun 2020 11:31:47 +0000 (13:31 +0200)
committer Thomas Zimmermann <tzimmermann@suse.de>
Tue, 16 Jun 2020 11:31:47 +0000 (13:31 +0200)
Beginning a new release cycle for what will become v5.8. Updating
drm-misc-fixes accordingly.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
1  2 
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_vm.c

@@@ -588,7 -588,8 +588,8 @@@ static void ttm_bo_release(struct kref 
                ttm_mem_io_unlock(man);
        }
  
-       if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+       if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+           !dma_resv_trylock(bo->base.resv)) {
                /* The BO is not idle, resurrect it for delayed destroy */
                ttm_bo_flush_all_fences(bo);
                bo->deleted = true;
        spin_unlock(&ttm_bo_glob.lru_lock);
  
        ttm_bo_cleanup_memtype_use(bo);
+       dma_resv_unlock(bo->base.resv);
  
        BUG_ON(bo->mem.mm_node != NULL);
        atomic_dec(&ttm_bo_glob.bo_count);
@@@ -881,10 -883,8 +883,10 @@@ static int ttm_bo_add_move_fence(struc
        if (!fence)
                return 0;
  
 -      if (no_wait_gpu)
 +      if (no_wait_gpu) {
 +              dma_fence_put(fence);
                return -EBUSY;
 +      }
  
        dma_resv_add_shared_fence(bo->base.resv, fence);
  
@@@ -58,7 -58,7 +58,7 @@@ static vm_fault_t ttm_bo_vm_fault_idle(
                goto out_clear;
  
        /*
-        * If possible, avoid waiting for GPU with mmap_sem
+        * If possible, avoid waiting for GPU with mmap_lock
         * held.  We only do this if the fault allows retry and this
         * is the first attempt.
         */
@@@ -68,7 -68,7 +68,7 @@@
                        goto out_unlock;
  
                ttm_bo_get(bo);
-               up_read(&vmf->vma->vm_mm->mmap_sem);
+               mmap_read_unlock(vmf->vma->vm_mm);
                (void) dma_fence_wait(bo->moving, true);
                dma_resv_unlock(bo->base.resv);
                ttm_bo_put(bo);
@@@ -131,20 -131,20 +131,20 @@@ vm_fault_t ttm_bo_vm_reserve(struct ttm
  {
        /*
         * Work around locking order reversal in fault / nopfn
-        * between mmap_sem and bo_reserve: Perform a trylock operation
+        * between mmap_lock and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
        if (unlikely(!dma_resv_trylock(bo->base.resv))) {
                /*
                 * If the fault allows retry and this is the first
-                * fault attempt, we try to release the mmap_sem
+                * fault attempt, we try to release the mmap_lock
                 * before waiting
                 */
                if (fault_flag_allow_retry_first(vmf->flags)) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                ttm_bo_get(bo);
-                               up_read(&vmf->vma->vm_mm->mmap_sem);
+                               mmap_read_unlock(vmf->vma->vm_mm);
                                if (!dma_resv_lock_interruptible(bo->base.resv,
                                                                 NULL))
                                        dma_resv_unlock(bo->base.resv);
@@@ -300,10 -300,8 +300,10 @@@ vm_fault_t ttm_bo_vm_fault_reserved(str
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
 +                      dma_fence_put(moving);
                        return VM_FAULT_NOPAGE;
                default:
 +                      dma_fence_put(moving);
                        return VM_FAULT_SIGBUS;
                }