drm/ttm: Use VM_PFNMAP for shared bo maps
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ac617f3..cdda784 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
-        * for reserve, and if it fails, retry the fault after scheduling.
+        * for reserve, and if it fails, retry the fault after waiting
+        * for the buffer to become unreserved.
         */
-
-       ret = ttm_bo_reserve(bo, true, true, false, 0);
+       ret = ttm_bo_reserve(bo, true, true, false, NULL);
        if (unlikely(ret != 0)) {
-               if (ret == -EBUSY)
-                       set_need_resched();
+               if (ret != -EBUSY)
+                       return VM_FAULT_NOPAGE;
+
+               if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+                       if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               up_read(&vma->vm_mm->mmap_sem);
+                               (void) ttm_bo_wait_unreserved(bo);
+                       }
+
+                       return VM_FAULT_RETRY;
+               }
+
+               /*
+                * If we wanted to change the locking order to
+                * mmap_sem -> bo::reserve, we'd use a blocking reserve
+                * here instead of retrying the fault...
+                */
                return VM_FAULT_NOPAGE;
        }
 
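The retry dance above follows the contract of the core fault path: VM_FAULT_RETRY may only be returned while FAULT_FLAG_ALLOW_RETRY is set, and unless FAULT_FLAG_RETRY_NOWAIT is also set the handler must drop mmap_sem itself before returning; the arch fault code then re-takes mmap_sem and repeats the fault with FAULT_FLAG_ALLOW_RETRY cleared. A minimal sketch of the same pattern, with hypothetical my_trylock() / my_wait_unlocked() helpers standing in for the trylock-mode ttm_bo_reserve() and ttm_bo_wait_unreserved():

        static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        {
                struct my_obj *obj = vma->vm_private_data; /* hypothetical */

                if (!my_trylock(obj)) {
                        if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                                if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                        /* Sleep without mmap_sem held; the
                                         * core re-takes it before retrying
                                         * the fault. */
                                        up_read(&vma->vm_mm->mmap_sem);
                                        my_wait_unlocked(obj);
                                }
                                return VM_FAULT_RETRY;
                        }
                        /* Retry not allowed: return and re-fault. */
                        return VM_FAULT_NOPAGE;
                }

                /* ... handle the fault with obj locked, then unlock ... */
                return VM_FAULT_NOPAGE;
        }
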
@@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                case 0:
                        break;
                case -EBUSY:
-                       set_need_resched();
                case -ERESTARTSYS:
                        retval = VM_FAULT_NOPAGE;
                        goto out_unlock;
@@ -206,7 +220,11 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                        pfn = page_to_pfn(page);
                }
 
-               ret = vm_insert_mixed(&cvma, address, pfn);
+               if (vma->vm_flags & VM_MIXEDMAP)
+                       ret = vm_insert_mixed(&cvma, address, pfn);
+               else
+                       ret = vm_insert_pfn(&cvma, address, pfn);
+
                /*
                 * Somebody beat us to this PTE or prefaulting to
                 * an already populated PTE, or prefaulting error.
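The dispatch above is forced by the helpers themselves: in this era's mm/memory.c each asserts the matching VMA flag, so neither can be called unconditionally. Their signatures are identical, which keeps the fault path simple:

        int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                          unsigned long pfn);
        int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                            unsigned long pfn);

vm_insert_pfn() requires VM_PFNMAP and always installs the raw pfn as a special PTE with no struct page tracking; vm_insert_mixed() requires VM_MIXEDMAP and, on architectures without pte_special() support, falls back to a full struct-page insertion when the pfn has a valid struct page. Both return -EBUSY for an already populated PTE, which the surrounding code treats as a benign race, per the comment above.
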
@@ -305,7 +323,14 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
         */
 
        vma->vm_private_data = bo;
-       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+
+       /*
+        * PFNMAP is faster than MIXEDMAP due to reduced page
+        * administration. So use MIXEDMAP only for private VMAs,
+        * where we need to support COW.
+        */
+       vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 out_unref:
        ttm_bo_unref(&bo);
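The COW requirement mentioned in the comment is what makes VM_SHARED the right test: copy-on-write can only happen on a mapping that is writable but not shared, and vm_insert_pfn() in this era BUG()s on a COW-capable VM_PFNMAP vma. For reference, a simplified sketch of the core mm's COW test (matching mm internals of the period):

        static inline bool is_cow_mapping(vm_flags_t flags)
        {
                return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
        }

A VM_SHARED vma therefore can never be COW, so the cheaper VM_PFNMAP is safe there.
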
@@ -320,7 +345,8 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 
        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
-       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND;
        return 0;
 }
 EXPORT_SYMBOL(ttm_fbdev_mmap);
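
From userspace, which path a mapping takes is decided entirely by the mmap() sharing mode. A hedged illustration, where drm_fd, bo_size and bo_mmap_offset are placeholders obtained through driver-specific ioctls:

        #include <sys/mman.h>

        /* MAP_SHARED sets VM_SHARED, so TTM now uses the faster VM_PFNMAP. */
        void *shared = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, drm_fd, bo_mmap_offset);

        /* MAP_PRIVATE needs COW on first write, so TTM keeps VM_MIXEDMAP. */
        void *priv = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE, drm_fd, bo_mmap_offset);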