drivers/gpu/drm/ttm/ttm_bo_vm.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

/* Number of pages speculatively inserted per fault; see ttm_bo_vm_fault(). */
#define TTM_BO_VM_NUM_PREFAULT 16

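/*
 * ttm_bo_vm_lookup_rb - look up the buffer object backing a device
 * address range.
 *
 * Walks the address-space rb-tree for the bo whose VM node covers the
 * @num_pages pages starting at @page_start. Returns NULL if no node
 * matches or if the best match is too small to cover the whole range.
 * The caller must hold bdev->vm_lock and take its own reference on the
 * returned bo.
 */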
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        struct rb_node *cur = bdev->addr_space_rb.rb_node;
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        while (likely(cur != NULL)) {
                bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        cur = cur->rb_right;
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                } else
                        cur = cur->rb_left;
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return NULL;

        return best_bo;
}

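/*
 * ttm_bo_vm_fault - .fault handler for TTM-backed VMAs.
 *
 * Takes the bo reservation with a trylock to avoid the locking order
 * reversal against mmap_sem, waits for any pipelined move to finish,
 * adjusts the caching bits of vma->vm_page_prot, and then inserts the
 * faulting page plus up to TTM_BO_VM_NUM_PREFAULT - 1 following pages
 * into the page tables.
 */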
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int ret;
        int i;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];

        /*
         * Work around a locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: perform a trylock operation
         * for reserve, and if it fails, retry the fault after scheduling.
         */

        ret = ttm_bo_reserve(bo, true, true, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY)
                        set_need_resched();
                return VM_FAULT_NOPAGE;
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                        set_need_resched();
                        /* fall through */
                case -ERESTARTSYS:
                        retval = VM_FAULT_NOPAGE;
                        goto out_unlock;
                default:
                        retval = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */

        spin_lock(&bdev->fence_lock);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                ret = ttm_bo_wait(bo, false, true, false);
                spin_unlock(&bdev->fence_lock);
                if (unlikely(ret != 0)) {
                        retval = (ret != -ERESTARTSYS) ?
                            VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                        goto out_unlock;
                }
        } else
                spin_unlock(&bdev->fence_lock);

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
        page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        /*
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
         * since the mmap_sem is only held in read mode. However, we
         * modify only the caching bits of vma->vm_page_prot and
         * consider those bits protected by the bo->mutex, as we should
         * be the only writers. The only expected readers of these bits
         * are vm_insert_mixed() and possibly fork().
         *
         * TODO: Add a list of vmas to the bo, and change
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
        if (bo->mem.bus.is_iomem) {
                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                vma->vm_page_prot);
        } else {
                ttm = bo->ttm;
                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    vm_get_page_prot(vma->vm_flags) :
                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

                /* Allocate all pages at once; this is the most common usage. */
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
        }

        /*
         * Speculatively prefault a number of pages. Only report an
         * error for the first page.
         */
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem)
                        pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_io_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        pfn = page_to_pfn(page);
                }

                ret = vm_insert_mixed(vma, address, pfn);
                /*
                 * Either somebody beat us to this PTE, we prefaulted
                 * into an already-populated PTE, or prefaulting failed.
                 */

                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_io_unlock;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
out_io_unlock:
        ttm_mem_io_unlock(man);
out_unlock:
        ttm_bo_unreserve(bo);
        return retval;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        (void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close
};

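/*
 * ttm_bo_mmap - mmap entry point for TTM device files.
 *
 * Looks up the buffer object backing the mmap offset, asks the driver
 * to verify access rights via verify_access(), and wires the VMA up to
 * ttm_bo_vm_ops. The bo reference taken here is transferred to
 * vma->vm_private_data and dropped again in ttm_bo_vm_close().
 *
 * A minimal sketch of how a driver would typically call this; the
 * mydrv_* names and the private_data layout are illustrative
 * assumptions, not part of this file:
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_device *mdev = filp->private_data;
 *
 *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
 *	}
 */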
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
                                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL)) {
                pr_err("Could not find buffer object to map\n");
                return -EINVAL;
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = bo;
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

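/*
 * ttm_fbdev_mmap - mmap a single, already validated buffer object,
 * e.g. for a driver's fbdev emulation. No address-space lookup or
 * verify_access() check is performed, and the VMA must map the bo
 * from offset zero. Takes its own reference on @bo, dropped again in
 * ttm_bo_vm_close().
 */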
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

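/*
 * ttm_bo_io - read/write helper for TTM device files.
 *
 * Looks up the buffer object backing the file position, verifies
 * access, kmaps the affected page range and copies up to @count bytes
 * (clamped to the end of the bo) between the bo and user space.
 * Returns the number of bytes transferred or a negative error code.
 */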
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf, size_t count,
                  loff_t *f_pos, bool write)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_driver *driver;
        struct ttm_bo_kmap_obj map;
        unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL))
                return -EFAULT;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        /*
         * Use the clamped io_size and convert back to a bo-relative page
         * index, so the kmap range is consistent with kmap_offset and
         * stays within the bo.
         */
        kmap_end = ((*f_pos + io_size - 1) >> PAGE_SHIFT) - bo->vm_node->start;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                ret = -EAGAIN;
                goto out_unref;
        default:
                goto out_unref;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                goto out_unref;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        /* copy_{to,from}_user() returns the number of uncopied bytes. */
        if (unlikely(ret != 0))
                return -EFAULT;

        *f_pos += io_size;

        return io_size;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}

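/*
 * ttm_bo_fbdev_io - read/write helper for a single buffer object.
 *
 * Like ttm_bo_io(), but operates directly on @bo and treats @f_pos as
 * an offset into the bo rather than a device-file position. Note the
 * asymmetric reference handling: once the kmap succeeds, a reference
 * on @bo is dropped via ttm_bo_unref(), while early error returns
 * leave the caller's reference untouched.
 */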
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        /* Use the clamped io_size so the kmap range stays within the bo. */
        kmap_end = (*f_pos + io_size - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                return -EAGAIN;
        default:
                return ret;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        /* copy_{to,from}_user() returns the number of uncopied bytes. */
        if (unlikely(ret != 0))
                return -EFAULT;

        *f_pos += io_size;

        return io_size;
}