kernel/linux-3.0.git: drivers/gpu/drm/exynos/exynos_drm_gem.c (tizen 2.4 release)
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <drm/exynos_drm.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

#define USERPTR_MAX_SIZE		SZ_64M

static struct exynos_drm_private_cb *private_cb;

void exynos_drm_priv_cb_register(struct exynos_drm_private_cb *cb)
{
	if (cb)
		private_cb = cb;
}

int register_buf_to_priv_mgr(struct exynos_drm_gem_obj *obj,
		unsigned int *priv_handle, unsigned int *priv_id)
{
	if (private_cb && private_cb->add_buffer)
		return private_cb->add_buffer(obj, priv_handle, priv_id);

	return 0;
}
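
/*
 * Note: private_cb is the bridge to an out-of-tree buffer manager (UMP in
 * this tree). A sketch of the expected registration flow, assuming a
 * hypothetical manager module built against struct exynos_drm_private_cb:
 *
 *	static struct exynos_drm_private_cb mgr_cb = {
 *		.add_buffer	= mgr_add_buffer,
 *		.release_buffer	= mgr_release_buffer,
 *	};
 *	exynos_drm_priv_cb_register(&mgr_cb);
 *
 * Until a manager registers, register_buf_to_priv_mgr() is a no-op that
 * returns 0.
 */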

static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

static int check_cache_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_DRM_CACHE_SEL_MASK | EXYNOS_DRM_CACHE_OP_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

static struct vm_area_struct *get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

static void put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}

/*
 * lock_userptr_vma - lock VMAs within the user address space
 *
 * This function locks the VMAs of a userptr buffer so that its
 * userspace pages cannot be swapped out. If the VMAs were left
 * unlocked, the pages could be swapped out and later swapped back in
 * at different physical locations, so an unprivileged user might end
 * up accessing different pages, and device DMA could reach physical
 * memory regions that were never intended for it.
 */
static int lock_userptr_vma(struct exynos_drm_gem_buf *buf, unsigned int lock)
{
	struct vm_area_struct *vma;
	unsigned long start, end;

	start = buf->userptr;
	end = buf->userptr + buf->size - 1;

	down_write(&current->mm->mmap_sem);

	do {
		vma = find_vma(current->mm, start);
		if (!vma) {
			up_write(&current->mm->mmap_sem);
			return -EFAULT;
		}

		if (lock)
			vma->vm_flags |= VM_LOCKED;
		else
			vma->vm_flags &= ~VM_LOCKED;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	up_write(&current->mm->mmap_sem);

	return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SZ_1M)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);
		else
			goto out;
	}
out:
	return roundup(size, PAGE_SIZE);
}
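
/*
 * A worked example of the rounding above, assuming the usual ARM value
 * SECTION_SIZE == SZ_1M: a contiguous request of 1.5 MiB rounds up to
 * 2 MiB, a contiguous request of 80 KiB rounds up to 128 KiB, and any
 * non-contiguous request (or anything under 64 KiB) simply rounds up to
 * the next PAGE_SIZE multiple.
 */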

struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
	struct page *p, **pages;
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		/* alloc_page() returns NULL on failure, not an ERR_PTR. */
		p = alloc_page(gfpmask);
		if (!p)
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	/* release every page allocated so far, including pages[0]. */
	while (--i >= 0)
		__free_page(pages[i]);

	drm_free_large(pages);
	return ERR_PTR(-ENOMEM);
}

static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages)
{
	int npages;

	npages = obj->size >> PAGE_SHIFT;

	while (--npages >= 0)
		__free_page(pages[npages]);

	drm_free_large(pages);
}

static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		if (!buf->pages)
			return -EINTR;

		pfn = page_to_pfn(buf->pages[page_offset++]);
	} else
		pfn = (buf->paddr >> PAGE_SHIFT) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;
	buf->page_size = PAGE_SIZE;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		ret = -EFAULT;
		goto err1;
	}

	sgl = buf->sgt->sgl;

	/* add all pages to the sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	buf->pages = pages;
	return ret;
err1:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	exynos_gem_put_pages(obj, pages);
	return ret;
}
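
/*
 * The loop above fills one single-page scatterlist entry per allocated
 * page, so the sg table describes the whole (physically scattered)
 * buffer. sg_dma_address() is seeded with page_to_phys() directly,
 * which assumes a 1:1 physical/DMA address view unless an IOMMU
 * mapping is set up later.
 */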

static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if the buffer type is EXYNOS_BO_NONCONTIG then release all
	 * the pages allocated at the gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages);
	buf->pages = NULL;

	/* handle the UNCACHED type here. TODO */
}

static void exynos_drm_put_userptr(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	struct vm_area_struct *vma;
	int npages;

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;
	vma = exynos_gem_obj->vma;

	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
		put_vma(exynos_gem_obj->vma);
		goto out;
	}

	npages = buf->size >> PAGE_SHIFT;

	if (exynos_gem_obj->flags & EXYNOS_BO_USERPTR && !buf->pfnmap)
		lock_userptr_vma(buf, 0);

	npages--;
	while (npages >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[npages]);

		put_page(buf->pages[npages]);
		npages--;
	}

out:
	kfree(buf->pages);
	buf->pages = NULL;

	kfree(buf->sgt);
	buf->sgt = NULL;
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;
	struct exynos_drm_private *private;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	private = obj->dev->dev_private;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	/*
	 * release the private buffer from its table.
	 *
	 * this callback releases the ump object only if the user requested
	 * ump export; otherwise it just returns.
	 */
	if (private_cb && private_cb->release_buffer)
		private_cb->release_buffer(exynos_gem_obj->priv_handle);

	if (!buf->pages)
		return;

	/*
	 * do not release a memory region obtained from an exporter.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	if (private->vmm)
		exynos_drm_iommu_unmap_gem(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
		exynos_drm_gem_put_pages(obj);
	else if (exynos_gem_obj->flags & EXYNOS_BO_USERPTR)
		exynos_drm_put_userptr(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}
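
/*
 * Teardown order above: the IOMMU mapping is undone first, then the
 * backing store is released along the same path it was created
 * (fault-allocated pages, pinned userptr pages, or a contiguous
 * allocation), and only then are the buffer bookkeeping, the mmap
 * offset and the GEM object itself freed.
 */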

struct exynos_drm_gem_obj *exynos_drm_gem_get_obj(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj;
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->buffer->size;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_gem_buf *buf;
	unsigned long packed_size = size;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->packed_size = packed_size;
	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	/*
	 * allocate all pages for the desired size if the user wants
	 * physically non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG) {
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	} else {
		ret = exynos_drm_alloc_buf(dev, buf, flags);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	}

	if (private->vmm) {
		exynos_gem_obj->vmm = private->vmm;

		buf->dev_addr = exynos_drm_iommu_map_gem(dev,
							&exynos_gem_obj->base);
		if (!buf->dev_addr) {
			DRM_ERROR("failed to map gem with iommu table.\n");
			ret = -EFAULT;

			if (flags & EXYNOS_BO_NONCONTIG)
				exynos_drm_gem_put_pages(&exynos_gem_obj->base);
			else
				exynos_drm_free_buf(dev, flags, buf);

			drm_gem_object_release(&exynos_gem_obj->base);

			goto err_fini_buf;
		}

		buf->dma_addr = buf->dev_addr;
	} else
		buf->dma_addr = buf->paddr;

	DRM_DEBUG_KMS("dma_addr = 0x%x\n", buf->dma_addr);

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return ret;
}
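
/*
 * A minimal userspace sketch of allocating a buffer through this ioctl,
 * assuming the usual libdrm wrapper and the flag/struct layout from
 * exynos_drm.h (names outside this file are illustrative):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size	= 1920 * 1080 * 4,
 *		.flags	= EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE,
 *	};
 *	if (drmCommandWriteRead(fd, DRM_EXYNOS_GEM_CREATE,
 *				&req, sizeof(req)) == 0)
 *		use_handle(req.handle);
 *
 * On success, req.handle names the GEM object within this drm_file.
 */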

void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp,
					unsigned int *gem_obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	*gem_obj = (unsigned int)obj;

	return &buf->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev, void *gem_obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	if (!gem_obj)
		return;

	/* use a gem handle instead of the object. TODO */

	obj = gem_obj;

	exynos_gem_obj = to_exynos_gem_obj(obj);

	/*
	 * unreference this gem object because it was already
	 * referenced in exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= (VM_IO | VM_RESERVED);

	update_vm_cache_attr(exynos_gem_obj, vma);

	vma->vm_file = filp;

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * the buffer describes memory allocated by user request or at
	 * framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if the user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		vma->vm_flags |= VM_MIXEDMAP;

		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->paddr) >>
								PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}
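
/*
 * Two mapping paths above: non-contiguous buffers are mapped page by
 * page with vm_insert_page() under VM_MIXEDMAP, while contiguous
 * buffers are mapped in one go by remap_pfn_range() from the physical
 * base address. Either way, the page protection chosen by
 * update_vm_cache_attr() (cached / write-combined / non-cached)
 * applies to the whole mapping.
 */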

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned int addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
	up_write(&current->mm->mmap_sem);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}

static int exynos_drm_get_userptr(struct drm_device *dev,
				struct exynos_drm_gem_obj *obj,
				unsigned long userptr,
				unsigned int write)
{
	unsigned int get_npages;
	unsigned long npages = 0;
	struct vm_area_struct *vma;
	struct exynos_drm_gem_buf *buf = obj->buffer;
	int ret;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, userptr);

	/* the memory region is mmapped with VM_PFNMAP. */
	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
		unsigned long this_pfn, prev_pfn, pa;
		unsigned long start, end, offset;
		struct scatterlist *sgl;
		int ret;

		start = userptr;
		offset = userptr & ~PAGE_MASK;
		end = start + buf->size;
		sgl = buf->sgt->sgl;

		for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
			ret = follow_pfn(vma, start, &this_pfn);
			if (ret)
				goto err;

			if (prev_pfn == 0) {
				pa = this_pfn << PAGE_SHIFT;
				buf->paddr = pa + offset;
			} else if (this_pfn != prev_pfn + 1) {
				/* the region must be physically contiguous. */
				ret = -EINVAL;
				goto err;
			}

			sg_dma_address(sgl) = (pa + offset);
			sg_dma_len(sgl) = PAGE_SIZE;
			prev_pfn = this_pfn;
			pa += PAGE_SIZE;
			npages++;
			sgl = sg_next(sgl);
		}

		obj->vma = get_vma(vma);
		if (!obj->vma) {
			ret = -ENOMEM;
			goto err;
		}

		up_read(&current->mm->mmap_sem);
		buf->pfnmap = true;

		return npages;
err:
		buf->paddr = 0;
		up_read(&current->mm->mmap_sem);

		return ret;
	}

	up_read(&current->mm->mmap_sem);

	/*
	 * lock the VMAs behind the userptr so the userspace buffer
	 * cannot be swapped out.
	 */
	ret = lock_userptr_vma(buf, 1);
	if (ret < 0) {
		DRM_ERROR("failed to lock vma for userptr.\n");
		lock_userptr_vma(buf, 0);
		return 0;
	}

	buf->write = write;
	npages = buf->size >> PAGE_SHIFT;

	down_read(&current->mm->mmap_sem);
	get_npages = get_user_pages(current, current->mm, userptr,
					npages, write, 1, buf->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (get_npages != npages)
		DRM_ERROR("failed to get user_pages.\n");

	buf->userptr = userptr;
	buf->pfnmap = false;

	return get_npages;
}
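
/*
 * Two cases above: a VM_PFNMAP region (e.g. memory another driver
 * already mapped to userspace) is walked with follow_pfn() and must be
 * physically contiguous, while ordinary anonymous or file-backed
 * memory is pinned page by page with get_user_pages() after its VMAs
 * are locked. The return value is the page count on success, so
 * callers compare it against the expected number of pages.
 */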

int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_private *priv = dev->dev_private;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_userptr *args = data;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *sgl;
	unsigned long size, userptr, packed_size;
	unsigned int npages;
	int ret, get_npages;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!args->size) {
		DRM_ERROR("invalid size.\n");
		return -EINVAL;
	}

	ret = check_gem_flags(args->flags);
	if (ret)
		return ret;

	packed_size = args->size;

	size = roundup_gem_size(args->size, EXYNOS_BO_USERPTR);

	userptr = args->userptr;

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return -ENOMEM;

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj->packed_size = packed_size;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate buf->sgt.\n");
		ret = -ENOMEM;
		goto err_release_gem;
	}

	npages = size >> PAGE_SHIFT;

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		goto err_free_sgt;
	}

	buf->pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
	if (!buf->pages) {
		DRM_ERROR("failed to allocate buf->pages\n");
		ret = -ENOMEM;
		goto err_free_table;
	}

	exynos_gem_obj->buffer = buf;

	get_npages = exynos_drm_get_userptr(dev, exynos_gem_obj, userptr, 1);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		/* propagate a real error code, never a positive count. */
		ret = (get_npages < 0) ? get_npages : -EFAULT;
		goto err_release_userptr;
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
						&args->handle);
	if (ret < 0) {
		DRM_ERROR("failed to create gem handle.\n");
		goto err_release_userptr;
	}

	sgl = buf->sgt->sgl;

	/*
	 * if buf->pfnmap is false then fill the sgl with the pinned pages;
	 * if it is true then the sgl was already filled in
	 * exynos_drm_get_userptr(), so nothing needs to be done here.
	 */
	if (!buf->pfnmap) {
		unsigned int i = 0;

		/* add all pages to the sg list. */
		while (i < npages) {
			sg_set_page(sgl, buf->pages[i], PAGE_SIZE, 0);
			sg_dma_address(sgl) = page_to_phys(buf->pages[i]);
			i++;
			sgl = sg_next(sgl);
		}
	}

	/* always use EXYNOS_BO_USERPTR as the memory type for userptr. */
	exynos_gem_obj->flags |= EXYNOS_BO_USERPTR;

	if (priv->vmm) {
		exynos_gem_obj->vmm = priv->vmm;

		buf->dev_addr = exynos_drm_iommu_map_gem(dev,
							&exynos_gem_obj->base);
		if (!buf->dev_addr) {
			DRM_ERROR("failed to map gem with iommu table.\n");
			ret = -EFAULT;

			exynos_drm_free_buf(dev, exynos_gem_obj->flags, buf);

			drm_gem_object_release(&exynos_gem_obj->base);

			goto err_release_handle;
		}

		buf->dma_addr = buf->dev_addr;
	} else
		buf->dma_addr = buf->paddr;

	return 0;

err_release_handle:
	drm_gem_handle_delete(file_priv, args->handle);
err_release_userptr:
	get_npages--;
	while (get_npages >= 0)
		put_page(buf->pages[get_npages--]);
	kfree(buf->pages);
	buf->pages = NULL;
err_free_table:
	sg_free_table(buf->sgt);
err_free_sgt:
	kfree(buf->sgt);
	buf->sgt = NULL;
err_release_gem:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
err_free_buffer:
	exynos_drm_free_buf(dev, 0, buf);
	return ret;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int exynos_drm_gem_export_ump_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	struct drm_exynos_gem_ump *ump = data;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file, ump->gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	/* register the gem buffer to the private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
				(unsigned int *)&exynos_gem_obj->priv_handle,
				(unsigned int *)&exynos_gem_obj->priv_id);
	if (ret < 0)
		goto err_unreference_gem;

	ump->secure_id = exynos_gem_obj->priv_id;
	drm_gem_object_unreference(obj);

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("got secure id = %d\n", ump->secure_id);

	return 0;

err_unreference_gem:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int exynos_gem_l1_cache_ops(struct drm_device *drm_dev,
					struct drm_exynos_gem_cache_op *op)
{
	if (op->flags & EXYNOS_DRM_CACHE_FSH_ALL) {
		/*
		 * each cortex-A9 core has its own l1 cache, so flush the
		 * l1 caches of all cores; other core types should be
		 * considered later. TODO
		 */
		if (op->flags & EXYNOS_DRM_ALL_CORES)
			flush_all_cpu_caches();
		else
			__cpuc_flush_user_all();

	} else if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, op->usr_addr);
		up_read(&current->mm->mmap_sem);

		if (!vma) {
			DRM_ERROR("failed to get vma.\n");
			return -EFAULT;
		}

		__cpuc_flush_user_range(op->usr_addr, op->usr_addr + op->size,
					vma->vm_flags);
	}

	return 0;
}

static int exynos_gem_l2_cache_ops(struct drm_device *drm_dev,
				struct drm_file *filp,
				struct drm_exynos_gem_cache_op *op)
{
	if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE ||
			op->flags & EXYNOS_DRM_CACHE_INV_RANGE ||
			op->flags & EXYNOS_DRM_CACHE_CLN_RANGE) {
		unsigned long virt_start = op->usr_addr, pfn;
		phys_addr_t phy_start, phy_end;
		struct vm_area_struct *vma;
		int ret;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, op->usr_addr);
		up_read(&current->mm->mmap_sem);

		if (!vma) {
			DRM_ERROR("failed to get vma.\n");
			return -EFAULT;
		}

		/*
		 * range operation on the l2 cache (PIPT)
		 */
		if (vma && (vma->vm_flags & VM_PFNMAP)) {
			ret = follow_pfn(vma, virt_start, &pfn);
			if (ret < 0) {
				DRM_ERROR("failed to get pfn.\n");
				return ret;
			}

			/*
			 * a memory region with VM_PFNMAP is physically
			 * contiguous, so do the range operation just once.
			 */
			phy_start = pfn << PAGE_SHIFT;
			phy_end = phy_start + op->size;

			if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
				outer_flush_range(phy_start, phy_end);
			else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
				outer_inv_range(phy_start, phy_end);
			else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
				outer_clean_range(phy_start, phy_end);

			return 0;
		} else {
			struct exynos_drm_gem_obj *exynos_obj;
			struct exynos_drm_gem_buf *buf;
			struct drm_gem_object *obj;
			struct scatterlist *sgl;
			unsigned int npages, i = 0;

			mutex_lock(&drm_dev->struct_mutex);

			obj = drm_gem_object_lookup(drm_dev, filp,
							op->gem_handle);
			if (!obj) {
				DRM_ERROR("failed to lookup gem object.\n");
				mutex_unlock(&drm_dev->struct_mutex);
				return -EINVAL;
			}

			exynos_obj = to_exynos_gem_obj(obj);
			buf = exynos_obj->buffer;
			npages = buf->size >> PAGE_SHIFT;
			sgl = buf->sgt->sgl;

			drm_gem_object_unreference(obj);
			mutex_unlock(&drm_dev->struct_mutex);

			/*
			 * in this case the memory region is physically
			 * non-contiguous, so do the range operation on
			 * every page.
			 */
			while (i < npages) {
				phy_start = sg_dma_address(sgl);
				phy_end = phy_start + buf->page_size;

				if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
					outer_flush_range(phy_start, phy_end);
				else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
					outer_inv_range(phy_start, phy_end);
				else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
					outer_clean_range(phy_start, phy_end);

				i++;
				sgl = sg_next(sgl);
			}

			return 0;
		}
	}

	if (op->flags & EXYNOS_DRM_CACHE_FSH_ALL)
		outer_flush_all();
	else if (op->flags & EXYNOS_DRM_CACHE_INV_ALL)
		outer_inv_all();
	else if (op->flags & EXYNOS_DRM_CACHE_CLN_ALL)
		outer_clean_all();
	else {
		DRM_ERROR("invalid l2 cache operation.\n");
		return -EINVAL;
	}

	return 0;
}
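
/*
 * The L2 here is ARM's outer PIPT cache (e.g. the PL310 driven through
 * the outer_* ops), which is why range maintenance works on physical
 * addresses: for a VM_PFNMAP region one physical range suffices, while
 * a scattered GEM buffer has to be walked sg entry by sg entry.
 */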

int exynos_drm_gem_cache_op_ioctl(struct drm_device *drm_dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_exynos_gem_cache_op *op = data;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_cache_flags(op->flags);
	if (ret)
		return -EINVAL;

	/*
	 * perform the cache operation on the whole cache if op->size is
	 * SZ_1M or more, because a range operation on a large buffer
	 * costs more than a full flush.
	 */
	if (op->size >= SZ_1M) {
		if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE) {
			if (op->flags & EXYNOS_DRM_L1_CACHE)
				__cpuc_flush_user_all();

			if (op->flags & EXYNOS_DRM_L2_CACHE)
				outer_flush_all();

			return 0;
		} else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE) {
			if (op->flags & EXYNOS_DRM_L2_CACHE)
				outer_inv_all();

			return 0;
		} else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE) {
			if (op->flags & EXYNOS_DRM_L2_CACHE)
				outer_clean_all();

			return 0;
		}
	}

	if (op->flags & EXYNOS_DRM_L1_CACHE ||
			op->flags & EXYNOS_DRM_ALL_CACHES) {
		ret = exynos_gem_l1_cache_ops(drm_dev, op);
		if (ret < 0)
			goto err;
	}

	if (op->flags & EXYNOS_DRM_L2_CACHE ||
			op->flags & EXYNOS_DRM_ALL_CACHES)
		ret = exynos_gem_l2_cache_ops(drm_dev, file_priv, op);
err:
	return ret;
}

/* temporary functions. */
#ifndef CONFIG_SLP_DMABUF
int exynos_drm_gem_get_phy_ioctl(struct drm_device *drm_dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_exynos_gem_get_phy *get_phy = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&drm_dev->struct_mutex);

	obj = drm_gem_object_lookup(drm_dev, file_priv, get_phy->gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	/*
	 * a physical address can be returned only for the physically
	 * contiguous (EXYNOS_DRM_GEM_PC) memory type.
	 */
	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("not a physically contiguous memory type.\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&drm_dev->struct_mutex);
		return -EINVAL;
	}

	get_phy->phy_addr = exynos_gem_obj->buffer->paddr;
	get_phy->size = exynos_gem_obj->buffer->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&drm_dev->struct_mutex);

	return 0;
}
#endif

int exynos_drm_gem_phy_imp_ioctl(struct drm_device *drm_dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_exynos_gem_phy_imp *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_private *private = drm_dev->dev_private;
	struct exynos_drm_gem_buf *buffer;
	unsigned long size, packed_size;
	unsigned int flags = EXYNOS_BO_CONTIG;
	unsigned int npages, i = 0;
	struct scatterlist *sgl;
	dma_addr_t start_addr;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	packed_size = args->size;
	size = roundup(args->size, PAGE_SIZE);

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, size);
	if (!exynos_gem_obj)
		return -ENOMEM;

	buffer = exynos_drm_init_buf(drm_dev, size);
	if (!buffer) {
		DRM_DEBUG_KMS("failed to allocate buffer\n");
		ret = -ENOMEM;
		goto err_release_gem_obj;
	}

	exynos_gem_obj->packed_size = packed_size;
	buffer->paddr = (dma_addr_t)args->phy_addr;
	buffer->size = size;

	/*
	 * if shared is true, this buffer won't be released here.
	 * it was allocated by someone else, so don't release it.
	 */
	buffer->shared = true;

	exynos_gem_obj->buffer = buffer;

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->gem_handle);
	if (ret)
		goto err_fini_buf;

	DRM_DEBUG_KMS("got gem handle = 0x%x\n", args->gem_handle);

	npages = buffer->size >> PAGE_SHIFT;
	buffer->page_size = PAGE_SIZE;

	buffer->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buffer->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err_release_handle;
	}

	ret = sg_alloc_table(buffer->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		goto err_free_sgt;
	}

	/* an array of page pointers, not of struct page itself. */
	buffer->pages = kzalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_sg_free_table;
	}

	sgl = buffer->sgt->sgl;
	start_addr = buffer->paddr;

	while (i < npages) {
		buffer->pages[i] = phys_to_page(start_addr);
		sg_set_page(sgl, buffer->pages[i], buffer->page_size, 0);
		sg_dma_address(sgl) = start_addr;
		start_addr += buffer->page_size;
		sgl = sg_next(sgl);
		i++;
	}

	if (private->vmm) {
		exynos_gem_obj->vmm = private->vmm;

		buffer->dev_addr = exynos_drm_iommu_map_gem(drm_dev,
							&exynos_gem_obj->base);
		if (!buffer->dev_addr) {
			DRM_ERROR("failed to map gem with iommu table.\n");
			ret = -EFAULT;

			exynos_drm_free_buf(drm_dev, flags, buffer);

			drm_gem_object_release(&exynos_gem_obj->base);

			goto err_free_pages;
		}

		buffer->dma_addr = buffer->dev_addr;
	} else
		buffer->dma_addr = buffer->paddr;

	DRM_DEBUG_KMS("dma_addr = 0x%x\n", buffer->dma_addr);

	return 0;

err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_sg_free_table:
	sg_free_table(buffer->sgt);
err_free_sgt:
	kfree(buffer->sgt);
	buffer->sgt = NULL;
err_release_handle:
	drm_gem_handle_delete(file_priv, args->gem_handle);
err_fini_buf:
	exynos_drm_fini_buf(drm_dev, buffer);
err_release_gem_obj:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	return ret;
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is called by a user application
	 *	with the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * args->bpp >> 3;
	args->size = args->pitch * args->height;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
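
/*
 * The pitch/size arithmetic above, worked through for a common mode: a
 * 1280x720 dumb buffer at 32 bpp gives pitch = 1280 * 32 >> 3 = 5120
 * bytes and size = 5120 * 720 = 3686400 bytes (3600 KiB), which
 * exynos_drm_gem_create() then rounds up per roundup_gem_size().
 */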

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of the memory allocated for a drm framebuffer.
	 * - this callback is called by a user application
	 *	with the DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them become 0 then exynos_drm_gem_free_object()
	 * is called through a callback to release the resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

void exynos_drm_gem_close_object(struct drm_gem_object *obj,
				struct drm_file *file)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* TODO */
}

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map pages.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}
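
/*
 * The fault handler resolves one page per fault: the faulting address
 * is turned into a page offset inside the object, mapped with
 * vm_insert_mixed() via exynos_drm_gem_map_pages(), and the kernel
 * error code is translated back into a VM_FAULT_* code by
 * convert_to_vm_err_msg().
 */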

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}