3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
29 #include <drm/exynos_drm.h>
30 #include <linux/shmem_fs.h>
31 #include <linux/dma-buf.h>
33 #include "exynos_drm_drv.h"
34 #include "exynos_drm_gem.h"
35 #include "exynos_drm_buf.h"
36 #include "exynos_drm_iommu.h"
38 #define USERPTR_MAX_SIZE SZ_64M
/* Callback table into an external private buffer manager (e.g. UMP); may be NULL. */
40 static struct exynos_drm_private_cb *private_cb;
/*
 * exynos_drm_priv_cb_register - install the private buffer-manager callbacks
 * used by register_buf_to_priv_mgr() and exynos_drm_gem_destroy().
 * (function body is missing from this listing)
 */
42 void exynos_drm_priv_cb_register(struct exynos_drm_private_cb *cb)
/*
 * register_buf_to_priv_mgr - hand a GEM object to the private buffer manager.
 * Forwards to the registered add_buffer() callback, which fills in a private
 * handle and id; a no-op when no callback table is registered.
 */
48 int register_buf_to_priv_mgr(struct exynos_drm_gem_obj *obj,
49 unsigned int *priv_handle, unsigned int *priv_id)
51 if (private_cb && private_cb->add_buffer)
52 return private_cb->add_buffer(obj, priv_handle, priv_id);
/*
 * convert_to_vm_err_msg - translate an errno from the fault path into a
 * VM_FAULT_* code (NOPAGE / OOM / SIGBUS).  The switch arms selecting each
 * case are missing from this listing.
 */
57 static unsigned int convert_to_vm_err_msg(int msg)
65 out_msg = VM_FAULT_NOPAGE;
69 out_msg = VM_FAULT_OOM;
73 out_msg = VM_FAULT_SIGBUS;
/* Reject GEM flags containing any bit outside EXYNOS_BO_MASK. */
80 static int check_gem_flags(unsigned int flags)
82 if (flags & ~(EXYNOS_BO_MASK)) {
83 DRM_ERROR("invalid flags.\n");
/* Reject cache-op flags outside the cache-select and cache-op masks. */
90 static int check_cache_flags(unsigned int flags)
92 if (flags & ~(EXYNOS_DRM_CACHE_SEL_MASK | EXYNOS_DRM_CACHE_OP_MASK)) {
93 DRM_ERROR("invalid flags.\n");
/*
 * get_vma - duplicate a VMA descriptor so it can outlive the caller's mm.
 * Calls vm_ops->open() and takes a file reference on vma->vm_file, then
 * memcpy()s the descriptor.  The copy is detached: vm_mm/vm_next/vm_prev
 * are cleared since it is not linked into any mm's VMA list.
 */
100 static struct vm_area_struct *get_vma(struct vm_area_struct *vma)
102 struct vm_area_struct *vma_copy;
104 vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
108 if (vma->vm_ops && vma->vm_ops->open)
109 vma->vm_ops->open(vma);
112 get_file(vma->vm_file);
114 memcpy(vma_copy, vma, sizeof(*vma));
116 vma_copy->vm_mm = NULL;
117 vma_copy->vm_next = NULL;
118 vma_copy->vm_prev = NULL;
/*
 * put_vma - release a VMA copy taken with get_vma().
 * Calls vm_ops->close(); presumably the missing lines also drop the file
 * reference and kfree() the copy -- not visible in this listing.
 */
123 static void put_vma(struct vm_area_struct *vma)
128 if (vma->vm_ops && vma->vm_ops->close)
129 vma->vm_ops->close(vma);
/*
 * lock_userptr_vma - set or clear VM_LOCKED on every VMA covering a userptr
 * buffer
 * @buf: buffer whose [userptr, userptr + size) range is walked
 * @lock: non-zero sets VM_LOCKED, zero clears it
 *
 * Locking the VMAs keeps the userspace pages from being swapped out; if they
 * were swapped, an unprivileged user could end up with different pages and a
 * device DMA could touch physical memory it was never meant to access.
 *
 * NOTE(review): every "¤t" token below is mojibake for "&current" --
 * the encoding must be restored before this file will compile.
 */
146 static int lock_userptr_vma(struct exynos_drm_gem_buf *buf, unsigned int lock)
148 struct vm_area_struct *vma;
149 unsigned long start, end;
151 start = buf->userptr;
152 end = buf->userptr + buf->size - 1;
154 down_write(¤t->mm->mmap_sem);
157 vma = find_vma(current->mm, start);
159 up_write(¤t->mm->mmap_sem);
164 vma->vm_flags |= VM_LOCKED;
166 vma->vm_flags &= ~VM_LOCKED;
168 start = vma->vm_end + 1;
169 } while (vma->vm_end < end);
171 up_write(¤t->mm->mmap_sem);
/*
 * update_vm_cache_attr - derive vma->vm_page_prot from the BO cache flags:
 * cached (plain vm_get_page_prot), write-combined, or non-cached (default).
 */
176 static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
177 struct vm_area_struct *vma)
179 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
181 /* non-cachable as default. */
182 if (obj->flags & EXYNOS_BO_CACHABLE)
183 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
184 else if (obj->flags & EXYNOS_BO_WC)
186 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
189 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
/*
 * roundup_gem_size - round an allocation size up to its allocator granule.
 * Physically contiguous buffers are rounded to SECTION_SIZE (or SZ_64K for
 * sizes >= 64K); everything else is rounded to PAGE_SIZE.
 */
192 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
194 if (!IS_NONCONTIG_BUFFER(flags)) {
196 return roundup(size, SECTION_SIZE);
197 else if (size >= SZ_64K)
198 return roundup(size, SZ_64K);
203 return roundup(size, PAGE_SIZE);
/*
 * exynos_gem_get_pages - allocate one page per PAGE_SIZE chunk of the object
 * with the given gfp mask.  On a failed alloc_page() the error path frees
 * the pages gathered so far and the pointer array, returning ERR_PTR.
 */
206 struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
209 struct page *p, **pages;
212 npages = obj->size >> PAGE_SHIFT;
214 pages = drm_malloc_ab(npages, sizeof(struct page *));
216 return ERR_PTR(-ENOMEM);
218 for (i = 0; i < npages; i++) {
219 p = alloc_page(gfpmask);
229 __free_page(pages[i]);
231 drm_free_large(pages);
232 return ERR_PTR(PTR_ERR(p));
/* Free every page previously allocated by exynos_gem_get_pages(), then the array. */
235 static void exynos_gem_put_pages(struct drm_gem_object *obj,
240 npages = obj->size >> PAGE_SHIFT;
242 while (--npages >= 0)
243 __free_page(pages[npages]);
245 drm_free_large(pages);
/*
 * exynos_drm_gem_map_pages - insert one page into a userspace VMA on fault.
 * Non-contiguous objects take the pfn from buf->pages[]; contiguous ones
 * compute it from buf->paddr plus the page offset.
 */
248 static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
249 struct vm_area_struct *vma,
250 unsigned long f_vaddr,
253 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
254 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
257 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
261 pfn = page_to_pfn(buf->pages[page_offset++]);
263 pfn = (buf->paddr >> PAGE_SHIFT) + page_offset;
265 return vm_insert_mixed(vma, f_vaddr, pfn);
/*
 * exynos_drm_gem_get_pages - back a non-contiguous GEM object with pages.
 * Allocates movable highmem pages, then builds an sg_table with one entry
 * per page.  On sg table failures the already-allocated pages are released
 * via exynos_gem_put_pages().
 */
268 static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
270 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
271 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
272 struct scatterlist *sgl;
274 unsigned int npages, i = 0;
278 DRM_DEBUG_KMS("already allocated.\n");
282 pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
284 DRM_ERROR("failed to get pages.\n");
285 return PTR_ERR(pages);
288 npages = obj->size >> PAGE_SHIFT;
289 buf->page_size = PAGE_SIZE;
291 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
293 DRM_ERROR("failed to allocate sg table.\n");
298 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
300 DRM_ERROR("failed to initialize sg table.\n");
307 /* set all pages to sg list. */
309 sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
310 sg_dma_address(sgl) = page_to_phys(pages[i]);
321 exynos_gem_put_pages(obj, pages);
/*
 * exynos_drm_gem_put_pages - tear down the sg_table and free the pages of a
 * non-contiguous object (counterpart of exynos_drm_gem_get_pages()).
 */
326 static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
328 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
329 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
332 * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
333 * allocated at gem fault handler.
335 sg_free_table(buf->sgt);
339 exynos_gem_put_pages(obj, buf->pages);
342 /* add some codes for UNCACHED type here. TODO */
/*
 * exynos_drm_put_userptr - release a userptr-backed object's pinned memory.
 * PFNMAP regions only need the copied VMA dropped via put_vma(); otherwise
 * the VMAs are unlocked and every pinned page is marked dirty and put back.
 */
345 static void exynos_drm_put_userptr(struct drm_gem_object *obj)
347 struct exynos_drm_gem_obj *exynos_gem_obj;
348 struct exynos_drm_gem_buf *buf;
349 struct vm_area_struct *vma;
352 exynos_gem_obj = to_exynos_gem_obj(obj);
353 buf = exynos_gem_obj->buffer;
354 vma = exynos_gem_obj->vma;
356 if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
357 put_vma(exynos_gem_obj->vma);
361 npages = buf->size >> PAGE_SHIFT;
363 if (exynos_gem_obj->flags & EXYNOS_BO_USERPTR && !buf->pfnmap)
364 lock_userptr_vma(buf, 0);
367 while (npages >= 0) {
369 set_page_dirty_lock(buf->pages[npages]);
371 put_page(buf->pages[npages]);
/*
 * exynos_drm_gem_handle_create - allocate a userspace handle for the object
 * and drop the allocation reference (the handle then owns the object).
 */
383 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
384 struct drm_file *file_priv,
385 unsigned int *handle)
390 * allocate a id of idr table where the obj is registered
391 * and handle has the id what user can see.
393 ret = drm_gem_handle_create(file_priv, obj, handle);
397 DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
399 /* drop reference from allocate - handle holds it now. */
400 drm_gem_object_unreference_unlocked(obj);
/*
 * exynos_drm_gem_destroy - final teardown of an exynos GEM object.
 * Notifies the private buffer manager (release_buffer), skips freeing
 * imported dma-buf memory (the exporter owns it), unmaps any IOMMU mapping,
 * releases the backing store per buffer type (pages / userptr / contiguous),
 * frees the mmap offset, and finally the object itself.
 */
405 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
407 struct drm_gem_object *obj;
408 struct exynos_drm_gem_buf *buf;
409 struct exynos_drm_private *private;
411 DRM_DEBUG_KMS("%s\n", __FILE__);
413 obj = &exynos_gem_obj->base;
414 private = obj->dev->dev_private;
415 buf = exynos_gem_obj->buffer;
417 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
420 * release a private buffer from its table.
422 * this callback will release a ump object only if user requested
423 * ump export otherwise just return.
425 if (private_cb && private_cb->release_buffer)
426 private_cb->release_buffer(exynos_gem_obj->priv_handle);
432 * do not release memory region from exporter.
434 * the region will be released by exporter
435 * once dmabuf's refcount becomes 0.
437 if (obj->import_attach)
441 exynos_drm_iommu_unmap_gem(obj);
443 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
444 exynos_drm_gem_put_pages(obj);
445 else if (exynos_gem_obj->flags & EXYNOS_BO_USERPTR)
446 exynos_drm_put_userptr(obj);
448 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
451 exynos_drm_fini_buf(obj->dev, buf);
452 exynos_gem_obj->buffer = NULL;
454 if (obj->map_list.map)
455 drm_gem_free_mmap_offset(obj);
457 /* release file pointer to gem object. */
458 drm_gem_object_release(obj);
460 kfree(exynos_gem_obj);
461 exynos_gem_obj = NULL;
/*
 * exynos_drm_gem_get_obj - look up the exynos object behind a GEM handle.
 *
 * NOTE(review): the lookup reference is dropped before the pointer is
 * returned, so the caller relies on the handle keeping the object alive --
 * verify callers hold the handle for the pointer's lifetime.
 */
464 struct exynos_drm_gem_obj *exynos_drm_gem_get_obj(struct drm_device *dev,
465 unsigned int gem_handle,
466 struct drm_file *file_priv)
468 struct exynos_drm_gem_obj *exynos_gem_obj;
469 struct drm_gem_object *obj;
471 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
473 DRM_ERROR("failed to lookup gem object.\n");
474 return ERR_PTR(-EINVAL);
477 exynos_gem_obj = to_exynos_gem_obj(obj);
479 drm_gem_object_unreference_unlocked(obj);
481 return exynos_gem_obj;
/*
 * exynos_drm_gem_get_size - return the buffer size behind a GEM handle.
 * Same lookup/unreference-before-use pattern as exynos_drm_gem_get_obj():
 * the buffer is dereferenced after the reference is dropped, relying on the
 * handle to keep the object alive.
 */
484 unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
485 unsigned int gem_handle,
486 struct drm_file *file_priv)
488 struct exynos_drm_gem_obj *exynos_gem_obj;
489 struct drm_gem_object *obj;
491 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
493 DRM_ERROR("failed to lookup gem object.\n");
497 exynos_gem_obj = to_exynos_gem_obj(obj);
499 drm_gem_object_unreference_unlocked(obj);
501 return exynos_gem_obj->buffer->size;
/*
 * exynos_drm_gem_init - allocate an exynos_drm_gem_obj and initialize its
 * embedded drm_gem_object of the given size.  Returns NULL-ish on failure
 * (kzalloc or drm_gem_object_init error paths; exact returns not visible).
 */
505 struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
508 struct exynos_drm_gem_obj *exynos_gem_obj;
509 struct drm_gem_object *obj;
512 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
513 if (!exynos_gem_obj) {
514 DRM_ERROR("failed to allocate exynos gem object\n");
518 exynos_gem_obj->size = size;
519 obj = &exynos_gem_obj->base;
521 ret = drm_gem_object_init(dev, obj, size);
523 DRM_ERROR("failed to initialize gem object\n");
524 kfree(exynos_gem_obj);
528 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
530 return exynos_gem_obj;
/*
 * exynos_drm_gem_create - allocate a GEM buffer object.
 * Validates flags, rounds the size per roundup_gem_size(), creates the
 * buffer/object pair, then backs it with either per-page allocations
 * (EXYNOS_BO_NONCONTIG) or a contiguous allocation, optionally maps it
 * through the IOMMU, and records the resulting DMA address.  packed_size
 * preserves the caller's original (unrounded) size.
 */
533 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
537 struct exynos_drm_gem_obj *exynos_gem_obj;
538 struct exynos_drm_private *private = dev->dev_private;
539 struct exynos_drm_gem_buf *buf;
540 unsigned long packed_size = size;
544 DRM_ERROR("invalid size.\n");
545 return ERR_PTR(-EINVAL);
548 size = roundup_gem_size(size, flags);
549 DRM_DEBUG_KMS("%s\n", __FILE__);
551 ret = check_gem_flags(flags);
555 buf = exynos_drm_init_buf(dev, size);
557 return ERR_PTR(-ENOMEM);
559 exynos_gem_obj = exynos_drm_gem_init(dev, size);
560 if (!exynos_gem_obj) {
565 exynos_gem_obj->packed_size = packed_size;
566 exynos_gem_obj->buffer = buf;
568 /* set memory type and cache attribute from user side. */
569 exynos_gem_obj->flags = flags;
572 * allocate all pages as desired size if user wants to allocate
573 * physically non-continuous memory.
575 if (flags & EXYNOS_BO_NONCONTIG) {
576 ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
578 drm_gem_object_release(&exynos_gem_obj->base);
582 ret = exynos_drm_alloc_buf(dev, buf, flags);
584 drm_gem_object_release(&exynos_gem_obj->base);
590 exynos_gem_obj->vmm = private->vmm;
592 buf->dev_addr = exynos_drm_iommu_map_gem(dev,
593 &exynos_gem_obj->base);
594 if (!buf->dev_addr) {
595 DRM_ERROR("failed to map gem with iommu table.\n");
598 if (flags & EXYNOS_BO_NONCONTIG)
599 exynos_drm_gem_put_pages(&exynos_gem_obj->base);
601 exynos_drm_free_buf(dev, flags, buf);
603 drm_gem_object_release(&exynos_gem_obj->base);
608 buf->dma_addr = buf->dev_addr;
610 buf->dma_addr = buf->paddr;
612 DRM_DEBUG_KMS("dma_addr = 0x%x\n", buf->dma_addr);
614 return exynos_gem_obj;
617 exynos_drm_fini_buf(dev, buf);
/*
 * DRM_EXYNOS_GEM_CREATE ioctl: allocate a buffer per args->flags/size and
 * return a handle; the object is destroyed if handle creation fails.
 */
621 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
622 struct drm_file *file_priv)
624 struct drm_exynos_gem_create *args = data;
625 struct exynos_drm_gem_obj *exynos_gem_obj;
628 DRM_DEBUG_KMS("%s\n", __FILE__);
630 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
631 if (IS_ERR(exynos_gem_obj))
632 return PTR_ERR(exynos_gem_obj);
634 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
637 exynos_drm_gem_destroy(exynos_gem_obj);
/*
 * exynos_drm_gem_get_dma_addr - look up a handle and return a pointer to the
 * buffer's dma_addr; the looked-up object pointer is passed back through
 * *gem_obj so exynos_drm_gem_put_dma_addr() can drop the reference later.
 *
 * NOTE(review): storing the object pointer as "unsigned int" truncates on
 * 64-bit builds -- the out-parameter should be pointer-sized.
 */
644 void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
645 unsigned int gem_handle,
646 struct drm_file *filp,
647 unsigned int *gem_obj)
649 struct exynos_drm_gem_obj *exynos_gem_obj;
650 struct exynos_drm_gem_buf *buf;
651 struct drm_gem_object *obj;
653 obj = drm_gem_object_lookup(dev, filp, gem_handle);
655 DRM_ERROR("failed to lookup gem object.\n");
656 return ERR_PTR(-EINVAL);
659 exynos_gem_obj = to_exynos_gem_obj(obj);
660 buf = exynos_gem_obj->buffer;
662 *gem_obj = (unsigned int)obj;
664 return &buf->dma_addr;
/*
 * exynos_drm_gem_put_dma_addr - drop the reference that
 * exynos_drm_gem_get_dma_addr() took on the object.
 */
667 void exynos_drm_gem_put_dma_addr(struct drm_device *dev, void *gem_obj)
669 struct exynos_drm_gem_obj *exynos_gem_obj;
670 struct drm_gem_object *obj;
675 /* use gem handle instead of object. TODO */
679 exynos_gem_obj = to_exynos_gem_obj(obj);
682 * unreference this gem object because this had already been
683 * referenced at exynos_drm_gem_get_dma_addr().
685 drm_gem_object_unreference_unlocked(obj);
/*
 * DRM_EXYNOS_GEM_MAP_OFFSET ioctl: thin wrapper delegating to
 * exynos_drm_gem_dumb_map_offset() after verifying DRIVER_GEM support.
 */
688 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
689 struct drm_file *file_priv)
691 struct drm_exynos_gem_map_off *args = data;
693 DRM_DEBUG_KMS("%s\n", __FILE__);
695 DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
696 args->handle, (unsigned long)args->offset);
698 if (!(dev->driver->driver_features & DRIVER_GEM)) {
699 DRM_ERROR("does not support GEM.\n");
703 return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
/*
 * exynos_drm_gem_mmap_buffer - mmap fop installed on the GEM object's file by
 * exynos_drm_gem_mmap_ioctl().  Applies the BO cache attributes, checks the
 * requested size against the buffer, then maps either page-by-page with
 * vm_insert_page() (non-contiguous) or in one remap_pfn_range() call
 * (contiguous, from buffer->paddr).
 */
707 static int exynos_drm_gem_mmap_buffer(struct file *filp,
708 struct vm_area_struct *vma)
710 struct drm_gem_object *obj = filp->private_data;
711 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
712 struct exynos_drm_gem_buf *buffer;
713 unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
716 DRM_DEBUG_KMS("%s\n", __FILE__);
718 vma->vm_flags |= (VM_IO | VM_RESERVED);
720 update_vm_cache_attr(exynos_gem_obj, vma);
724 vm_size = usize = vma->vm_end - vma->vm_start;
727 * a buffer contains information to physically continuous memory
728 * allocated by user request or at framebuffer creation.
730 buffer = exynos_gem_obj->buffer;
732 /* check if user-requested size is valid. */
733 if (vm_size > buffer->size)
736 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
742 vma->vm_flags |= VM_MIXEDMAP;
745 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
747 DRM_ERROR("failed to remap user space.\n");
756 * get page frame number to physical memory to be mapped
759 pfn = ((unsigned long)exynos_gem_obj->buffer->paddr) >>
762 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
764 if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
765 vma->vm_page_prot)) {
766 DRM_ERROR("failed to remap pfn range.\n");
/* File ops swapped onto obj->filp by exynos_drm_gem_mmap_ioctl(). */
774 static const struct file_operations exynos_drm_gem_fops = {
775 .mmap = exynos_drm_gem_mmap_buffer,
/*
 * DRM_EXYNOS_GEM_MMAP ioctl: map a GEM buffer into the caller's address
 * space.  Overwrites the GEM object's shmem file ops with
 * exynos_drm_gem_fops so the do_mmap() below lands in
 * exynos_drm_gem_mmap_buffer(), then returns the mapped address in
 * args->mapped.
 *
 * NOTE(review): "¤t" below is mojibake for "&current" -- restore the
 * encoding before building.
 */
778 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
779 struct drm_file *file_priv)
781 struct drm_exynos_gem_mmap *args = data;
782 struct drm_gem_object *obj;
785 DRM_DEBUG_KMS("%s\n", __FILE__);
787 if (!(dev->driver->driver_features & DRIVER_GEM)) {
788 DRM_ERROR("does not support GEM.\n");
792 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
794 DRM_ERROR("failed to lookup gem object.\n");
798 obj->filp->f_op = &exynos_drm_gem_fops;
799 obj->filp->private_data = obj;
801 down_write(¤t->mm->mmap_sem);
802 addr = do_mmap(obj->filp, 0, args->size,
803 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
804 up_write(¤t->mm->mmap_sem);
806 drm_gem_object_unreference_unlocked(obj);
808 if (IS_ERR((void *)addr))
809 return PTR_ERR((void *)addr);
813 DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
/*
 * exynos_drm_get_userptr - resolve a userspace pointer into backing memory.
 * Two cases:
 *  - the range lives in a VM_PFNMAP vma: walk it with follow_pfn(),
 *    checking physical contiguity, record buf->paddr and fill the sg list,
 *    and keep a copy of the vma via get_vma();
 *  - normal memory: lock the VMAs against swap-out (lock_userptr_vma())
 *    and pin the pages with get_user_pages().
 * Returns the number of pages obtained (see caller's comparison).
 *
 * NOTE(review): "¤t" below is mojibake for "&current" -- restore the
 * encoding before building.
 */
818 static int exynos_drm_get_userptr(struct drm_device *dev,
819 struct exynos_drm_gem_obj *obj,
820 unsigned long userptr,
823 unsigned int get_npages;
824 unsigned long npages = 0;
825 struct vm_area_struct *vma;
826 struct exynos_drm_gem_buf *buf = obj->buffer;
829 down_read(¤t->mm->mmap_sem);
830 vma = find_vma(current->mm, userptr);
832 /* the memory region mmaped with VM_PFNMAP. */
833 if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
834 unsigned long this_pfn, prev_pfn, pa;
835 unsigned long start, end, offset;
836 struct scatterlist *sgl;
840 offset = userptr & ~PAGE_MASK;
841 end = start + buf->size;
844 for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
845 ret = follow_pfn(vma, start, &this_pfn);
850 pa = this_pfn << PAGE_SHIFT;
851 buf->paddr = pa + offset;
852 } else if (this_pfn != prev_pfn + 1) {
857 sg_dma_address(sgl) = (pa + offset);
858 sg_dma_len(sgl) = PAGE_SIZE;
865 obj->vma = get_vma(vma);
871 up_read(¤t->mm->mmap_sem);
877 up_read(¤t->mm->mmap_sem);
882 up_read(¤t->mm->mmap_sem);
885 * lock the vma within userptr to avoid userspace buffer
886 * from being swapped out.
888 ret = lock_userptr_vma(buf, 1);
890 DRM_ERROR("failed to lock vma for userptr.\n");
891 lock_userptr_vma(buf, 0);
896 npages = buf->size >> PAGE_SHIFT;
898 down_read(¤t->mm->mmap_sem);
899 get_npages = get_user_pages(current, current->mm, userptr,
900 npages, write, 1, buf->pages, NULL);
901 up_read(¤t->mm->mmap_sem);
902 if (get_npages != npages)
903 DRM_ERROR("failed to get user_pages.\n");
905 buf->userptr = userptr;
/*
 * DRM_EXYNOS_GEM_USERPTR ioctl: wrap user memory in a GEM object.
 * Builds buffer + object, allocates sgt and pages[] array, pins the user
 * range via exynos_drm_get_userptr(), creates a handle, fills the sg list
 * (unless the range was PFNMAP, which filled it already), and optionally
 * maps through the IOMMU.  Error labels unwind in reverse order: handle,
 * pinned pages, pages[], sgt, object, buffer.
 */
911 int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
912 struct drm_file *file_priv)
914 struct exynos_drm_private *priv = dev->dev_private;
915 struct exynos_drm_gem_obj *exynos_gem_obj;
916 struct drm_exynos_gem_userptr *args = data;
917 struct exynos_drm_gem_buf *buf;
918 struct scatterlist *sgl;
919 unsigned long size, userptr, packed_size;
923 DRM_DEBUG_KMS("%s\n", __FILE__);
926 DRM_ERROR("invalid size.\n");
930 ret = check_gem_flags(args->flags);
934 packed_size = args->size;
936 size = roundup_gem_size(args->size, EXYNOS_BO_USERPTR);
938 userptr = args->userptr;
940 buf = exynos_drm_init_buf(dev, size);
944 exynos_gem_obj = exynos_drm_gem_init(dev, size);
945 if (!exynos_gem_obj) {
947 goto err_free_buffer;
950 exynos_gem_obj->packed_size = packed_size;
952 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
954 DRM_ERROR("failed to allocate buf->sgt.\n");
956 goto err_release_gem;
959 npages = size >> PAGE_SHIFT;
961 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
963 DRM_ERROR("failed to initailize sg table.\n");
967 buf->pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
969 DRM_ERROR("failed to allocate buf->pages\n");
974 exynos_gem_obj->buffer = buf;
976 get_npages = exynos_drm_get_userptr(dev, exynos_gem_obj, userptr, 1);
977 if (get_npages != npages) {
978 DRM_ERROR("failed to get user_pages.\n");
980 goto err_release_userptr;
983 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
986 DRM_ERROR("failed to create gem handle.\n");
987 goto err_release_userptr;
993 * if buf->pfnmap is true then update sgl of sgt with pages but
994 * if buf->pfnmap is false then it means the sgl was updated already
995 * so it doesn't need to update the sgl.
1000 /* set all pages to sg list. */
1001 while (i < npages) {
1002 sg_set_page(sgl, buf->pages[i], PAGE_SIZE, 0);
1003 sg_dma_address(sgl) = page_to_phys(buf->pages[i]);
1009 /* always use EXYNOS_BO_USERPTR as memory type for userptr. */
1010 exynos_gem_obj->flags |= EXYNOS_BO_USERPTR;
1013 exynos_gem_obj->vmm = priv->vmm;
1015 buf->dev_addr = exynos_drm_iommu_map_gem(dev,
1016 &exynos_gem_obj->base);
1017 if (!buf->dev_addr) {
1018 DRM_ERROR("failed to map gem with iommu table.\n");
1021 exynos_drm_free_buf(dev, exynos_gem_obj->flags, buf);
1023 drm_gem_object_release(&exynos_gem_obj->base);
1025 goto err_release_handle;
1028 buf->dma_addr = buf->dev_addr;
1030 buf->dma_addr = buf->paddr;
1035 drm_gem_handle_delete(file_priv, args->handle);
1036 err_release_userptr:
1038 while (get_npages >= 0)
1039 put_page(buf->pages[get_npages--]);
1043 sg_free_table(buf->sgt);
1048 drm_gem_object_release(&exynos_gem_obj->base);
1049 kfree(exynos_gem_obj);
1050 exynos_gem_obj = NULL;
1052 exynos_drm_free_buf(dev, 0, buf);
/*
 * DRM_EXYNOS_GEM_GET ioctl: report an object's flags and size back to
 * userspace, under struct_mutex with a temporary lookup reference.
 */
1056 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
1057 struct drm_file *file_priv)
1058 { struct exynos_drm_gem_obj *exynos_gem_obj;
1059 struct drm_exynos_gem_info *args = data;
1060 struct drm_gem_object *obj;
1062 mutex_lock(&dev->struct_mutex);
1064 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1066 DRM_ERROR("failed to lookup gem object.\n");
1067 mutex_unlock(&dev->struct_mutex);
1071 exynos_gem_obj = to_exynos_gem_obj(obj);
1073 args->flags = exynos_gem_obj->flags;
1074 args->size = exynos_gem_obj->size;
1076 drm_gem_object_unreference(obj);
1077 mutex_unlock(&dev->struct_mutex);
/*
 * DRM_EXYNOS_GEM_EXPORT_UMP ioctl: register a GEM buffer with the private
 * (UMP) buffer manager and hand the resulting secure id back to userspace.
 */
1082 int exynos_drm_gem_export_ump_ioctl(struct drm_device *dev, void *data,
1083 struct drm_file *file)
1085 struct exynos_drm_gem_obj *exynos_gem_obj;
1086 struct drm_gem_object *obj;
1087 struct drm_exynos_gem_ump *ump = data;
1090 DRM_DEBUG_KMS("%s\n", __FILE__);
1092 mutex_lock(&dev->struct_mutex);
1094 obj = drm_gem_object_lookup(dev, file, ump->gem_handle);
1096 DRM_ERROR("failed to lookup gem object.\n");
1097 mutex_unlock(&dev->struct_mutex);
1101 exynos_gem_obj = to_exynos_gem_obj(obj);
1103 /* register gem buffer to private buffer. */
1104 ret = register_buf_to_priv_mgr(exynos_gem_obj,
1105 (unsigned int *)&exynos_gem_obj->priv_handle,
1106 (unsigned int *)&exynos_gem_obj->priv_id);
1108 goto err_unreference_gem;
1110 ump->secure_id = exynos_gem_obj->priv_id;
1111 drm_gem_object_unreference(obj);
1113 mutex_unlock(&dev->struct_mutex);
1115 DRM_DEBUG_KMS("got secure id = %d\n", ump->secure_id);
1119 err_unreference_gem:
1120 drm_gem_object_unreference(obj);
1121 mutex_unlock(&dev->struct_mutex);
/*
 * exynos_gem_l1_cache_ops - L1 cache maintenance for a cache-op request.
 * FSH_ALL flushes either every core's L1 (flush_all_cpu_caches) or just the
 * current core; FSH_RANGE flushes the user virtual range after validating
 * that a VMA covers op->usr_addr.
 *
 * NOTE(review): "¤t" below is mojibake for "&current" -- restore the
 * encoding before building.
 */
1126 static int exynos_gem_l1_cache_ops(struct drm_device *drm_dev,
1127 struct drm_exynos_gem_cache_op *op) {
1128 if (op->flags & EXYNOS_DRM_CACHE_FSH_ALL) {
1130 * cortex-A9 core has individual l1 cache so flush l1 caches
1131 * for all cores but other cores should be considered later.
1134 if (op->flags & EXYNOS_DRM_ALL_CORES)
1135 flush_all_cpu_caches();
1137 __cpuc_flush_user_all();
1139 } else if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE) {
1140 struct vm_area_struct *vma;
1142 down_read(¤t->mm->mmap_sem);
1143 vma = find_vma(current->mm, op->usr_addr);
1144 up_read(¤t->mm->mmap_sem);
1147 DRM_ERROR("failed to get vma.\n");
1151 __cpuc_flush_user_range(op->usr_addr, op->usr_addr + op->size,
/*
 * exynos_gem_l2_cache_ops - L2 (outer, PIPT) cache maintenance.
 * Range ops: for a VM_PFNMAP vma the region is physically contiguous, so a
 * single outer_{flush,inv,clean}_range on [pfn<<PAGE_SHIFT, +size) suffices;
 * otherwise the object's sg list is walked and the op applied per page.
 * Whole-cache ops (FSH/INV/CLN_ALL) are dispatched at the end (call targets
 * missing from this listing).
 *
 * NOTE(review): "¤t" below is mojibake for "&current" -- restore the
 * encoding before building.
 */
1158 static int exynos_gem_l2_cache_ops(struct drm_device *drm_dev,
1159 struct drm_file *filp,
1160 struct drm_exynos_gem_cache_op *op)
1162 if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE ||
1163 op->flags & EXYNOS_DRM_CACHE_INV_RANGE ||
1164 op->flags & EXYNOS_DRM_CACHE_CLN_RANGE) {
1165 unsigned long virt_start = op->usr_addr, pfn;
1166 phys_addr_t phy_start, phy_end;
1167 struct vm_area_struct *vma;
1170 down_read(¤t->mm->mmap_sem);
1171 vma = find_vma(current->mm, op->usr_addr);
1172 up_read(¤t->mm->mmap_sem);
1175 DRM_ERROR("failed to get vma.\n");
1180 * Range operation to l2 cache(PIPT)
1182 if (vma && (vma->vm_flags & VM_PFNMAP)) {
1183 ret = follow_pfn(vma, virt_start, &pfn);
1185 DRM_ERROR("failed to get pfn.\n");
1190 * the memory region with VM_PFNMAP is contiguous
1191 * physically so do range operagion just one time.
1193 phy_start = pfn << PAGE_SHIFT;
1194 phy_end = phy_start + op->size;
1196 if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
1197 outer_flush_range(phy_start, phy_end);
1198 else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
1199 outer_inv_range(phy_start, phy_end);
1200 else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
1201 outer_clean_range(phy_start, phy_end);
1205 struct exynos_drm_gem_obj *exynos_obj;
1206 struct exynos_drm_gem_buf *buf;
1207 struct drm_gem_object *obj;
1208 struct scatterlist *sgl;
1209 unsigned int npages, i = 0;
1211 mutex_lock(&drm_dev->struct_mutex);
1213 obj = drm_gem_object_lookup(drm_dev, filp,
1216 DRM_ERROR("failed to lookup gem object.\n");
1217 mutex_unlock(&drm_dev->struct_mutex);
1221 exynos_obj = to_exynos_gem_obj(obj);
1222 buf = exynos_obj->buffer;
1223 npages = buf->size >> PAGE_SHIFT;
1224 sgl = buf->sgt->sgl;
1226 drm_gem_object_unreference(obj);
1227 mutex_unlock(&drm_dev->struct_mutex);
1230 * in this case, the memory region is non-contiguous
1231 * physically so do range operation to all the pages.
1233 while (i < npages) {
1234 phy_start = sg_dma_address(sgl);
1235 phy_end = phy_start + buf->page_size;
1237 if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
1238 outer_flush_range(phy_start, phy_end);
1239 else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
1240 outer_inv_range(phy_start, phy_end);
1241 else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
1242 outer_clean_range(phy_start, phy_end);
1252 if (op->flags & EXYNOS_DRM_CACHE_FSH_ALL)
1254 else if (op->flags & EXYNOS_DRM_CACHE_INV_ALL)
1256 else if (op->flags & EXYNOS_DRM_CACHE_CLN_ALL)
1259 DRM_ERROR("invalid l2 cache operation.\n");
/*
 * DRM_EXYNOS_GEM_CACHE_OP ioctl: validate the flags, promote range ops of
 * >= 1MB to whole-cache ops (range maintenance that large is slower), then
 * dispatch to the L1 and/or L2 helpers per the cache-select flags.
 */
1267 int exynos_drm_gem_cache_op_ioctl(struct drm_device *drm_dev, void *data,
1268 struct drm_file *file_priv)
1270 struct drm_exynos_gem_cache_op *op = data;
1273 DRM_DEBUG_KMS("%s\n", __FILE__);
1275 ret = check_cache_flags(op->flags);
1280 * do cache operation for all cache range if op->size is bigger
1281 * than SZ_1M because cache range operation with bit size has
1284 if (op->size >= SZ_1M) {
1285 if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE) {
1286 if (op->flags & EXYNOS_DRM_L1_CACHE)
1287 __cpuc_flush_user_all();
1289 if (op->flags & EXYNOS_DRM_L2_CACHE)
1293 } else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE) {
1294 if (op->flags & EXYNOS_DRM_L2_CACHE)
1298 } else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE) {
1299 if (op->flags & EXYNOS_DRM_L2_CACHE)
1306 if (op->flags & EXYNOS_DRM_L1_CACHE ||
1307 op->flags & EXYNOS_DRM_ALL_CACHES) {
1308 ret = exynos_gem_l1_cache_ops(drm_dev, op);
1313 if (op->flags & EXYNOS_DRM_L2_CACHE ||
1314 op->flags & EXYNOS_DRM_ALL_CACHES)
1315 ret = exynos_gem_l2_cache_ops(drm_dev, file_priv, op);
1320 /* temporary functions. */
1321 #ifndef CONFIG_SLP_DMABUF
/*
 * DRM_EXYNOS_GEM_GET_PHY ioctl (no CONFIG_SLP_DMABUF): return the physical
 * address and size of a buffer; rejected for non-contiguous objects since
 * they have no single physical address.
 */
1322 int exynos_drm_gem_get_phy_ioctl(struct drm_device *drm_dev, void *data,
1323 struct drm_file *file_priv)
1325 struct drm_exynos_gem_get_phy *get_phy = data;
1326 struct exynos_drm_gem_obj *exynos_gem_obj;
1327 struct drm_gem_object *obj;
1329 DRM_DEBUG_KMS("%s\n", __FILE__);
1331 mutex_lock(&drm_dev->struct_mutex);
1333 obj = drm_gem_object_lookup(drm_dev, file_priv, get_phy->gem_handle);
1335 DRM_ERROR("failed to lookup gem object.\n");
1336 mutex_unlock(&drm_dev->struct_mutex);
1340 exynos_gem_obj = to_exynos_gem_obj(obj);
1343 * we can get physical address only for EXYNOS_DRM_GEM_PC memory type.
1345 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
1346 DRM_DEBUG_KMS("not physically continuous memory type.\n");
1347 drm_gem_object_unreference(obj);
1348 mutex_unlock(&drm_dev->struct_mutex);
1352 get_phy->phy_addr = exynos_gem_obj->buffer->paddr;
1353 get_phy->size = exynos_gem_obj->buffer->size;
1355 drm_gem_object_unreference(obj);
1356 mutex_unlock(&drm_dev->struct_mutex);
/*
 * DRM_EXYNOS_GEM_PHY_IMP ioctl: wrap an externally-allocated physically
 * contiguous region (args->phy_addr/size) in a GEM object.  buffer->shared
 * is set so teardown will not free memory this driver does not own.  A
 * per-page sg list is built from the physical range, then the buffer is
 * optionally mapped through the IOMMU.  Error labels unwind in reverse.
 */
1362 int exynos_drm_gem_phy_imp_ioctl(struct drm_device *drm_dev, void *data,
1363 struct drm_file *file_priv)
1365 struct drm_exynos_gem_phy_imp *args = data;
1366 struct exynos_drm_gem_obj *exynos_gem_obj;
1367 struct exynos_drm_private *private = drm_dev->dev_private;
1368 struct exynos_drm_gem_buf *buffer;
1369 unsigned long size, packed_size;
1370 unsigned int flags = EXYNOS_BO_CONTIG;
1371 unsigned int npages, i = 0;
1372 struct scatterlist *sgl;
1373 dma_addr_t start_addr;
1376 DRM_DEBUG_KMS("%s\n", __FILE__);
1378 packed_size = args->size;
1379 size = roundup(args->size, PAGE_SIZE);
1381 exynos_gem_obj = exynos_drm_gem_init(drm_dev, size);
1382 if (!exynos_gem_obj)
1385 buffer = exynos_drm_init_buf(drm_dev, size);
1387 DRM_DEBUG_KMS("failed to allocate buffer\n");
1389 goto err_release_gem_obj;
1392 exynos_gem_obj->packed_size = packed_size;
1393 buffer->paddr = (dma_addr_t)args->phy_addr;
1394 buffer->size = size;
1397 * if shared is true, this bufer wouldn't be released.
1398 * this buffer was allocated by other so don't release it.
1400 buffer->shared = true;
1402 exynos_gem_obj->buffer = buffer;
1404 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
1409 DRM_DEBUG_KMS("got gem handle = 0x%x\n", args->gem_handle);
1411 npages = buffer->size >> PAGE_SHIFT;
1412 buffer->page_size = PAGE_SIZE;
1414 buffer->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1416 DRM_ERROR("failed to allocate sg table.\n");
1418 goto err_release_handle;
1421 ret = sg_alloc_table(buffer->sgt, npages, GFP_KERNEL);
1423 DRM_ERROR("failed to initialize sg table.\n");
1427 buffer->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
/*
 * NOTE(review): the element type above should be sizeof(struct page *) --
 * buffer->pages holds pointers (compare the userptr path) -- so this
 * over-allocates by sizeof(struct page)/sizeof(void *) per entry.
 */
1428 if (!buffer->pages) {
1429 DRM_ERROR("failed to allocate pages.\n");
1431 goto err_sg_free_table;
1434 sgl = buffer->sgt->sgl;
1435 start_addr = buffer->paddr;
1437 while (i < npages) {
1438 buffer->pages[i] = phys_to_page(start_addr);
1439 sg_set_page(sgl, buffer->pages[i], buffer->page_size, 0);
1440 sg_dma_address(sgl) = start_addr;
1441 start_addr += buffer->page_size;
1447 exynos_gem_obj->vmm = private->vmm;
1449 buffer->dev_addr = exynos_drm_iommu_map_gem(drm_dev,
1450 &exynos_gem_obj->base);
1451 if (!buffer->dev_addr) {
1452 DRM_ERROR("failed to map gem with iommu table.\n");
1455 exynos_drm_free_buf(drm_dev, flags, buffer);
1457 drm_gem_object_release(&exynos_gem_obj->base);
1459 goto err_free_pages;
1462 buffer->dma_addr = buffer->dev_addr;
1464 buffer->dma_addr = buffer->paddr;
1466 DRM_DEBUG_KMS("dma_addr = 0x%x\n", buffer->dma_addr);
1471 kfree(buffer->pages);
1472 buffer->pages = NULL;
1474 sg_free_table(buffer->sgt);
1479 drm_gem_handle_delete(file_priv, args->gem_handle);
1481 exynos_drm_fini_buf(drm_dev, buffer);
1482 err_release_gem_obj:
1483 drm_gem_object_release(&exynos_gem_obj->base);
1484 kfree(exynos_gem_obj);
/* GEM init_object callback -- nothing to initialize per-object here. */
1488 int exynos_drm_gem_init_object(struct drm_gem_object *obj)
1490 DRM_DEBUG_KMS("%s\n", __FILE__);
/*
 * GEM free_object callback: tear down a dma-buf import (sg table owned by
 * drm_prime) if present, then destroy the exynos object.
 */
1495 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
1497 struct exynos_drm_gem_obj *exynos_gem_obj;
1498 struct exynos_drm_gem_buf *buf;
1500 DRM_DEBUG_KMS("%s\n", __FILE__);
1502 exynos_gem_obj = to_exynos_gem_obj(obj);
1503 buf = exynos_gem_obj->buffer;
1505 if (obj->import_attach)
1506 drm_prime_gem_destroy(obj, buf->sgt);
1508 exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
/*
 * Dumb-buffer create callback (DRM_IOCTL_MODE_CREATE_DUMB): derive pitch and
 * size from width/height/bpp, allocate via exynos_drm_gem_create(), and
 * return a handle (object destroyed on handle failure).
 */
1511 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
1512 struct drm_device *dev,
1513 struct drm_mode_create_dumb *args)
1515 struct exynos_drm_gem_obj *exynos_gem_obj;
1518 DRM_DEBUG_KMS("%s\n", __FILE__);
1521 * alocate memory to be used for framebuffer.
1522 * - this callback would be called by user application
1523 * with DRM_IOCTL_MODE_CREATE_DUMB command.
1526 args->pitch = args->width * args->bpp >> 3;
1527 args->size = args->pitch * args->height;
1529 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
1530 if (IS_ERR(exynos_gem_obj))
1531 return PTR_ERR(exynos_gem_obj);
1533 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
1536 exynos_drm_gem_destroy(exynos_gem_obj);
/*
 * Dumb-buffer map-offset callback (DRM_IOCTL_MODE_MAP_DUMB): create the
 * mmap offset on first use and return the fake offset (hash key << PAGE_SHIFT)
 * userspace passes to mmap().
 */
1543 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
1544 struct drm_device *dev, uint32_t handle,
1547 struct drm_gem_object *obj;
1550 DRM_DEBUG_KMS("%s\n", __FILE__);
1552 mutex_lock(&dev->struct_mutex);
1555 * get offset of memory allocated for drm framebuffer.
1556 * - this callback would be called by user application
1557 * with DRM_IOCTL_MODE_MAP_DUMB command.
1560 obj = drm_gem_object_lookup(dev, file_priv, handle);
1562 DRM_ERROR("failed to lookup gem object.\n");
1567 if (!obj->map_list.map) {
1568 ret = drm_gem_create_mmap_offset(obj);
1573 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
1574 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
1577 drm_gem_object_unreference(obj);
1579 mutex_unlock(&dev->struct_mutex);
/*
 * Dumb-buffer destroy callback: deleting the handle drops its reference;
 * exynos_drm_gem_free_object() runs once refcount and handle count hit zero.
 */
1583 int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
1584 struct drm_device *dev,
1585 unsigned int handle)
1589 DRM_DEBUG_KMS("%s\n", __FILE__);
1592 * obj->refcount and obj->handle_count are decreased and
1593 * if both them are 0 then exynos_drm_gem_free_object()
1594 * would be called by callback to release resources.
1596 ret = drm_gem_handle_delete(file_priv, handle);
1598 DRM_ERROR("failed to delete drm_gem_handle.\n");
/* GEM close callback -- visible body only logs; remaining lines missing here. */
1605 void exynos_drm_gem_close_object(struct drm_gem_object *obj,
1606 struct drm_file *file)
1608 DRM_DEBUG_KMS("%s\n", __FILE__);
/*
 * Page-fault handler for mmapped GEM objects: compute the faulting page
 * offset within the VMA, map that page via exynos_drm_gem_map_pages() under
 * struct_mutex, and translate the errno to a VM_FAULT_* code.
 */
1613 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1615 struct drm_gem_object *obj = vma->vm_private_data;
1616 struct drm_device *dev = obj->dev;
1617 unsigned long f_vaddr;
1618 pgoff_t page_offset;
1621 page_offset = ((unsigned long)vmf->virtual_address -
1622 vma->vm_start) >> PAGE_SHIFT;
1623 f_vaddr = (unsigned long)vmf->virtual_address;
1625 mutex_lock(&dev->struct_mutex);
1627 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
1629 DRM_ERROR("failed to map pages.\n");
1631 mutex_unlock(&dev->struct_mutex);
1633 return convert_to_vm_err_msg(ret);
1636 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1638 struct exynos_drm_gem_obj *exynos_gem_obj;
1639 struct drm_gem_object *obj;
1642 DRM_DEBUG_KMS("%s\n", __FILE__);
1644 /* set vm_area_struct. */
1645 ret = drm_gem_mmap(filp, vma);
1647 DRM_ERROR("failed to mmap.\n");
1651 obj = vma->vm_private_data;
1652 exynos_gem_obj = to_exynos_gem_obj(obj);
1654 ret = check_gem_flags(exynos_gem_obj->flags);
1656 drm_gem_vm_close(vma);
1657 drm_gem_free_mmap_offset(obj);
1661 vma->vm_flags &= ~VM_PFNMAP;
1662 vma->vm_flags |= VM_MIXEDMAP;
1664 update_vm_cache_attr(exynos_gem_obj, vma);