3 * Copyright (c) 2014 Spreadtrum Communications, Inc.
5 * This program is free software: you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
19 #include <linux/shmem_fs.h>
20 #include <drm/sprd_drm.h>
21 #include <linux/sprd_iommu.h>
23 #include "video/ion_sprd.h"
24 #include "sprd_drm_drv.h"
25 #include "sprd_drm_gem.h"
26 #include "sprd_drm_buf.h"
28 static unsigned int convert_to_vm_err_msg(int msg)
36 out_msg = VM_FAULT_NOPAGE;
40 out_msg = VM_FAULT_OOM;
44 out_msg = VM_FAULT_SIGBUS;
51 static int check_gem_flags(unsigned int flags)
53 if (flags & ~(SPRD_BO_MASK | SPRD_BO_DEV_MASK))
56 #ifdef CONFIG_SPRD_IOMMU
57 if (IS_NONCONTIG_BUFFER(flags)) {
58 if (IS_DEV_OVERLAY_BUFFER(flags))
61 if (IS_DEV_SYSTEM_BUFFER(flags))
68 DRM_ERROR("invalid flags[0x%x]\n", flags);
72 static void update_vm_cache_attr(struct sprd_drm_gem_obj *obj,
73 struct vm_area_struct *vma)
75 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
77 /* non-cacheable by default. */
78 if (obj->flags & SPRD_BO_CACHABLE)
79 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
80 else if (obj->flags & SPRD_BO_WC)
82 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
85 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
88 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
90 if (!IS_NONCONTIG_BUFFER(flags)) {
91 #ifndef CONFIG_CMA_ALIGNMENT
93 return roundup(size, SECTION_SIZE);
95 /* TODO: sync with any additional alignment requirement */
97 return roundup(size, SZ_64K);
102 return roundup(size, PAGE_SIZE);
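/*
 * Note on the rounding above (a summary of the branches, not new behaviour):
 * contiguous buffers are rounded up to SECTION_SIZE (commonly 1 MiB on
 * 32-bit ARM) when CONFIG_CMA_ALIGNMENT is not set, otherwise to SZ_64K,
 * while non-contiguous buffers only need PAGE_SIZE granularity. For
 * example, a 4100-byte non-contiguous request becomes 8192 bytes (two
 * pages), whereas the same request with a contiguous type becomes 64 KiB
 * or one full section depending on the configuration.
 */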
105 struct page **sprd_gem_get_pages(struct drm_gem_object *obj,
109 struct address_space *mapping;
110 struct page *p, **pages;
113 /* This is the shared memory object that backs the GEM resource */
114 inode = obj->filp->f_path.dentry->d_inode;
115 mapping = inode->i_mapping;
117 npages = obj->size >> PAGE_SHIFT;
119 pages = drm_malloc_ab(npages, sizeof(struct page *));
121 return ERR_PTR(-ENOMEM);
123 gfpmask |= mapping_gfp_mask(mapping);
125 for (i = 0; i < npages; i++) {
126 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
136 page_cache_release(pages[i]);
138 drm_free_large(pages);
139 return ERR_CAST(p);
142 static void sprd_gem_put_pages(struct drm_gem_object *obj,
144 bool dirty, bool accessed)
148 npages = obj->size >> PAGE_SHIFT;
150 for (i = 0; i < npages; i++) {
152 set_page_dirty(pages[i]);
155 mark_page_accessed(pages[i]);
157 /* Undo the reference we took when populating the table */
158 page_cache_release(pages[i]);
161 drm_free_large(pages);
164 static int sprd_drm_gem_map_pages(struct drm_gem_object *obj,
165 struct vm_area_struct *vma,
166 unsigned long f_vaddr,
169 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
170 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
173 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
177 pfn = page_to_pfn(buf->pages[page_offset++]);
179 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
181 return vm_insert_mixed(vma, f_vaddr, pfn);
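/*
 * Fault-time mapping helper: for SPRD_BO_NONCONTIG objects the pfn comes
 * from the page array gathered from shmem, indexed by the faulting page
 * offset; for contiguous objects it is computed directly from dma_addr.
 * Either way a single page is installed with vm_insert_mixed(), which is
 * why the mmap path switches the VMA to VM_MIXEDMAP.
 */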
184 static int sprd_drm_gem_get_pages(struct drm_gem_object *obj)
186 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
187 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
188 struct scatterlist *sgl;
190 unsigned int npages, i = 0;
194 DRM_DEBUG_KMS("already allocated.\n");
198 pages = sprd_gem_get_pages(obj, GFP_KERNEL);
200 DRM_ERROR("failed to get pages.\n");
201 return PTR_ERR(pages);
204 npages = obj->size >> PAGE_SHIFT;
205 buf->page_size = PAGE_SIZE;
207 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
209 DRM_ERROR("failed to allocate sg table.\n");
214 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
216 DRM_ERROR("failed to initialize sg table.\n");
223 /* add all pages to the sg list. */
225 sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
226 sg_dma_address(sgl) = page_to_phys(pages[i]);
231 /* TODO: add handling for the UNCACHED type here. */
239 sprd_gem_put_pages(obj, pages, true, false);
244 static void sprd_drm_gem_put_pages(struct drm_gem_object *obj)
246 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
247 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
250 * if the buffer type is SPRD_BO_NONCONTIG then release all pages
251 * allocated in the gem fault handler.
253 sg_free_table(buf->sgt);
257 sprd_gem_put_pages(obj, buf->pages, true, false);
260 /* TODO: add handling for the UNCACHED type here. */
263 static int sprd_drm_gem_handle_create(struct drm_gem_object *obj,
264 struct drm_file *file_priv,
265 unsigned int *handle)
270 * allocate an id in the idr table where the obj is registered
271 * and return that id through @handle so userspace can refer to it.
273 ret = drm_gem_handle_create(file_priv, obj, handle);
277 DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
279 /* drop reference from allocate - handle holds it now. */
280 drm_gem_object_unreference_unlocked(obj);
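/*
 * After this point the object is kept alive solely by the userspace
 * handle created above: drm_gem_handle_create() took its own reference,
 * and the allocation-time reference has just been dropped.
 */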
285 void sprd_drm_gem_destroy(struct sprd_drm_gem_obj *sprd_gem_obj)
287 struct drm_gem_object *obj;
288 struct sprd_drm_gem_buf *buf;
290 obj = &sprd_gem_obj->base;
291 buf = sprd_gem_obj->buffer;
296 DRM_DEBUG("%s:o[0x%x]a[0x%x]\n", "gf",
297 (int)obj, (int)sprd_gem_obj->buffer->dma_addr);
299 sprd_drm_free_buf(obj->dev, sprd_gem_obj->flags, buf);
301 sprd_drm_fini_buf(obj->dev, buf);
302 sprd_gem_obj->buffer = NULL;
304 if (obj->map_list.map)
305 drm_gem_free_mmap_offset(obj);
307 /* release file pointer to gem object. */
308 drm_gem_object_release(obj);
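/*
 * Teardown order used above: the backing buffer is freed and detached
 * first, then the mmap offset (if one was ever created) is released, and
 * finally drm_gem_object_release() drops the shmem file backing the GEM
 * object itself.
 */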
314 struct sprd_drm_gem_obj *sprd_drm_gem_init(struct drm_device *dev,
317 struct sprd_drm_gem_obj *sprd_gem_obj;
318 struct drm_gem_object *obj;
321 sprd_gem_obj = kzalloc(sizeof(*sprd_gem_obj), GFP_KERNEL);
323 DRM_ERROR("failed to allocate sprd gem object\n");
327 sprd_gem_obj->size = size;
328 obj = &sprd_gem_obj->base;
330 ret = drm_gem_object_init(dev, obj, size);
332 DRM_ERROR("failed to initialize gem object\n");
337 sprd_gem_obj->pid = task_pid_nr(current);
338 sprd_gem_obj->tgid = task_tgid_nr(current);
340 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
345 struct sprd_drm_gem_obj *sprd_drm_gem_create(struct drm_device *dev,
346 struct sprd_drm_gem_index *args)
348 struct sprd_drm_gem_obj *sprd_gem_obj;
349 struct sprd_drm_gem_buf *buf;
350 int ret, i = 0, j, tsize = 0;
352 ret = check_gem_flags(args->flags);
356 /* TODO: check alignment of the requested sizes */
357 for (i = 0; i < args->bufcount; i++)
358 tsize += args->idx_size[i];
361 DRM_ERROR("invalid size.\n");
362 return ERR_PTR(-EINVAL);
365 tsize = roundup_gem_size(tsize, args->flags);
367 buf = sprd_drm_init_buf(dev, tsize);
369 return ERR_PTR(-ENOMEM);
371 sprd_gem_obj = sprd_drm_gem_init(dev, tsize);
377 sprd_gem_obj->buffer = buf;
379 /* set memory type and cache attribute from user side. */
380 sprd_gem_obj->flags = args->flags;
382 buf->obj = &sprd_gem_obj->base;
384 ret = sprd_drm_alloc_buf(dev, buf, args->flags);
388 memset(buf->idx_addr, 0x00, sizeof(buf->idx_addr));
389 buf->idx_addr[0] = buf->dma_addr;
390 buf->bufcount = args->bufcount;
392 for (i = 0; i < buf->bufcount; i++) {
394 if (buf->bufcount > j)
395 buf->idx_addr[j] = buf->idx_addr[i] + args->idx_size[i];
398 sprd_gem_obj->lockpid = 0;
399 INIT_LIST_HEAD(&sprd_gem_obj->wait_list);
401 for (i = 0; i < DRM_SPRD_HANDLE_WAIT_ENTRIES; i++) {
402 INIT_LIST_HEAD((struct list_head *) &sprd_gem_obj->wait_entries[i]);
403 sprd_gem_obj->wait_entries[i].pid = 0;
404 init_waitqueue_head(&sprd_gem_obj->wait_entries[i].process_wait_q);
410 drm_gem_object_release(&sprd_gem_obj->base);
413 sprd_drm_fini_buf(dev, buf);
417 int sprd_drm_gem_create_ioctl(struct drm_device *dev, void *data,
418 struct drm_file *file_priv)
420 struct drm_sprd_gem_create *args = data;
421 struct sprd_drm_gem_obj *sprd_gem_obj;
422 struct sprd_drm_gem_index gem_idx;
423 struct timeval val_start, val_end;
424 uint64_t time_start, time_end;
427 do_gettimeofday(&val_start);
428 time_start = (uint64_t)val_start.tv_sec * 1000000 + val_start.tv_usec;
431 gem_idx.idx_size[0] = args->size;
432 gem_idx.flags = args->flags;
434 sprd_gem_obj = sprd_drm_gem_create(dev, &gem_idx);
435 if (IS_ERR(sprd_gem_obj)) {
436 DRM_ERROR("failed to sprd_drm_gem_create:s[%d]f[0x%x]\n",
437 (int)args->size, args->flags);
438 return PTR_ERR(sprd_gem_obj);
441 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
444 DRM_ERROR("failed to sprd_drm_gem_handle_create:s[%d]f[0x%x]\n",
445 (int)args->size, args->flags);
446 sprd_drm_gem_destroy(sprd_gem_obj);
450 do_gettimeofday(&val_end);
451 time_end = (uint64_t)val_end.tv_sec * 1000000 + val_end.tv_usec;
453 DRM_DEBUG("%s:h[%d]s[%d]f[0x%x]o[0x%x]a[0x%x][%lld us]\n",
454 "ga",args->handle, (int)args->size, args->flags,
455 (int)&sprd_gem_obj->base,
456 (int)sprd_gem_obj->buffer->dma_addr, time_end - time_start);
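/*
 * Illustrative userspace usage (not part of this driver): this sketch
 * assumes DRM_IOCTL_SPRD_GEM_CREATE is the ioctl number exported for this
 * handler by the sprd_drm uapi header, drm_fd is an open fd on the DRM
 * device node, and use_gem_handle() is a hypothetical consumer:
 *
 *	struct drm_sprd_gem_create req = {
 *		.size  = 4096,
 *		.flags = SPRD_BO_CONTIG | SPRD_BO_CACHABLE,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_SPRD_GEM_CREATE, &req) == 0)
 *		use_gem_handle(req.handle);
 */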
461 int sprd_drm_gem_create_index_ioctl(struct drm_device *dev, void *data,
462 struct drm_file *file_priv)
464 struct sprd_drm_gem_index *args = data;
465 struct sprd_drm_gem_obj *sprd_gem_obj;
468 if (args->flags & SPRD_BO_NONCONTIG) {
469 DRM_ERROR("does not support non-contig memory\n");
473 sprd_gem_obj = sprd_drm_gem_create(dev, args);
474 if (IS_ERR(sprd_gem_obj))
475 return PTR_ERR(sprd_gem_obj);
477 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
480 sprd_drm_gem_destroy(sprd_gem_obj);
484 DRM_INFO("%s:h[%d]cnt[%d]sz[%d %d %d]f[0x%x]o[0x%x]a[0x%x]\n",
485 __func__, args->handle, args->bufcount,
486 (int)args->idx_size[0], (int)args->idx_size[1], (int)args->idx_size[2],
487 args->flags, (int)&sprd_gem_obj->base,
488 (int)sprd_gem_obj->buffer->dma_addr);
493 struct dma_buf *sprd_prime_export(struct drm_device *dev,
494 struct drm_gem_object *obj, int flags)
496 struct sprd_drm_private *private = dev->dev_private;
497 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
498 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
499 struct dma_buf *dmabuf;
501 dmabuf = ion_share_dma_buf(private->sprd_drm_ion_client,
504 pr_err("%s: dmabuf is error and dmabuf is %p!\n",
510 struct drm_gem_object *sprd_prime_import(struct drm_device *dev,
511 struct dma_buf *dma_buf)
513 struct ion_handle *ion_handle;
514 struct sprd_drm_gem_obj *sprd_gem_obj;
516 struct sprd_drm_gem_buf *buf = NULL;
517 unsigned int i = 0, nr_pages = 0, heap_id;
519 struct sprd_drm_private *private;
520 struct scatterlist *sg = NULL;
521 struct drm_gem_object *obj;
522 unsigned long sgt_size;
524 private = dev->dev_private;
525 ion_handle = get_ion_handle_from_dmabuf(private->sprd_drm_ion_client, dma_buf);
526 if (IS_ERR_OR_NULL(ion_handle)) {
527 DRM_ERROR("Unable to import dmabuf\n");
528 return ERR_PTR(-EINVAL);
531 ion_handle_get_size(private->sprd_drm_ion_client,
532 ion_handle, &size, &heap_id);
535 "cannot create GEM object from zero size ION buffer\n");
540 obj = ion_get_gem(ion_handle);
542 sprd_gem_obj = to_sprd_gem_obj(obj);
543 if (sprd_gem_obj->buffer->ion_handle != ion_handle) {
544 DRM_ERROR("Unable get GEM object from ion\n");
549 drm_gem_object_reference(obj);
550 ion_free(private->sprd_drm_ion_client, ion_handle);
555 buf = sprd_drm_init_buf(dev, size);
557 DRM_ERROR("Unable to allocate the GEM buffer\n");
562 sprd_gem_obj = sprd_drm_gem_init(dev, size);
564 DRM_ERROR("Unable to initialize GEM object\n");
568 sprd_gem_obj->buffer = buf;
569 obj = &sprd_gem_obj->base;
571 ret = ion_is_phys(private->sprd_drm_ion_client, ion_handle);
573 sprd_gem_obj->flags = SPRD_BO_NONCONTIG;
575 sprd_gem_obj->flags = SPRD_BO_CONTIG;
577 DRM_ERROR("Unable to get flag, Invalid handle\n");
581 /* ion_handle is validated in ion_is_phys, no need to check again */
582 ret = ion_is_cached(private->sprd_drm_ion_client, ion_handle);
584 sprd_gem_obj->flags |= SPRD_BO_CACHABLE;
586 if ((heap_id == ION_HEAP_ID_MASK_GSP) || (heap_id == ION_HEAP_ID_MASK_GSP_IOMMU))
587 sprd_gem_obj->flags |= SPRD_BO_DEV_GSP;
588 else if ((heap_id == ION_HEAP_ID_MASK_MM) || (heap_id == ION_HEAP_ID_MASK_MM_IOMMU))
589 sprd_gem_obj->flags |= SPRD_BO_DEV_MM;
590 else if (heap_id == ION_HEAP_ID_MASK_OVERLAY)
591 sprd_gem_obj->flags |= SPRD_BO_DEV_OVERLAY;
592 else if (heap_id == ION_HEAP_ID_MASK_SYSTEM)
593 sprd_gem_obj->flags |= SPRD_BO_DEV_SYSTEM;
595 DRM_ERROR("Heap id not supported\n");
600 buf->ion_handle = ion_handle;
601 buf->sgt = ion_sg_table(private->sprd_drm_ion_client, buf->ion_handle);
603 DRM_ERROR("failed to allocate sg table.\n");
608 buf->dma_addr = sg_dma_address(buf->sgt->sgl);
609 for_each_sg(buf->sgt->sgl, sg, buf->sgt->nents, i)
612 sgt_size = sizeof(struct page *) * nr_pages;
613 buf->pages = kzalloc(sgt_size, GFP_KERNEL | __GFP_NOWARN);
616 order = get_order(sgt_size);
617 DRM_ERROR("%s: kzalloc failed for sg list: order:%d\n",
619 buf->pages = vzalloc(sgt_size);
621 DRM_ERROR("failed to allocate pages.\n");
627 for_each_sg(buf->sgt->sgl, sg, buf->sgt->nents, i)
628 buf->pages[i] = phys_to_page(sg_dma_address(sg));
630 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
631 (unsigned long)buf->dma_addr, buf->size);
636 buf->dma_addr = (dma_addr_t)NULL;
639 sprd_gem_obj->buffer = NULL;
640 /* release file pointer to gem object. */
641 drm_gem_object_release(obj);
645 sprd_drm_fini_buf(dev, buf);
647 ion_free(private->sprd_drm_ion_client, ion_handle);
652 void *sprd_drm_gem_get_dma_addr(struct drm_device *dev,
653 unsigned int gem_handle,
654 struct drm_file *file_priv)
656 struct sprd_drm_gem_obj *sprd_gem_obj;
657 struct drm_gem_object *obj;
658 struct ion_handle *ion_handle;
659 struct sprd_drm_gem_buf *buf;
662 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
664 DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
665 return ERR_PTR(-EINVAL);
668 sprd_gem_obj = to_sprd_gem_obj(obj);
670 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
671 buf = sprd_gem_obj->buffer;
672 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
673 domain_num = IOMMU_MM;
674 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
675 domain_num = IOMMU_GSP;
677 ion_handle = buf->ion_handle;
678 if (sprd_map_iommu(ion_handle, domain_num,
679 (unsigned long *)&sprd_gem_obj->buffer->dma_addr)) {
680 DRM_ERROR("failed to map iommu:h[%d]o[0x%x]\n",
681 gem_handle, (int)obj);
682 drm_gem_object_unreference_unlocked(obj);
683 return ERR_PTR(-EINVAL);
687 DRM_DEBUG("%s:h[%d]o[0x%x]a[0x%x]\n",
688 __func__, gem_handle, (int)obj,
689 (int)sprd_gem_obj->buffer->dma_addr);
691 return &sprd_gem_obj->buffer->dma_addr;
694 void sprd_drm_gem_put_dma_addr(struct drm_device *dev,
695 unsigned int gem_handle,
696 struct drm_file *file_priv)
698 struct sprd_drm_gem_obj *sprd_gem_obj;
699 struct drm_gem_object *obj;
700 struct ion_handle *ion_handle;
701 struct sprd_drm_gem_buf *buf;
704 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
706 DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
710 sprd_gem_obj = to_sprd_gem_obj(obj);
712 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
713 buf = sprd_gem_obj->buffer;
714 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
715 domain_num = IOMMU_MM;
716 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
717 domain_num = IOMMU_GSP;
719 ion_handle = buf->ion_handle;
720 if (sprd_unmap_iommu(ion_handle, domain_num))
721 DRM_ERROR("failed to unmap iommu:h[%d]o[0x%x]\n",
722 gem_handle, (int)obj);
725 drm_gem_object_unreference_unlocked(obj);
727 DRM_DEBUG("%s:h[%d]o[0x%x]\n",
728 __func__, gem_handle, (int)obj);
730 * decrease obj->refcount one more time because we have already
731 * increased it in sprd_drm_gem_get_dma_addr().
733 drm_gem_object_unreference_unlocked(obj);
736 unsigned long sprd_drm_gem_get_size(struct drm_device *dev,
737 unsigned int gem_handle,
738 struct drm_file *file_priv)
740 struct sprd_drm_gem_obj *sprd_gem_obj;
741 struct drm_gem_object *obj;
743 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
745 DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
749 sprd_gem_obj = to_sprd_gem_obj(obj);
751 drm_gem_object_unreference_unlocked(obj);
753 return sprd_gem_obj->buffer->size;
756 void *sprd_drm_gem_get_obj_addr(unsigned int name, unsigned int index)
758 struct sprd_drm_gem_obj *sprd_gem_obj;
759 struct drm_gem_object *obj;
760 struct ion_handle *ion_handle;
761 struct sprd_drm_gem_buf *buf;
764 mutex_lock(&sprd_drm_dev->object_name_lock);
765 obj = idr_find(&sprd_drm_dev->object_name_idr, (int) name);
766 mutex_unlock(&sprd_drm_dev->object_name_lock);
769 DRM_ERROR("name[%d]failed to lookup gem object.\n", name);
770 return ERR_PTR(-EFAULT);
773 sprd_gem_obj = to_sprd_gem_obj(obj);
774 buf = sprd_gem_obj->buffer;
776 if (index >= buf->bufcount) {
777 DRM_ERROR("invalid index[%d],bufcount[%d]\n",
778 index, buf->bufcount);
779 return ERR_PTR(-EINVAL);
782 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
783 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
784 domain_num = IOMMU_MM;
785 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
786 domain_num = IOMMU_GSP;
788 ion_handle = buf->ion_handle;
789 if (sprd_map_iommu(ion_handle, domain_num,
790 (unsigned long *)&sprd_gem_obj->buffer->dma_addr)) {
791 DRM_ERROR("failed to map iommu\n");
792 return ERR_PTR(-EINVAL);
796 DRM_DEBUG("%s:name[%d]o[0x%x]idx[%d]a[0x%x]\n",
797 __func__, name, (int)obj, index, (int)buf->idx_addr[index]);
799 return &buf->idx_addr[index];
801 EXPORT_SYMBOL(sprd_drm_gem_get_obj_addr);
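/*
 * Illustrative in-kernel usage (hypothetical caller, not part of this
 * file): a driver that knows the global GEM name can resolve a sub-buffer
 * address with something like
 *
 *	unsigned long *addr = sprd_drm_gem_get_obj_addr(name, 0);
 *
 *	if (!IS_ERR(addr))
 *		program_hw_with(*addr);
 *
 * where program_hw_with() stands in for whatever consumes the device
 * address, and the pointer type assumes idx_addr[] entries are
 * unsigned-long sized.
 */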
803 int sprd_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
804 struct drm_file *file_priv)
806 struct drm_sprd_gem_map_off *args = data;
808 DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
809 args->handle, (unsigned long)args->offset);
811 if (!(dev->driver->driver_features & DRIVER_GEM)) {
812 DRM_ERROR("does not support GEM.\n");
816 return sprd_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
820 static int sprd_drm_gem_mmap_buffer(struct file *filp,
821 struct vm_area_struct *vma)
823 struct drm_gem_object *obj = filp->private_data;
824 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
825 struct sprd_drm_gem_buf *buffer;
826 unsigned long pfn, vm_size;
828 vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
830 update_vm_cache_attr(sprd_gem_obj, vma);
832 vm_size = vma->vm_end - vma->vm_start;
835 * the buffer holds information about physically contiguous memory
836 * allocated at user request or at framebuffer creation.
838 buffer = sprd_gem_obj->buffer;
840 /* check if user-requested size is valid. */
841 if (vm_size > buffer->size)
844 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
845 unsigned long addr = vma->vm_start;
846 unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
847 struct scatterlist *sg;
850 for_each_sg(buffer->sgt->sgl, sg, buffer->sgt->nents, i) {
851 struct page *page = sg_page(sg);
852 unsigned long remainder = vma->vm_end - addr;
853 unsigned long len = sg_dma_len(sg);
855 if (offset >= sg_dma_len(sg)) {
856 offset -= sg_dma_len(sg);
859 page += offset / PAGE_SIZE;
860 len = sg_dma_len(sg) - offset;
863 len = min(len, remainder);
864 remap_pfn_range(vma, addr, page_to_pfn(page), len,
867 if (addr >= vma->vm_end) {
873 * get the page frame number of the physical memory to be mapped
876 pfn = ((unsigned long)sprd_gem_obj->buffer->dma_addr) >>
879 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
881 if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
882 vma->vm_page_prot)) {
883 DRM_ERROR("failed to remap pfn range.\n");
891 static const struct file_operations sprd_drm_gem_fops = {
892 .mmap = sprd_drm_gem_mmap_buffer,
895 int sprd_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
896 struct drm_file *file_priv)
898 struct drm_sprd_gem_mmap *args = data;
899 struct drm_gem_object *obj;
902 if (!(dev->driver->driver_features & DRIVER_GEM)) {
903 DRM_ERROR("does not support GEM.\n");
907 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
909 DRM_ERROR("failed to lookup gem object:h[%d]\n", args->handle);
913 obj->filp->f_op = &sprd_drm_gem_fops;
914 obj->filp->private_data = obj;
916 addr = vm_mmap(obj->filp, 0, args->size,
917 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
919 drm_gem_object_unreference_unlocked(obj);
921 if (IS_ERR_VALUE(addr))
926 DRM_DEBUG("%s:h[%d]s[%d]o[0x%x]mapped[0x%x]\n", __func__,
927 args->handle, (int)args->size, (int)obj, (int)args->mapped);
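/*
 * Note: the ioctl above maps the buffer by pointing the GEM object's shmem
 * file at sprd_drm_gem_fops, so the vm_mmap() call lands in
 * sprd_drm_gem_mmap_buffer(); the resulting user address is then reported
 * back to the caller through args->mapped.
 */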
932 int sprd_drm_gem_mmap_iommu_ioctl(struct drm_device *dev, void *data,
933 struct drm_file *file_priv)
935 struct drm_sprd_gem_mmap *args = data;
936 struct drm_gem_object *obj;
937 struct ion_handle *ion_handle;
939 struct sprd_drm_gem_obj *sprd_gem_obj;
940 struct sprd_drm_gem_buf *buf;
943 if (!(dev->driver->driver_features & DRIVER_GEM)) {
944 DRM_ERROR("does not support GEM.\n");
948 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
950 DRM_ERROR("failed to lookup gem object.\n");
954 sprd_gem_obj = to_sprd_gem_obj(obj);
955 buf = sprd_gem_obj->buffer;
956 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
957 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
958 domain_num = IOMMU_MM;
959 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
960 domain_num = IOMMU_GSP;
962 ion_handle = buf->ion_handle;
963 sprd_map_iommu(ion_handle, domain_num, &addr);
965 DRM_ERROR("MMAP_IOMMU not applicable on CONTIG HEAP\n");
966 drm_gem_object_unreference_unlocked(obj);
974 int sprd_drm_gem_unmap_iommu_ioctl(struct drm_device *dev, void *data,
975 struct drm_file *file_priv)
977 struct drm_sprd_gem_mmap *args = data;
978 struct drm_gem_object *obj;
979 struct ion_handle *ion_handle;
980 struct sprd_drm_gem_obj *sprd_gem_obj;
981 struct sprd_drm_gem_buf *buf;
982 int ret = 0, domain_num = 0;
984 if (!(dev->driver->driver_features & DRIVER_GEM)) {
985 DRM_ERROR("does not support GEM.\n");
989 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
991 DRM_ERROR("failed to lookup gem object.\n");
995 sprd_gem_obj = to_sprd_gem_obj(obj);
996 buf = sprd_gem_obj->buffer;
997 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
998 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
999 domain_num = IOMMU_MM;
1000 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
1001 domain_num = IOMMU_GSP;
1003 ion_handle = buf->ion_handle;
1004 sprd_unmap_iommu(ion_handle, domain_num);
1006 DRM_ERROR("UNMAP_IOMMU not applicable on CONTIG HEAP\n");
1010 drm_gem_object_unreference_unlocked(obj);
1012 * decrease obj->refcount one more time because we have already
1013 * increased it in sprd_drm_gem_mmap_iommu_ioctl().
1015 drm_gem_object_unreference_unlocked(obj);
1019 int sprd_drm_gem_get_ioctl(struct drm_device *dev, void *data,
1020 struct drm_file *file_priv)
1021 { struct sprd_drm_gem_obj *sprd_gem_obj;
1022 struct drm_sprd_gem_info *args = data;
1023 struct drm_gem_object *obj;
1025 mutex_lock(&dev->struct_mutex);
1027 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1029 DRM_ERROR("failed to lookup gem object.\n");
1030 mutex_unlock(&dev->struct_mutex);
1034 sprd_gem_obj = to_sprd_gem_obj(obj);
1036 args->flags = sprd_gem_obj->flags;
1037 args->size = sprd_gem_obj->size;
1039 drm_gem_object_unreference(obj);
1040 mutex_unlock(&dev->struct_mutex);
1045 int sprd_drm_gem_init_object(struct drm_gem_object *obj)
1050 void sprd_drm_gem_free_object(struct drm_gem_object *obj)
1052 struct sprd_drm_gem_obj *sprd_gem_obj;
1053 struct sprd_drm_gem_buf *buf;
1055 sprd_gem_obj = to_sprd_gem_obj(obj);
1056 buf = sprd_gem_obj->buffer;
1058 if (obj->import_attach)
1059 drm_prime_gem_destroy(obj, buf->sgt);
1061 sprd_drm_gem_destroy(to_sprd_gem_obj(obj));
1064 int sprd_drm_gem_dumb_create(struct drm_file *file_priv,
1065 struct drm_device *dev,
1066 struct drm_mode_create_dumb *args)
1068 struct sprd_drm_gem_obj *sprd_gem_obj;
1069 struct sprd_drm_gem_index gem_idx;
1073 * allocate memory to be used for the framebuffer.
1074 * - this callback is invoked by a user application
1075 * via the DRM_IOCTL_MODE_CREATE_DUMB command.
1078 args->pitch = args->width * args->bpp >> 3;
1079 args->size = PAGE_ALIGN(args->pitch * args->height);
1081 gem_idx.bufcount = 1;
1082 gem_idx.idx_size[0] = args->size;
1083 gem_idx.flags = args->flags;
1085 sprd_gem_obj = sprd_drm_gem_create(dev, &gem_idx);
1086 if (IS_ERR(sprd_gem_obj))
1087 return PTR_ERR(sprd_gem_obj);
1089 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
1092 sprd_drm_gem_destroy(sprd_gem_obj);
1099 int sprd_drm_gem_dumb_map_offset(struct drm_file *file_priv,
1100 struct drm_device *dev, uint32_t handle,
1103 struct drm_gem_object *obj;
1106 mutex_lock(&dev->struct_mutex);
1109 * get the offset of the memory allocated for the drm framebuffer.
1110 * - this callback is invoked by a user application
1111 * via the DRM_IOCTL_MODE_MAP_DUMB command.
1114 obj = drm_gem_object_lookup(dev, file_priv, handle);
1116 DRM_ERROR("failed to lookup gem object.\n");
1121 if (!obj->map_list.map) {
1122 ret = drm_gem_create_mmap_offset(obj);
1127 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
1128 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
1131 drm_gem_object_unreference(obj);
1133 mutex_unlock(&dev->struct_mutex);
1137 int sprd_drm_gem_dumb_destroy(struct drm_file *file_priv,
1138 struct drm_device *dev,
1139 unsigned int handle)
1144 * obj->refcount and obj->handle_count are decreased and
1145 * if both of them reach 0 then sprd_drm_gem_free_object()
1146 * is called via the free callback to release resources.
1148 ret = drm_gem_handle_delete(file_priv, handle);
1150 DRM_ERROR("failed to delete drm_gem_handle.\n");
1157 int sprd_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1159 struct drm_gem_object *obj = vma->vm_private_data;
1160 struct drm_device *dev = obj->dev;
1161 unsigned long f_vaddr;
1162 pgoff_t page_offset;
1165 page_offset = ((unsigned long)vmf->virtual_address -
1166 vma->vm_start) >> PAGE_SHIFT;
1167 f_vaddr = (unsigned long)vmf->virtual_address;
1169 mutex_lock(&dev->struct_mutex);
1171 ret = sprd_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
1173 DRM_ERROR("failed to map pages.\n");
1175 mutex_unlock(&dev->struct_mutex);
1177 return convert_to_vm_err_msg(ret);
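/*
 * The fault handler above resolves one page per fault: the faulting
 * address is turned into a page offset within the object, the page is
 * inserted under dev->struct_mutex by sprd_drm_gem_map_pages(), and the
 * errno is translated into a VM_FAULT_* code by convert_to_vm_err_msg().
 */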
1180 int sprd_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1182 struct sprd_drm_gem_obj *sprd_gem_obj;
1183 struct drm_gem_object *obj;
1186 /* set vm_area_struct. */
1187 ret = drm_gem_mmap(filp, vma);
1189 DRM_ERROR("failed to mmap.\n");
1193 obj = vma->vm_private_data;
1194 sprd_gem_obj = to_sprd_gem_obj(obj);
1196 ret = check_gem_flags(sprd_gem_obj->flags);
1198 drm_gem_vm_close(vma);
1199 drm_gem_free_mmap_offset(obj);
1203 vma->vm_flags &= ~VM_PFNMAP;
1204 vma->vm_flags |= VM_MIXEDMAP;
1206 update_vm_cache_attr(sprd_gem_obj, vma);
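/*
 * drm_gem_mmap() marks the VMA as VM_PFNMAP by default; it is switched to
 * VM_MIXEDMAP here because pages are only inserted on demand from the
 * fault handler via vm_insert_mixed(), after which the cache attributes
 * requested at allocation time are applied to the mapping.
 */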
1211 int sprd_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
1212 struct drm_file *file_priv)
1214 struct drm_sprd_gem_lock_handle *args = data;
1215 struct drm_gem_object *obj;
1216 struct sprd_drm_gem_obj *sprd_gem_obj;
1217 struct drm_sprd_gem_object_wait_list_entry *lock_item;
1221 DRM_DEBUG_DRIVER("%s line:%d\n", __func__, __LINE__);
1222 mutex_lock(&dev->struct_mutex);
1224 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1227 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1232 sprd_gem_obj = to_sprd_gem_obj(obj);
1234 if (sprd_gem_obj->lockpid) {
1235 /* if a pid already had it locked */
1236 /* create and add to wait list */
1237 for (i = 0; i < DRM_SPRD_HANDLE_WAIT_ENTRIES; i++) {
1238 if (sprd_gem_obj->wait_entries[i].in_use == 0) {
1239 /* this one is empty */
1240 lock_item = &sprd_gem_obj->wait_entries[i];
1241 lock_item->in_use = 1;
1242 lock_item->pid = args->pid;
1243 INIT_LIST_HEAD((struct list_head *)
1244 &sprd_gem_obj->wait_entries[i]);
1249 if (i == DRM_SPRD_HANDLE_WAIT_ENTRIES) {
1252 drm_gem_object_unreference(obj);
1255 list_add_tail((struct list_head *)&lock_item->list,
1256 &sprd_gem_obj->wait_list);
1257 mutex_unlock(&dev->struct_mutex);
1258 /* here we need to block */
1259 wait_event_interruptible_timeout(
1260 sprd_gem_obj->wait_entries[i].process_wait_q,
1261 (sprd_gem_obj->lockpid == 0),
1262 msecs_to_jiffies(20000));
1263 mutex_lock(&dev->struct_mutex);
1264 lock_item->in_use = 0;
1266 sprd_gem_obj->lockpid = args->pid;
1267 DRM_DEBUG_DRIVER("%s lockpid:%d\n", __func__, sprd_gem_obj->lockpid);
1270 mutex_unlock(&dev->struct_mutex);
1275 int sprd_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
1276 struct drm_file *file_priv)
1279 struct drm_sprd_gem_unlock_handle *args = data;
1280 struct drm_gem_object *obj;
1281 struct sprd_drm_gem_obj *unlock_obj;
1282 struct drm_sprd_gem_object_wait_list_entry *lock_next;
1285 DRM_DEBUG_DRIVER("%s line:%d\n", __func__, __LINE__);
1286 mutex_lock(&dev->struct_mutex);
1288 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1291 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1296 unlock_obj = to_sprd_gem_obj(obj);
1297 if (!list_empty(&unlock_obj->wait_list)) {
1299 (struct drm_sprd_gem_object_wait_list_entry *)
1300 unlock_obj->wait_list.prev;
1302 list_del((struct list_head *)&lock_next->list);
1304 unlock_obj->lockpid = 0;
1305 wake_up_interruptible(
1306 &lock_next->process_wait_q);
1310 /* List is empty so set pid to 0 */
1311 unlock_obj->lockpid = 0;
1313 drm_gem_object_unreference(obj);
1315 drm_gem_object_unreference(obj);
1317 mutex_unlock(&dev->struct_mutex);
1322 int sprd_gem_cache_op_ioctl(struct drm_device *dev, void *data,
1323 struct drm_file *file_priv)
1325 struct drm_sprd_gem_cache_op *args = data;
1327 struct drm_gem_object *obj;
1328 struct sprd_drm_gem_obj *sprd_gem_obj;
1329 struct sprd_drm_gem_buf *buf;
1330 struct sg_table *sgt;
1331 unsigned int cache_op = args->flags &(~SPRD_DRM_ALL_CACHE);
1333 mutex_lock(&dev->struct_mutex);
1334 obj = drm_gem_object_lookup(dev, file_priv, args->gem_handle);
1337 DRM_ERROR("invalid handle[%d]\n", args->gem_handle);
1339 goto err_invalid_handle;
1342 sprd_gem_obj = to_sprd_gem_obj(obj);
1343 buf = sprd_gem_obj->buffer;
1346 DRM_DEBUG("%s:h[%d]s[%d]f[0x%x]a[0x%x]o[0x%x]\n",
1347 "gc",args->gem_handle, (int)args->size, args->flags,
1348 (int)args->usr_addr, (int)obj);
1350 if (!IS_CACHABLE_BUFFER(sprd_gem_obj->flags)) {
1351 DRM_ERROR("invalid flags[0x%x]for h[%d]\n",
1352 sprd_gem_obj->flags, args->gem_handle);
1357 case SPRD_DRM_CACHE_INV:
1358 dma_sync_sg_for_cpu(NULL, sgt->sgl, sgt->nents,
1361 case SPRD_DRM_CACHE_CLN:
1362 dma_sync_sg_for_device(NULL, sgt->sgl, sgt->nents,
1365 case SPRD_DRM_CACHE_FSH:
1366 dma_sync_sg_for_device(NULL, sgt->sgl, sgt->nents,
1368 dma_sync_sg_for_cpu(NULL, sgt->sgl, sgt->nents,
1372 DRM_ERROR("invalid op[0x%x]for h[%d]\n", cache_op, args->gem_handle);
1378 drm_gem_object_unreference(obj);
1381 mutex_unlock(&dev->struct_mutex);