/*
 * Copyright (c) 2014 Spreadtrum Communications, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/sprd_drm.h>
#include <linux/sprd_iommu.h>

#include "video/ion_sprd.h"
#include "sprd_drm_drv.h"
#include "sprd_drm_gem.h"
#include "sprd_drm_buf.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(SPRD_BO_MASK | SPRD_BO_DEV_MASK))
		goto err;

#ifdef CONFIG_SPRD_IOMMU
	if (IS_NONCONTIG_BUFFER(flags)) {
		if (IS_DEV_OVERLAY_BUFFER(flags))
			goto err;
	} else {
		if (IS_DEV_SYSTEM_BUFFER(flags))
			goto err;
	}
#endif

	return 0;

err:
	DRM_ERROR("invalid flags[0x%x]\n", flags);
	return -EINVAL;
}

static void update_vm_cache_attr(struct sprd_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable as default. */
	if (obj->flags & SPRD_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & SPRD_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

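/*
 * Quick reference for the mapping above (a recap of the code, not extra
 * behavior):
 *
 *	SPRD_BO_CACHABLE  ->  vm_get_page_prot() unchanged (fully cached)
 *	SPRD_BO_WC        ->  pgprot_writecombine() (buffered writes, uncached reads)
 *	anything else     ->  pgprot_noncached() (uncached)
 */
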
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
#ifndef CONFIG_CMA_ALIGNMENT
		return roundup(size, SECTION_SIZE);
#else
		/* ToDo: need to sync with additional align size */
		return roundup(size, SZ_64K);
#endif
	}

	return roundup(size, PAGE_SIZE);
}

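/*
 * Example of the rounding arithmetic (a sketch; SECTION_SIZE is 1MB on
 * ARM with 4K pages, so the values are illustrative):
 *
 *	roundup(SZ_1M + 1, SECTION_SIZE)  ->  0x200000 (2MB, contiguous)
 *	roundup(SZ_1M + 1, SZ_64K)        ->  0x110000 (CMA-aligned case)
 *	roundup(SZ_1M + 1, PAGE_SIZE)     ->  0x101000 (non-contiguous)
 */
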
struct page **sprd_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_PTR(PTR_ERR(p));
}

static void sprd_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages,
					bool dirty, bool accessed)
{
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}

static int sprd_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
	struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
	unsigned long pfn;

	if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
		if (!buf->pages)
			return -EINTR;

		pfn = page_to_pfn(buf->pages[page_offset++]);
	} else
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

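/*
 * Example: with 4K pages, a contiguous buffer at dma_addr 0x80000000 and a
 * fault at page_offset 3 give pfn = (0x80000000 >> PAGE_SHIFT) + 3 = 0x80003,
 * i.e. the fourth page of the allocation. For non-contiguous buffers the pfn
 * is instead looked up in the pages[] array filled from the sg table.
 */
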
static int sprd_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
	struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret = 0;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = sprd_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;
	buf->page_size = PAGE_SIZE;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		goto err_free_sgt;
	}

	sgl = buf->sgt->sgl;

	/* set all pages to sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* add some codes for UNCACHED type here. TODO */

	buf->pages = pages;

	return ret;

err_free_sgt:
	kfree(buf->sgt);
	buf->sgt = NULL;
err_put_pages:
	sprd_gem_put_pages(obj, pages, true, false);
	return ret;
}

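/*
 * Layout sketch of the table built above: each shmem page occupies one
 * PAGE_SIZE-long sg entry, so for a 16K object:
 *
 *	sgl[0] -> pages[0], length 4K, dma_address = page_to_phys(pages[0])
 *	...
 *	sgl[3] -> pages[3], length 4K, dma_address = page_to_phys(pages[3])
 *
 * Note sg_dma_address() is set to the physical address directly; nothing
 * here has gone through an IOMMU or the dma-mapping layer yet.
 */
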
static void sprd_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
	struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;

	/*
	 * if buffer type is SPRD_BO_NONCONTIG then release all the pages
	 * that were allocated in the gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	sprd_gem_put_pages(obj, buf->pages, true, false);
	buf->pages = NULL;

	/* add some codes for UNCACHED type here. TODO */
}

static int sprd_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the handle holds the id that user space can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

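/*
 * Reference-count sketch: drm_gem_object_init() starts the object at
 * refcount 1, drm_gem_handle_create() takes its own reference for the
 * handle, so the unreference above leaves the handle as the only owner.
 * Closing the handle therefore frees the object.
 */
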
void sprd_drm_gem_destroy(struct sprd_drm_gem_obj *sprd_gem_obj)
{
	struct drm_gem_object *obj;
	struct sprd_drm_gem_buf *buf;

	obj = &sprd_gem_obj->base;
	buf = sprd_gem_obj->buffer;

	DRM_DEBUG("%s:o[0x%x]a[0x%x]\n", "gf",
		(int)obj, (int)sprd_gem_obj->buffer->dma_addr);

	sprd_drm_free_buf(obj->dev, sprd_gem_obj->flags, buf);

	sprd_drm_fini_buf(obj->dev, buf);
	sprd_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(sprd_gem_obj);
}

struct sprd_drm_gem_obj *sprd_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	sprd_gem_obj = kzalloc(sizeof(*sprd_gem_obj), GFP_KERNEL);
	if (!sprd_gem_obj) {
		DRM_ERROR("failed to allocate sprd gem object\n");
		return NULL;
	}

	sprd_gem_obj->size = size;
	obj = &sprd_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(sprd_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return sprd_gem_obj;
}

struct sprd_drm_gem_obj *sprd_drm_gem_create(struct drm_device *dev,
						struct sprd_drm_gem_index *args)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct sprd_drm_gem_buf *buf;
	int ret, i = 0, j, tsize = 0;

	ret = check_gem_flags(args->flags);
	if (ret)
		return ERR_PTR(ret);

	/* ToDo: need to check align */
	for (i = 0; i < args->bufcount; i++)
		tsize += args->idx_size[i];

	if (!tsize) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	tsize = roundup_gem_size(tsize, args->flags);

	buf = sprd_drm_init_buf(dev, tsize);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	sprd_gem_obj = sprd_drm_gem_init(dev, tsize);
	if (!sprd_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	sprd_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	sprd_gem_obj->flags = args->flags;

	ret = sprd_drm_alloc_buf(dev, buf, args->flags);
	if (ret < 0)
		goto err_release_gem;

	memset(buf->idx_addr, 0x00, sizeof(buf->idx_addr));
	buf->idx_addr[0] = buf->dma_addr;
	buf->bufcount = args->bufcount;

	for (i = 0; i < buf->bufcount; i++) {
		j = i + 1;
		if (buf->bufcount > j)
			buf->idx_addr[j] = buf->idx_addr[i] + args->idx_size[i];
	}

	sprd_gem_obj->lockpid = 0;
	INIT_LIST_HEAD(&sprd_gem_obj->wait_list);

	for (i = 0; i < DRM_SPRD_HANDLE_WAIT_ENTRIES; i++) {
		INIT_LIST_HEAD((struct list_head *) &sprd_gem_obj->wait_entries[i]);
		sprd_gem_obj->wait_entries[i].pid = 0;
		init_waitqueue_head(&sprd_gem_obj->wait_entries[i].process_wait_q);
	}

	return sprd_gem_obj;

err_release_gem:
	drm_gem_object_release(&sprd_gem_obj->base);
	kfree(sprd_gem_obj);
err_fini_buf:
	sprd_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

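/*
 * idx_addr layout example: for bufcount = 3 and idx_size = {A, B, C} the
 * loop above yields sub-buffer addresses within one allocation:
 *
 *	idx_addr[0] = dma_addr
 *	idx_addr[1] = dma_addr + A
 *	idx_addr[2] = dma_addr + A + B
 *
 * i.e. the indexed buffers are packed back to back in a single GEM object.
 */
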
int sprd_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_sprd_gem_create *args = data;
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct sprd_drm_gem_index gem_idx;
	struct timeval val_start, val_end;
	uint64_t time_start, time_end;
	int ret;

	do_gettimeofday(&val_start);
	time_start = (uint64_t)(val_start.tv_sec * 1000000 + val_start.tv_usec);

	gem_idx.bufcount = 1;
	gem_idx.idx_size[0] = args->size;
	gem_idx.flags = args->flags;

	sprd_gem_obj = sprd_drm_gem_create(dev, &gem_idx);
	if (IS_ERR(sprd_gem_obj)) {
		DRM_ERROR("failed to sprd_drm_gem_create:s[%d]f[0x%x]\n",
			(int)args->size, args->flags);
		return PTR_ERR(sprd_gem_obj);
	}

	ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		DRM_ERROR("failed to sprd_drm_gem_handle_create:s[%d]f[0x%x]\n",
			(int)args->size, args->flags);
		sprd_drm_gem_destroy(sprd_gem_obj);
		return ret;
	}

	do_gettimeofday(&val_end);
	time_end = (uint64_t)(val_end.tv_sec * 1000000 + val_end.tv_usec);

	DRM_DEBUG("%s:h[%d]s[%d]f[0x%x]o[0x%x]a[0x%x][%lld us]\n",
		"ga", args->handle, (int)args->size, args->flags,
		(int)&sprd_gem_obj->base,
		(int)sprd_gem_obj->buffer->dma_addr, time_end - time_start);

	return 0;
}

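/*
 * Userspace sketch (assuming the DRM_IOCTL_SPRD_GEM_CREATE wrapper from
 * drm/sprd_drm.h; names are illustrative and error handling is omitted):
 *
 *	struct drm_sprd_gem_create req = {
 *		.size  = length,
 *		.flags = SPRD_BO_CONTIG | SPRD_BO_CACHABLE,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_SPRD_GEM_CREATE, &req);
 *	// req.handle now names the buffer for the mmap/info ioctls below
 */
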
int sprd_drm_gem_create_index_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct sprd_drm_gem_index *args = data;
	struct sprd_drm_gem_obj *sprd_gem_obj;
	int ret;

	if (args->flags & SPRD_BO_NONCONTIG) {
		DRM_ERROR("does not support non-contiguous memory\n");
		return -EINVAL;
	}

	sprd_gem_obj = sprd_drm_gem_create(dev, args);
	if (IS_ERR(sprd_gem_obj))
		return PTR_ERR(sprd_gem_obj);

	ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		sprd_drm_gem_destroy(sprd_gem_obj);
		return ret;
	}

	DRM_INFO("%s:h[%d]cnt[%d]sz[%d %d %d]f[0x%x]o[0x%x]a[0x%x]\n",
		__func__, args->handle, args->bufcount,
		(int)args->idx_size[0], (int)args->idx_size[1], (int)args->idx_size[2],
		args->flags, (int)&sprd_gem_obj->base,
		(int)sprd_gem_obj->buffer->dma_addr);

	return 0;
}

struct dma_buf *sprd_prime_export(struct drm_device *dev,
				struct drm_gem_object *obj, int flags)
{
	struct sprd_drm_private *private = dev->dev_private;
	struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
	struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
	struct dma_buf *dmabuf;

	dmabuf = ion_share_dma_buf(private->sprd_drm_ion_client,
					buf->ion_handle);
	if (IS_ERR_OR_NULL(dmabuf))
		pr_err("%s: failed to share dma_buf (%p)\n",
			__func__, dmabuf);

	return dmabuf;
}

struct drm_gem_object *sprd_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct ion_handle *ion_handle;
	struct sprd_drm_gem_obj *sprd_gem_obj;
	unsigned long size = 0;
	struct sprd_drm_gem_buf *buf = NULL;
	unsigned int i = 0, nr_pages = 0, heap_id;
	int ret = 0;
	struct sprd_drm_private *private;
	struct scatterlist *sg = NULL;
	struct drm_gem_object *obj;
	unsigned long sgt_size;

	private = dev->dev_private;
	ion_handle = get_ion_handle_from_dmabuf(private->sprd_drm_ion_client, dma_buf);
	if (IS_ERR_OR_NULL(ion_handle)) {
		DRM_ERROR("Unable to import dmabuf\n");
		return ERR_PTR(-EINVAL);
	}

	ion_handle_get_size(private->sprd_drm_ion_client,
				ion_handle, &size, &heap_id);
	if (size == 0) {
		DRM_ERROR(
			"cannot create GEM object from zero size ION buffer\n");
		ret = -EINVAL;
		goto err_ion_free;
	}

	buf = sprd_drm_init_buf(dev, size);
	if (!buf) {
		DRM_ERROR("Unable to allocate the GEM buffer\n");
		ret = -ENOMEM;
		goto err_ion_free;
	}

	sprd_gem_obj = sprd_drm_gem_init(dev, size);
	if (!sprd_gem_obj) {
		DRM_ERROR("Unable to initialize GEM object\n");
		ret = -ENOMEM;
		goto err_fini_buf;
	}
	sprd_gem_obj->buffer = buf;
	obj = &sprd_gem_obj->base;

	ret = ion_is_phys(private->sprd_drm_ion_client, ion_handle);
	if (ret == 0)
		sprd_gem_obj->flags = SPRD_BO_NONCONTIG;
	else if (ret == 1)
		sprd_gem_obj->flags = SPRD_BO_CONTIG;
	else {
		DRM_ERROR("Unable to get flag, invalid handle\n");
		ret = -EINVAL;
		goto err_release_gem;
	}

	/* ion_handle is validated in ion_is_phys, no need to check again */
	ret = ion_is_cached(private->sprd_drm_ion_client, ion_handle);
	if (ret)
		sprd_gem_obj->flags |= SPRD_BO_CACHABLE;

	if ((heap_id == ION_HEAP_ID_MASK_GSP) || (heap_id == ION_HEAP_ID_MASK_GSP_IOMMU))
		sprd_gem_obj->flags |= SPRD_BO_DEV_GSP;
	else if ((heap_id == ION_HEAP_ID_MASK_MM) || (heap_id == ION_HEAP_ID_MASK_MM_IOMMU))
		sprd_gem_obj->flags |= SPRD_BO_DEV_MM;
	else if (heap_id == ION_HEAP_ID_MASK_OVERLAY)
		sprd_gem_obj->flags |= SPRD_BO_DEV_OVERLAY;
	else if (heap_id == ION_HEAP_ID_MASK_SYSTEM)
		sprd_gem_obj->flags |= SPRD_BO_DEV_SYSTEM;
	else {
		DRM_ERROR("Heap id not supported\n");
		ret = -EINVAL;
		goto err_release_gem;
	}

	buf->ion_handle = ion_handle;
	buf->sgt = ion_sg_table(private->sprd_drm_ion_client, buf->ion_handle);
	if (IS_ERR_OR_NULL(buf->sgt)) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -EINVAL;
		goto err_release_gem;
	}

	buf->dma_addr = sg_dma_address(buf->sgt->sgl);
	for_each_sg(buf->sgt->sgl, sg, buf->sgt->nents, i)
		nr_pages++;

	/* one page pointer per sg entry */
	sgt_size = sizeof(struct page *) * nr_pages;
	buf->pages = kzalloc(sgt_size, GFP_KERNEL | __GFP_NOWARN);
	if (!buf->pages) {
		unsigned int order = get_order(sgt_size);

		DRM_ERROR("%s: kzalloc failed for sg list: order:%d\n",
				__func__, order);
		buf->pages = vzalloc(sgt_size);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			ret = -ENOMEM;
			goto err_release_gem;
		}
	}

	for_each_sg(buf->sgt->sgl, sg, buf->sgt->nents, i)
		buf->pages[i] = phys_to_page(sg_dma_address(sg));

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr, buf->size);

	return obj;

err_release_gem:
	buf->dma_addr = (dma_addr_t)NULL;
	buf->sgt = NULL;
	sprd_gem_obj->buffer = NULL;
	/* release file pointer to gem object. */
	drm_gem_object_release(obj);
	kfree(sprd_gem_obj);
err_fini_buf:
	sprd_drm_fini_buf(dev, buf);
err_ion_free:
	ion_free(private->sprd_drm_ion_client, ion_handle);
	return ERR_PTR(ret);
}

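/*
 * Import flow recap: the dma-buf is resolved back to an ION handle, and
 * the GEM flags are derived from ION state rather than taken from the
 * caller - contiguity from ion_is_phys(), cacheability from
 * ion_is_cached(), and the SPRD_BO_DEV_* device bits from the heap id.
 * The resulting object shares the ION pages; nothing is copied.
 */
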
void *sprd_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct drm_gem_object *obj;
	struct ion_handle *ion_handle;
	struct sprd_drm_gem_buf *buf;
	int domain_num = 0;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
		return ERR_PTR(-EINVAL);
	}

	sprd_gem_obj = to_sprd_gem_obj(obj);

	if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
		buf = sprd_gem_obj->buffer;
		if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_MM;
		else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_GSP;

		ion_handle = buf->ion_handle;
		if (sprd_map_iommu(ion_handle, domain_num,
			(unsigned long *)&sprd_gem_obj->buffer->dma_addr)) {
			DRM_ERROR("failed to map iommu:h[%d]o[0x%x]\n",
				gem_handle, (int)obj);
			drm_gem_object_unreference_unlocked(obj);
			return ERR_PTR(-EINVAL);
		}
	}

	DRM_DEBUG("%s:h[%d]o[0x%x]a[0x%x]\n",
		__func__, gem_handle, (int)obj,
		(int)sprd_gem_obj->buffer->dma_addr);

	return &sprd_gem_obj->buffer->dma_addr;
}

void sprd_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct drm_gem_object *obj;
	struct ion_handle *ion_handle;
	struct sprd_drm_gem_buf *buf;
	int domain_num = 0;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
		return;
	}

	sprd_gem_obj = to_sprd_gem_obj(obj);

	if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
		buf = sprd_gem_obj->buffer;
		if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_MM;
		else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_GSP;

		ion_handle = buf->ion_handle;
		if (sprd_unmap_iommu(ion_handle, domain_num))
			DRM_ERROR("failed to unmap iommu:h[%d]o[0x%x]\n",
				gem_handle, (int)obj);
	}

	drm_gem_object_unreference_unlocked(obj);

	DRM_DEBUG("%s:h[%d]o[0x%x]\n",
		__func__, gem_handle, (int)obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at sprd_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

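/*
 * Pairing sketch: sprd_drm_gem_get_dma_addr() leaves its lookup reference
 * (and, for non-contiguous buffers, an IOMMU mapping) held, so every call
 * must be balanced by sprd_drm_gem_put_dma_addr() on the same handle:
 *
 *	addr = sprd_drm_gem_get_dma_addr(dev, handle, file);  // +ref, map
 *	... use *addr for DMA ...
 *	sprd_drm_gem_put_dma_addr(dev, handle, file);         // unmap, -2 refs
 */
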
unsigned long sprd_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
		return 0;
	}

	sprd_gem_obj = to_sprd_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return sprd_gem_obj->buffer->size;
}

void *sprd_drm_gem_get_obj_addr(unsigned int name, unsigned int index)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct drm_gem_object *obj;
	struct ion_handle *ion_handle;
	struct sprd_drm_gem_buf *buf;
	int domain_num = 0;

	mutex_lock(&sprd_drm_dev->object_name_lock);
	obj = idr_find(&sprd_drm_dev->object_name_idr, (int)name);
	mutex_unlock(&sprd_drm_dev->object_name_lock);

	if (!obj) {
		DRM_ERROR("name[%d] failed to lookup gem object.\n", name);
		return ERR_PTR(-EFAULT);
	}

	sprd_gem_obj = to_sprd_gem_obj(obj);
	buf = sprd_gem_obj->buffer;

	if (index >= buf->bufcount) {
		DRM_ERROR("invalid index[%d],bufcount[%d]\n",
			index, buf->bufcount);
		return ERR_PTR(-EINVAL);
	}

	if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
		if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_MM;
		else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_GSP;

		ion_handle = buf->ion_handle;
		if (sprd_map_iommu(ion_handle, domain_num,
			(unsigned long *)&sprd_gem_obj->buffer->dma_addr)) {
			DRM_ERROR("failed to map iommu\n");
			return ERR_PTR(-EINVAL);
		}
	}

	DRM_DEBUG("%s:name[%d]o[0x%x]idx[%d]a[0x%x]\n",
		__func__, name, (int)obj, index, (int)buf->idx_addr[index]);

	return &buf->idx_addr[index];
}
EXPORT_SYMBOL(sprd_drm_gem_get_obj_addr);

int sprd_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_sprd_gem_map_off *args = data;

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return sprd_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static int sprd_drm_gem_mmap_buffer(struct file *filp,
				struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
	struct sprd_drm_gem_buf *buffer;
	unsigned long pfn, vm_size;

	vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

	update_vm_cache_attr(sprd_gem_obj, vma);

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * the buffer holds information about the physically contiguous
	 * memory allocated by user request or at framebuffer creation.
	 */
	buffer = sprd_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
		unsigned long addr = vma->vm_start;
		unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
		struct scatterlist *sg;
		int i;

		for_each_sg(buffer->sgt->sgl, sg, buffer->sgt->nents, i) {
			struct page *page = sg_page(sg);
			unsigned long remainder = vma->vm_end - addr;
			unsigned long len = sg_dma_len(sg);

			if (offset >= sg_dma_len(sg)) {
				offset -= sg_dma_len(sg);
				continue;
			} else if (offset) {
				page += offset / PAGE_SIZE;
				len = sg_dma_len(sg) - offset;
				offset = 0;
			}
			len = min(len, remainder);
			remap_pfn_range(vma, addr, page_to_pfn(page), len,
					vma->vm_page_prot);
			addr += len;
			if (addr >= vma->vm_end) {
				break;
			}
		}
	} else {
		/*
		 * get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)sprd_gem_obj->buffer->dma_addr) >>
				PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}

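/*
 * Offset walk example for the non-contiguous branch above: with two 8K sg
 * entries and vm_pgoff = 3 (12K into the object), the first entry is
 * skipped entirely (12K - 8K = 4K of offset remains), and mapping then
 * starts 4K into the second entry's page run.
 */
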
static const struct file_operations sprd_drm_gem_fops = {
	.mmap = sprd_drm_gem_mmap_buffer,
};

int sprd_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_sprd_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object:h[%d]\n", args->handle);
		return -EINVAL;
	}

	obj->filp->f_op = &sprd_drm_gem_fops;
	obj->filp->private_data = obj;

	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR_VALUE(addr))
		return (int)addr;

	args->mapped = addr;

	DRM_DEBUG("%s:h[%d]s[%d]o[0x%x]mapped[0x%x]\n", __func__,
		args->handle, (int)args->size, (int)obj, (int)args->mapped);

	return 0;
}

int sprd_drm_gem_mmap_iommu_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_sprd_gem_mmap *args = data;
	struct drm_gem_object *obj;
	struct ion_handle *ion_handle;
	unsigned long addr = 0;
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct sprd_drm_gem_buf *buf;
	int domain_num = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	sprd_gem_obj = to_sprd_gem_obj(obj);
	buf = sprd_gem_obj->buffer;
	if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
		if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_MM;
		else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_GSP;

		ion_handle = buf->ion_handle;
		sprd_map_iommu(ion_handle, domain_num, &addr);
	} else {
		DRM_ERROR("MMAP_IOMMU not applicable on CONTIG HEAP\n");
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	args->mapped = addr;

	return 0;
}

int sprd_drm_gem_unmap_iommu_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_sprd_gem_mmap *args = data;
	struct drm_gem_object *obj;
	struct ion_handle *ion_handle;
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct sprd_drm_gem_buf *buf;
	int ret = 0, domain_num = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	sprd_gem_obj = to_sprd_gem_obj(obj);
	buf = sprd_gem_obj->buffer;
	if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
		if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_MM;
		else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
			domain_num = IOMMU_GSP;

		ion_handle = buf->ion_handle;
		sprd_unmap_iommu(ion_handle, domain_num);
	} else {
		DRM_ERROR("UNMAP_IOMMU not applicable on CONTIG HEAP\n");
		ret = -EINVAL;
	}

	drm_gem_object_unreference_unlocked(obj);
	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at sprd_drm_gem_mmap_iommu_ioctl().
	 */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

int sprd_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct drm_sprd_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	sprd_gem_obj = to_sprd_gem_obj(obj);

	args->flags = sprd_gem_obj->flags;
	args->size = sprd_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int sprd_drm_gem_init_object(struct drm_gem_object *obj)
{
	return 0;
}

void sprd_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct sprd_drm_gem_buf *buf;

	sprd_gem_obj = to_sprd_gem_obj(obj);
	buf = sprd_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	sprd_drm_gem_destroy(to_sprd_gem_obj(obj));
}

int sprd_drm_gem_dumb_create(struct drm_file *file_priv,
			struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct sprd_drm_gem_index gem_idx;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * args->bpp >> 3;
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gem_idx.bufcount = 1;
	gem_idx.idx_size[0] = args->size;
	gem_idx.flags = args->flags;

	sprd_gem_obj = sprd_drm_gem_create(dev, &gem_idx);
	if (IS_ERR(sprd_gem_obj))
		return PTR_ERR(sprd_gem_obj);

	ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		sprd_drm_gem_destroy(sprd_gem_obj);
		return ret;
	}

	return 0;
}

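/*
 * Size arithmetic example: a 1280x720 XRGB8888 dumb buffer (bpp = 32)
 * gives pitch = 1280 * 32 >> 3 = 5120 bytes and
 * size = PAGE_ALIGN(5120 * 720) = 3686400 bytes (already page aligned).
 */
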
int sprd_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				struct drm_device *dev, uint32_t handle,
				uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int sprd_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them are 0 then sprd_drm_gem_free_object()
	 * would be called by callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

int sprd_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = sprd_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map pages.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

int sprd_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	sprd_gem_obj = to_sprd_gem_obj(obj);

	ret = check_gem_flags(sprd_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(sprd_gem_obj, vma);

	return ret;
}

int sprd_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_sprd_gem_lock_handle *args = data;
	struct drm_gem_object *obj;
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct drm_sprd_gem_object_wait_list_entry *lock_item;
	int i;
	int result = 0;

	DRM_DEBUG_DRIVER("%s line:%d\n", __func__, __LINE__);

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		result = -EBADF;
		goto out_unlock;
	}

	sprd_gem_obj = to_sprd_gem_obj(obj);

	if (sprd_gem_obj->lockpid) {
		/*
		 * if a pid already had it locked,
		 * create and add to wait list
		 */
		for (i = 0; i < DRM_SPRD_HANDLE_WAIT_ENTRIES; i++) {
			if (sprd_gem_obj->wait_entries[i].in_use == 0) {
				/* this one is empty */
				lock_item = &sprd_gem_obj->wait_entries[i];
				lock_item->in_use = 1;
				lock_item->pid = args->pid;
				INIT_LIST_HEAD((struct list_head *)
						&sprd_gem_obj->wait_entries[i]);
				break;
			}
		}

		if (i == DRM_SPRD_HANDLE_WAIT_ENTRIES) {
			result = -EFAULT;
			drm_gem_object_unreference(obj);
			goto out_unlock;
		}

		list_add_tail((struct list_head *)&lock_item->list,
				&sprd_gem_obj->wait_list);
		mutex_unlock(&dev->struct_mutex);
		/* here we need to block */
		wait_event_interruptible_timeout(
			sprd_gem_obj->wait_entries[i].process_wait_q,
			(sprd_gem_obj->lockpid == 0),
			msecs_to_jiffies(20000));
		mutex_lock(&dev->struct_mutex);
		lock_item->in_use = 0;
	}
	sprd_gem_obj->lockpid = args->pid;
	DRM_DEBUG_DRIVER("%s lockpid:%d\n", __func__, sprd_gem_obj->lockpid);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return result;
}

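/*
 * Locking behavior sketch: if the handle is free, the caller's pid takes
 * it immediately. If it is held, the caller parks on one of the
 * DRM_SPRD_HANDLE_WAIT_ENTRIES wait slots and sleeps until the unlock
 * ioctl wakes it (or the 20 s timeout fires), then claims lockpid itself.
 * The timeout path is silent: a caller can take the lock after the wait
 * expires even if the previous holder never unlocked.
 */
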
int sprd_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_sprd_gem_unlock_handle *args = data;
	struct drm_gem_object *obj;
	struct sprd_drm_gem_obj *unlock_obj;
	struct drm_sprd_gem_object_wait_list_entry *lock_next;
	int result = 0;

	DRM_DEBUG_DRIVER("%s line:%d\n", __func__, __LINE__);

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		result = -EBADF;
		goto out_unlock;
	}

	unlock_obj = to_sprd_gem_obj(obj);
	if (!list_empty(&unlock_obj->wait_list)) {
		lock_next =
			(struct drm_sprd_gem_object_wait_list_entry *)
			unlock_obj->wait_list.prev;

		list_del((struct list_head *)&lock_next->list);

		unlock_obj->lockpid = 0;
		wake_up_interruptible(
				&lock_next->process_wait_q);
		lock_next->pid = 0;
	} else {
		/* List is empty so set pid to 0 */
		unlock_obj->lockpid = 0;
	}

	/* drop this lookup's reference */
	drm_gem_object_unreference(obj);
	/* and the reference still held by the lock ioctl's lookup */
	drm_gem_object_unreference(obj);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return result;
}

int sprd_gem_cache_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_sprd_gem_cache_op *args = data;
	int ret = 0;
	struct drm_gem_object *obj;
	struct sprd_drm_gem_obj *sprd_gem_obj;
	struct sprd_drm_gem_buf *buf;
	struct sg_table *sgt;
	unsigned int cache_op = args->flags & (~SPRD_DRM_ALL_CACHE);

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->gem_handle);
	if (!obj) {
		DRM_ERROR("invalid handle[%d]\n", args->gem_handle);
		ret = -EINVAL;
		goto err_invalid_handle;
	}

	sprd_gem_obj = to_sprd_gem_obj(obj);
	buf = sprd_gem_obj->buffer;
	sgt = buf->sgt;

	DRM_DEBUG("%s:h[%d]s[%d]f[0x%x]a[0x%x]o[0x%x]\n",
		"gc", args->gem_handle, (int)args->size, args->flags,
		(int)args->usr_addr, (int)obj);

	if (!IS_CACHABLE_BUFFER(sprd_gem_obj->flags)) {
		DRM_ERROR("invalid flags[0x%x] for h[%d]\n",
			sprd_gem_obj->flags, args->gem_handle);
		ret = -EINVAL;
		goto err;
	}

	switch (cache_op) {
	case SPRD_DRM_CACHE_INV:
		dma_sync_sg_for_cpu(NULL, sgt->sgl, sgt->nents,
					DMA_FROM_DEVICE);
		break;
	case SPRD_DRM_CACHE_CLN:
		dma_sync_sg_for_device(NULL, sgt->sgl, sgt->nents,
					DMA_TO_DEVICE);
		break;
	case SPRD_DRM_CACHE_FSH:
		dma_sync_sg_for_device(NULL, sgt->sgl, sgt->nents,
					DMA_TO_DEVICE);
		dma_sync_sg_for_cpu(NULL, sgt->sgl, sgt->nents,
					DMA_FROM_DEVICE);
		break;
	default:
		DRM_ERROR("invalid op[0x%x] for h[%d]\n", cache_op,
			args->gem_handle);
		ret = -EINVAL;
		break;
	}

err:
	drm_gem_object_unreference(obj);
err_invalid_handle:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}