3 * Copyright (c) 2014 Spreadtrum Communications, Inc.
5 * This program is free software: you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
19 #include <linux/shmem_fs.h>
20 #include <drm/sprd_drm.h>
21 #include <linux/sprd_iommu.h>
23 #include "video/ion_sprd.h"
24 #include "sprd_drm_drv.h"
25 #include "sprd_drm_gem.h"
26 #include "sprd_drm_buf.h"
28 static unsigned int convert_to_vm_err_msg(int msg)
/*
 * convert_to_vm_err_msg - translate a kernel error code into a VM_FAULT_*
 * value for the GEM fault handler (sprd_drm_gem_fault).
 * NOTE(review): this view of the file is truncated; the switch/case
 * scaffolding and final return around these assignments are not visible.
 */
36 out_msg = VM_FAULT_NOPAGE;
40 out_msg = VM_FAULT_OOM;
44 out_msg = VM_FAULT_SIGBUS;
/*
 * check_gem_flags - validate user-supplied GEM flags.
 * Rejects any bit outside SPRD_BO_MASK | SPRD_BO_DEV_MASK; when the IOMMU
 * is enabled, a non-contiguous buffer must not target the OVERLAY or
 * SYSTEM device heaps.  NOTE(review): truncated view — the error return
 * paths between these checks are not visible.
 */
51 static int check_gem_flags(unsigned int flags)
53 if (flags & ~(SPRD_BO_MASK | SPRD_BO_DEV_MASK))
56 #ifdef CONFIG_SPRD_IOMMU
57 if (IS_NONCONTIG_BUFFER(flags)) {
58 if (IS_DEV_OVERLAY_BUFFER(flags))
61 if (IS_DEV_SYSTEM_BUFFER(flags))
68 DRM_ERROR("invalid flags[0x%x]\n", flags);
/*
 * update_vm_cache_attr - choose vma->vm_page_prot from the object's cache
 * flags: cacheable keeps the default protection, write-combine uses
 * pgprot_writecombine(), everything else falls back to non-cached.
 */
72 static void update_vm_cache_attr(struct sprd_drm_gem_obj *obj,
73 struct vm_area_struct *vma)
75 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
77 /* non-cachable as default. */
78 if (obj->flags & SPRD_BO_CACHABLE)
79 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
80 else if (obj->flags & SPRD_BO_WC)
82 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
85 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
/*
 * roundup_gem_size - round a requested allocation size up to the
 * granularity required by the buffer type: contiguous buffers align to
 * SECTION_SIZE (or SZ_64K when CMA alignment is configured), everything
 * else to PAGE_SIZE.
 */
88 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
90 if (!IS_NONCONTIG_BUFFER(flags)) {
91 #ifndef CONFIG_CMA_ALIGNMENT
93 return roundup(size, SECTION_SIZE);
95 /* ToDo: need to sync with additional align size */
97 return roundup(size, SZ_64K);
102 return roundup(size, PAGE_SIZE);
/*
 * sprd_gem_get_pages - populate a page array from the GEM object's shmem
 * backing store.  On a failed page read the pages gathered so far are
 * released and an ERR_PTR is returned.  NOTE(review): truncated view —
 * the success return and parts of the error-unwind loop are not visible.
 */
105 struct page **sprd_gem_get_pages(struct drm_gem_object *obj,
109 struct address_space *mapping;
110 struct page *p, **pages;
113 /* This is the shared memory object that backs the GEM resource */
114 inode = obj->filp->f_path.dentry->d_inode;
115 mapping = inode->i_mapping;
117 npages = obj->size >> PAGE_SHIFT;
119 pages = drm_malloc_ab(npages, sizeof(struct page *));
121 return ERR_PTR(-ENOMEM);
123 gfpmask |= mapping_gfp_mask(mapping);
125 for (i = 0; i < npages; i++) {
126 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
136 page_cache_release(pages[i]);
138 drm_free_large(pages);
139 return ERR_PTR(PTR_ERR(p));
/*
 * sprd_gem_put_pages - drop the references taken by sprd_gem_get_pages(),
 * optionally marking each page dirty and/or accessed first, then free the
 * page array itself.
 */
142 static void sprd_gem_put_pages(struct drm_gem_object *obj,
144 bool dirty, bool accessed)
148 npages = obj->size >> PAGE_SHIFT;
150 for (i = 0; i < npages; i++) {
152 set_page_dirty(pages[i]);
155 mark_page_accessed(pages[i]);
157 /* Undo the reference we took when populating the table */
158 page_cache_release(pages[i]);
161 drm_free_large(pages);
/*
 * sprd_drm_gem_map_pages - insert one page of the buffer into the vma at
 * f_vaddr.  Non-contiguous buffers take the pfn from the per-page array;
 * contiguous buffers compute it from dma_addr plus the page offset.
 */
164 static int sprd_drm_gem_map_pages(struct drm_gem_object *obj,
165 struct vm_area_struct *vma,
166 unsigned long f_vaddr,
169 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
170 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
173 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
177 pfn = page_to_pfn(buf->pages[page_offset++]);
179 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
181 return vm_insert_mixed(vma, f_vaddr, pfn);
/*
 * sprd_drm_gem_get_pages - back the object with shmem pages and build an
 * sg_table describing them (one PAGE_SIZE entry per page).  On failure
 * the pages are returned via sprd_gem_put_pages().
 * NOTE(review): truncated view — allocation checks, the sg iteration
 * loop header and the cleanup labels are not visible.
 */
184 static int sprd_drm_gem_get_pages(struct drm_gem_object *obj)
186 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
187 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
188 struct scatterlist *sgl;
190 unsigned int npages, i = 0;
194 DRM_DEBUG_KMS("already allocated.\n");
198 pages = sprd_gem_get_pages(obj, GFP_KERNEL);
200 DRM_ERROR("failed to get pages.\n");
201 return PTR_ERR(pages);
204 npages = obj->size >> PAGE_SHIFT;
205 buf->page_size = PAGE_SIZE;
207 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
209 DRM_ERROR("failed to allocate sg table.\n");
214 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
216 DRM_ERROR("failed to initialize sg table.\n");
223 /* set all pages to sg list. */
225 sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
226 sg_dma_address(sgl) = page_to_phys(pages[i]);
231 /* add some codes for UNCACHED type here. TODO */
239 sprd_gem_put_pages(obj, pages, true, false);
/*
 * sprd_drm_gem_put_pages - tear down what sprd_drm_gem_get_pages() built:
 * free the sg_table and release the shmem pages (marked dirty, not
 * accessed).
 */
244 static void sprd_drm_gem_put_pages(struct drm_gem_object *obj)
246 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
247 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
250 * if buffer typs is SPRD_BO_NONCONTIG then release all pages
251 * allocated at gem fault handler.
253 sg_free_table(buf->sgt);
257 sprd_gem_put_pages(obj, buf->pages, true, false);
260 /* add some codes for UNCACHED type here. TODO */
/*
 * sprd_drm_gem_handle_create - register the object in the file's handle
 * idr and hand the allocated handle back to userspace; the reference
 * taken at allocation is dropped because the handle now owns it.
 */
263 static int sprd_drm_gem_handle_create(struct drm_gem_object *obj,
264 struct drm_file *file_priv,
265 unsigned int *handle)
270 * allocate a id of idr table where the obj is registered
271 * and handle has the id what user can see.
273 ret = drm_gem_handle_create(file_priv, obj, handle);
277 DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
279 /* drop reference from allocate - handle holds it now. */
280 drm_gem_object_unreference_unlocked(obj);
/*
 * sprd_drm_gem_destroy - final teardown of a sprd GEM object: free the
 * backing buffer, drop its bookkeeping struct, remove the mmap offset if
 * one was created, then release the underlying GEM object.
 * NOTE(review): truncated view — the kfree of sprd_gem_obj itself is not
 * visible here.
 */
285 void sprd_drm_gem_destroy(struct sprd_drm_gem_obj *sprd_gem_obj)
287 struct drm_gem_object *obj;
288 struct sprd_drm_gem_buf *buf;
290 obj = &sprd_gem_obj->base;
291 buf = sprd_gem_obj->buffer;
296 DRM_DEBUG("%s:o[0x%x]a[0x%x]\n", "gf",
297 (int)obj, (int)sprd_gem_obj->buffer->dma_addr);
299 sprd_drm_free_buf(obj->dev, sprd_gem_obj->flags, buf);
301 sprd_drm_fini_buf(obj->dev, buf);
302 sprd_gem_obj->buffer = NULL;
304 if (obj->map_list.map)
305 drm_gem_free_mmap_offset(obj);
307 /* release file pointer to gem object. */
308 drm_gem_object_release(obj);
/*
 * sprd_drm_gem_init - allocate a sprd_drm_gem_obj of the given size and
 * initialize its embedded drm_gem_object (which creates the backing
 * shmem file).  NOTE(review): truncated view — the error paths and the
 * final return of sprd_gem_obj are not visible.
 */
314 struct sprd_drm_gem_obj *sprd_drm_gem_init(struct drm_device *dev,
317 struct sprd_drm_gem_obj *sprd_gem_obj;
318 struct drm_gem_object *obj;
321 sprd_gem_obj = kzalloc(sizeof(*sprd_gem_obj), GFP_KERNEL);
323 DRM_ERROR("failed to allocate sprd gem object\n");
327 sprd_gem_obj->size = size;
328 obj = &sprd_gem_obj->base;
330 ret = drm_gem_object_init(dev, obj, size);
332 DRM_ERROR("failed to initialize gem object\n");
337 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
/*
 * sprd_drm_gem_create - allocate a GEM object backed by a real buffer.
 * Validates the flags, sums the per-index sizes, rounds the total up per
 * buffer type, allocates buffer + object, then fills the idx_addr table
 * so each sub-buffer's address can be resolved later, and initializes
 * the lock/wait bookkeeping used by the lock/unlock ioctls.
 * NOTE(review): truncated view — several error checks and the success
 * return are not visible; the trailing labels are the error-unwind path.
 */
342 struct sprd_drm_gem_obj *sprd_drm_gem_create(struct drm_device *dev,
343 struct sprd_drm_gem_index *args)
345 struct sprd_drm_gem_obj *sprd_gem_obj;
346 struct sprd_drm_gem_buf *buf;
347 int ret, i=0, j, tsize = 0;
349 ret = check_gem_flags(args->flags);
353 /* ToDo: need to check align */
354 for (i = 0; i < args->bufcount; i++)
355 tsize += args->idx_size[i];
358 DRM_ERROR("invalid size.\n");
359 return ERR_PTR(-EINVAL);
362 tsize = roundup_gem_size(tsize, args->flags);
364 buf = sprd_drm_init_buf(dev, tsize);
366 return ERR_PTR(-ENOMEM);
368 sprd_gem_obj = sprd_drm_gem_init(dev, tsize);
374 sprd_gem_obj->buffer = buf;
376 /* set memory type and cache attribute from user side. */
377 sprd_gem_obj->flags = args->flags;
379 ret = sprd_drm_alloc_buf(dev, buf, args->flags);
383 memset(buf->idx_addr, 0x00, sizeof(buf->idx_addr));
384 buf->idx_addr[0] = buf->dma_addr;
385 buf->bufcount = args->bufcount;
/* Each index address is the previous one advanced by that index's size. */
387 for (i = 0; i < buf->bufcount; i++) {
389 if (buf->bufcount > j)
390 buf->idx_addr[j] = buf->idx_addr[i] + args->idx_size[i];
393 sprd_gem_obj->lockpid=0;
394 INIT_LIST_HEAD(&sprd_gem_obj->wait_list);
396 for (i = 0; i < DRM_SPRD_HANDLE_WAIT_ENTRIES; i++) {
397 INIT_LIST_HEAD((struct list_head *) &sprd_gem_obj->wait_entries[i]);
398 sprd_gem_obj->wait_entries[i].pid = 0;
399 init_waitqueue_head(&sprd_gem_obj->wait_entries[i].process_wait_q);
405 drm_gem_object_release(&sprd_gem_obj->base);
408 sprd_drm_fini_buf(dev, buf);
/*
 * sprd_drm_gem_create_ioctl - DRM_SPRD_GEM_CREATE handler.  Wraps the
 * single user-requested size/flags into a one-entry gem_index, creates
 * the object and a userspace handle, and logs the elapsed time.
 */
412 int sprd_drm_gem_create_ioctl(struct drm_device *dev, void *data,
413 struct drm_file *file_priv)
415 struct drm_sprd_gem_create *args = data;
416 struct sprd_drm_gem_obj *sprd_gem_obj;
417 struct sprd_drm_gem_index gem_idx;
418 struct timeval val_start, val_end;
419 uint64_t time_start, time_end;
422 do_gettimeofday(&val_start);
423 time_start = (uint64_t)(val_start.tv_sec * 1000000 + val_start.tv_usec);
426 gem_idx.idx_size[0] = args->size;
427 gem_idx.flags = args->flags;
429 sprd_gem_obj = sprd_drm_gem_create(dev, &gem_idx);
430 if (IS_ERR(sprd_gem_obj)) {
431 DRM_ERROR("failed to sprd_drm_gem_create:s[%d]f[0x%x]\n",
432 (int)args->size, args->flags);
433 return PTR_ERR(sprd_gem_obj);
436 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
/* Handle creation failed: destroy the freshly created object. */
439 DRM_ERROR("failed to sprd_drm_gem_handle_create:s[%d]f[0x%x]\n",
440 (int)args->size, args->flags);
441 sprd_drm_gem_destroy(sprd_gem_obj);
445 do_gettimeofday(&val_end);
446 time_end = (uint64_t)(val_end.tv_sec * 1000000 + val_end.tv_usec);
448 DRM_DEBUG("%s:h[%d]s[%d]f[0x%x]o[0x%x]a[0x%x][%lld us]\n",
449 "ga",args->handle, (int)args->size, args->flags,
450 (int)&sprd_gem_obj->base,
451 (int)sprd_gem_obj->buffer->dma_addr, time_end - time_start);
/*
 * sprd_drm_gem_create_index_ioctl - multi-index variant of GEM create.
 * Only contiguous memory is supported here; the index array in args
 * describes the per-sub-buffer sizes.
 */
456 int sprd_drm_gem_create_index_ioctl(struct drm_device *dev, void *data,
457 struct drm_file *file_priv)
459 struct sprd_drm_gem_index *args = data;
460 struct sprd_drm_gem_obj *sprd_gem_obj;
463 if (args->flags & SPRD_BO_NONCONTIG) {
464 DRM_ERROR("does not support non-contig memory\n");
468 sprd_gem_obj = sprd_drm_gem_create(dev, args);
469 if (IS_ERR(sprd_gem_obj))
470 return PTR_ERR(sprd_gem_obj);
472 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
475 sprd_drm_gem_destroy(sprd_gem_obj);
479 DRM_INFO("%s:h[%d]cnt[%d]sz[%d %d %d]f[0x%x]o[0x%x]a[0x%x]\n",
480 __func__,args->handle, args->bufcount,
481 (int)args->idx_size[0], (int)args->idx_size[1], (int)args->idx_size[2],
482 args->flags, (int)&sprd_gem_obj->base,
483 (int)sprd_gem_obj->buffer->dma_addr);
/*
 * sprd_drm_gem_prime_handle_to_fd - export a GEM handle as a dma-buf fd
 * via the driver's ION client.  The lookup reference is dropped once the
 * fd has been obtained.
 */
488 int sprd_drm_gem_prime_handle_to_fd(struct drm_device *dev,
489 struct drm_file *file_priv, uint32_t handle,
490 uint32_t flags, int *prime_fd)
493 struct sprd_drm_gem_obj *sprd_gem_obj;
494 struct drm_gem_object *obj;
495 struct sprd_drm_gem_buf *buf;
496 struct sprd_drm_private *private;
499 DRM_ERROR("%s: Handle to fd failed. Null handle\n", __func__);
503 obj = drm_gem_object_lookup(dev, file_priv, handle);
505 DRM_ERROR("failed to lookup gem object.\n");
509 private = dev->dev_private;
510 sprd_gem_obj = to_sprd_gem_obj(obj);
511 buf = sprd_gem_obj->buffer;
512 *prime_fd = ion_share_dma_buf_fd(private->sprd_drm_ion_client,
514 drm_gem_object_unreference(obj);
516 if (*prime_fd == -EINVAL) {
/*
 * sprd_drm_gem_prime_fd_to_handle - import a dma-buf fd through ION and
 * wrap it in a new GEM object/handle.  Derives the GEM flags from the
 * ION buffer's properties (contiguity, cacheability, heap id), adopts
 * the ION sg_table, and builds a page-pointer array for later mapping.
 * NOTE(review): truncated view — many error checks and the cleanup-label
 * structure at the end are only partially visible.
 */
524 int sprd_drm_gem_prime_fd_to_handle(struct drm_device *dev,
525 struct drm_file *file_priv, int prime_fd, uint32_t *handle)
527 struct ion_handle *ion_handle;
528 struct sprd_drm_gem_obj *sprd_gem_obj;
530 struct sprd_drm_gem_buf *buf = NULL;
531 unsigned int i = 0, nr_pages = 0, heap_id;
532 int ret = 0, gem_handle;
533 struct sprd_drm_private *private;
534 struct scatterlist *sg = NULL;
535 struct drm_gem_object *obj;
536 unsigned long sgt_size;
538 private = dev->dev_private;
539 ion_handle = ion_import_dma_buf(private->sprd_drm_ion_client, prime_fd);
540 if (IS_ERR_OR_NULL(ion_handle)) {
541 DRM_ERROR("Unable to import dmabuf\n");
545 ion_handle_get_size(private->sprd_drm_ion_client,
546 ion_handle, &size, &heap_id);
549 "cannot create GEM object from zero size ION buffer\n");
554 buf = sprd_drm_init_buf(dev, size);
556 DRM_ERROR("Unable to allocate the GEM buffer\n");
561 sprd_gem_obj = sprd_drm_gem_init(dev, size);
563 DRM_ERROR("Unable to initialize GEM object\n");
567 sprd_gem_obj->buffer = buf;
568 obj = &sprd_gem_obj->base;
/* Physically-contiguous ION buffers become CONTIG GEM objects. */
570 ret = ion_is_phys(private->sprd_drm_ion_client, ion_handle);
572 sprd_gem_obj->flags = SPRD_BO_NONCONTIG;
574 sprd_gem_obj->flags = SPRD_BO_CONTIG;
576 DRM_ERROR("Unable to get flag, Invalid handle\n");
580 /* ion_handle is validated in ion_is_phys, no need to check again */
581 ret = ion_is_cached(private->sprd_drm_ion_client, ion_handle);
583 sprd_gem_obj->flags |= SPRD_BO_CACHABLE;
/* Map the ION heap id onto the corresponding SPRD_BO_DEV_* flag. */
585 if ((heap_id == ION_HEAP_ID_MASK_GSP) || (heap_id == ION_HEAP_ID_MASK_GSP_IOMMU))
586 sprd_gem_obj->flags |= SPRD_BO_DEV_GSP;
587 else if ((heap_id == ION_HEAP_ID_MASK_MM) || (heap_id == ION_HEAP_ID_MASK_MM_IOMMU))
588 sprd_gem_obj->flags |= SPRD_BO_DEV_MM;
589 else if (heap_id == ION_HEAP_ID_MASK_OVERLAY)
590 sprd_gem_obj->flags |= SPRD_BO_DEV_OVERLAY;
591 else if (heap_id == ION_HEAP_ID_MASK_SYSTEM)
592 sprd_gem_obj->flags |= SPRD_BO_DEV_SYSTEM;
594 DRM_ERROR("Heap id not supported\n");
599 buf->ion_handle = ion_handle;
600 buf->sgt = ion_sg_table(private->sprd_drm_ion_client, buf->ion_handle);
602 DRM_ERROR("failed to allocate sg table.\n");
607 buf->dma_addr = sg_dma_address(buf->sgt->sgl);
608 for_each_sg(buf->sgt->sgl, sg, buf->sgt->nents, i)
/*
 * NOTE(review): sgt_size uses sizeof(struct page) per entry, but the
 * array stores struct page pointers (buf->pages[i] = ...).  This
 * over-allocates; sizeof(struct page *) would appear sufficient —
 * verify against the declaration of buf->pages.
 */
611 sgt_size = sizeof(struct page) * nr_pages;
612 buf->pages = kzalloc(sgt_size, GFP_KERNEL | __GFP_NOWARN);
/* kzalloc failed (likely high order) — fall back to vzalloc. */
615 order = get_order(sgt_size);
616 DRM_ERROR("%s: kzalloc failed for sg list: order:%d\n",
618 buf->pages = vzalloc(sgt_size);
620 DRM_ERROR("failed to allocate pages.\n");
626 for_each_sg(buf->sgt->sgl, sg, buf->sgt->nents, i)
627 buf->pages[i] = phys_to_page(sg_dma_address(sg));
629 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
630 (unsigned long)buf->dma_addr, buf->size);
632 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
635 sprd_drm_gem_destroy(sprd_gem_obj);
638 *handle = gem_handle;
/* Error-unwind path: undo partial construction and drop the ION ref. */
642 buf->dma_addr = (dma_addr_t)NULL;
645 sprd_gem_obj->buffer = NULL;
646 /* release file pointer to gem object. */
647 drm_gem_object_release(obj);
651 sprd_drm_fini_buf(dev, buf);
653 ion_free(private->sprd_drm_ion_client, ion_handle);
/*
 * sprd_drm_gem_get_dma_addr - resolve a handle to a pointer at the
 * buffer's dma_addr field.  For non-contiguous buffers the ION handle is
 * first mapped through the appropriate IOMMU domain (MM or GSP), which
 * fills in dma_addr.  The lookup reference is intentionally kept; the
 * matching unreference happens in sprd_drm_gem_put_dma_addr().
 */
658 void *sprd_drm_gem_get_dma_addr(struct drm_device *dev,
659 unsigned int gem_handle,
660 struct drm_file *file_priv)
662 struct sprd_drm_gem_obj *sprd_gem_obj;
663 struct drm_gem_object *obj;
664 struct ion_handle *ion_handle;
665 struct sprd_drm_gem_buf *buf;
668 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
670 DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
671 return ERR_PTR(-EINVAL);
674 sprd_gem_obj = to_sprd_gem_obj(obj);
676 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
677 buf = sprd_gem_obj->buffer;
678 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
679 domain_num = IOMMU_MM;
680 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
681 domain_num = IOMMU_GSP;
683 ion_handle = buf->ion_handle;
684 if (sprd_map_iommu(ion_handle, domain_num,
685 (unsigned long *)&sprd_gem_obj->buffer->dma_addr)) {
686 DRM_ERROR("failed to map iommu:h[%d]o[0x%x]\n",
687 gem_handle, (int)obj);
688 drm_gem_object_unreference_unlocked(obj);
689 return ERR_PTR(-EINVAL);
693 DRM_DEBUG("%s:h[%d]o[0x%x]a[0x%x]\n",
694 __func__,gem_handle, (int)obj,
695 (int)sprd_gem_obj->buffer->dma_addr);
697 return &sprd_gem_obj->buffer->dma_addr;
/*
 * sprd_drm_gem_put_dma_addr - counterpart of sprd_drm_gem_get_dma_addr().
 * Unmaps the IOMMU mapping for non-contiguous buffers and drops two
 * references: the one taken by this function's own lookup and the one
 * left over from get_dma_addr().
 */
700 void sprd_drm_gem_put_dma_addr(struct drm_device *dev,
701 unsigned int gem_handle,
702 struct drm_file *file_priv)
704 struct sprd_drm_gem_obj *sprd_gem_obj;
705 struct drm_gem_object *obj;
706 struct ion_handle *ion_handle;
707 struct sprd_drm_gem_buf *buf;
710 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
712 DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
716 sprd_gem_obj = to_sprd_gem_obj(obj);
718 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
719 buf = sprd_gem_obj->buffer;
720 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
721 domain_num = IOMMU_MM;
722 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
723 domain_num = IOMMU_GSP;
725 ion_handle = buf->ion_handle;
726 if (sprd_unmap_iommu(ion_handle, domain_num))
727 DRM_ERROR("failed to unmap iommu:h[%d]o[0x%x]\n",
728 gem_handle, (int)obj);
731 drm_gem_object_unreference_unlocked(obj);
733 DRM_DEBUG("%s:h[%d]o[0x%x]\n",
734 __func__,gem_handle, (int)obj);
736 * decrease obj->refcount one more time because we has already
737 * increased it at sprd_drm_gem_get_dma_addr().
739 drm_gem_object_unreference_unlocked(obj);
/*
 * sprd_drm_gem_get_size - return the backing buffer's size for a handle.
 * The lookup reference is dropped before returning.
 */
742 unsigned long sprd_drm_gem_get_size(struct drm_device *dev,
743 unsigned int gem_handle,
744 struct drm_file *file_priv)
746 struct sprd_drm_gem_obj *sprd_gem_obj;
747 struct drm_gem_object *obj;
749 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
751 DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
755 sprd_gem_obj = to_sprd_gem_obj(obj);
757 drm_gem_object_unreference_unlocked(obj);
759 return sprd_gem_obj->buffer->size;
/*
 * sprd_drm_gem_get_obj_addr - look up an object by its global flink name
 * and return a pointer to the address of sub-buffer `index`.  For
 * non-contiguous buffers the ION handle is mapped through the proper
 * IOMMU domain first.  Exported for use by other kernel modules.
 */
762 void *sprd_drm_gem_get_obj_addr(unsigned int name, unsigned int index)
764 struct sprd_drm_gem_obj *sprd_gem_obj;
765 struct drm_gem_object *obj;
766 struct ion_handle *ion_handle;
767 struct sprd_drm_gem_buf *buf;
770 mutex_lock(&sprd_drm_dev->object_name_lock);
771 obj = idr_find(&sprd_drm_dev->object_name_idr, (int) name);
772 mutex_unlock(&sprd_drm_dev->object_name_lock);
775 DRM_ERROR("name[%d]failed to lookup gem object.\n", name);
776 return ERR_PTR(-EFAULT);
779 sprd_gem_obj = to_sprd_gem_obj(obj);
780 buf = sprd_gem_obj->buffer;
782 if (index >= buf->bufcount) {
783 DRM_ERROR("invalid index[%d],bufcount[%d]\n",
784 index, buf->bufcount);
785 return ERR_PTR(-EINVAL);
788 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
789 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
790 domain_num = IOMMU_MM;
791 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
792 domain_num = IOMMU_GSP;
794 ion_handle = buf->ion_handle;
795 if (sprd_map_iommu(ion_handle, domain_num,
796 (unsigned long *)&sprd_gem_obj->buffer->dma_addr)) {
797 DRM_ERROR("failed to map iommu\n");
798 return ERR_PTR(-EINVAL);
802 DRM_DEBUG("%s:name[%d]o[0x%x]idx[%d]a[0x%x]\n",
803 __func__, name, (int)obj, index, (int)buf->idx_addr[index]);
805 return &buf->idx_addr[index];
807 EXPORT_SYMBOL(sprd_drm_gem_get_obj_addr);
/*
 * sprd_drm_gem_map_offset_ioctl - return the fake mmap offset for a
 * handle; thin wrapper around sprd_drm_gem_dumb_map_offset().
 */
809 int sprd_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
810 struct drm_file *file_priv)
812 struct drm_sprd_gem_map_off *args = data;
814 DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
815 args->handle, (unsigned long)args->offset);
817 if (!(dev->driver->driver_features & DRIVER_GEM)) {
818 DRM_ERROR("does not support GEM.\n");
822 return sprd_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
/*
 * sprd_drm_gem_mmap_buffer - mmap implementation installed on the GEM
 * object's shmem file by sprd_drm_gem_mmap_ioctl().  Non-contiguous
 * buffers are mapped by walking the sg list with remap_pfn_range();
 * contiguous buffers are mapped in one shot from dma_addr.
 * NOTE(review): truncated view — several checks and the returns in the
 * sg-walk loop are not visible.
 */
826 static int sprd_drm_gem_mmap_buffer(struct file *filp,
827 struct vm_area_struct *vma)
829 struct drm_gem_object *obj = filp->private_data;
830 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
831 struct sprd_drm_gem_buf *buffer;
832 unsigned long pfn, vm_size;
834 vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
836 update_vm_cache_attr(sprd_gem_obj, vma);
838 vm_size = vma->vm_end - vma->vm_start;
841 * a buffer contains information to physically continuous memory
842 * allocated by user request or at framebuffer creation.
844 buffer = sprd_gem_obj->buffer;
846 /* check if user-requested size is valid. */
847 if (vm_size > buffer->size)
850 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
851 unsigned long addr = vma->vm_start;
852 unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
853 struct scatterlist *sg;
856 for_each_sg(buffer->sgt->sgl, sg, buffer->sgt->nents, i) {
857 struct page *page = sg_page(sg);
858 unsigned long remainder = vma->vm_end - addr;
859 unsigned long len = sg_dma_len(sg);
/* Skip whole sg entries that lie before the requested offset. */
861 if (offset >= sg_dma_len(sg)) {
862 offset -= sg_dma_len(sg);
865 page += offset / PAGE_SIZE;
866 len = sg_dma_len(sg) - offset;
869 len = min(len, remainder);
870 remap_pfn_range(vma, addr, page_to_pfn(page), len,
873 if (addr >= vma->vm_end) {
879 * get page frame number to physical memory to be mapped
882 pfn = ((unsigned long)sprd_gem_obj->buffer->dma_addr) >>
885 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
887 if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
888 vma->vm_page_prot)) {
889 DRM_ERROR("failed to remap pfn range.\n");
/* File operations installed on the GEM shmem file for direct mmap. */
897 static const struct file_operations sprd_drm_gem_fops = {
898 .mmap = sprd_drm_gem_mmap_buffer,
/*
 * sprd_drm_gem_mmap_ioctl - map the object into the caller's address
 * space by pointing the shmem file's f_op at sprd_drm_gem_fops and
 * calling vm_mmap() on it; the resulting address is returned in args.
 */
901 int sprd_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
902 struct drm_file *file_priv)
904 struct drm_sprd_gem_mmap *args = data;
905 struct drm_gem_object *obj;
908 if (!(dev->driver->driver_features & DRIVER_GEM)) {
909 DRM_ERROR("does not support GEM.\n");
913 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
915 DRM_ERROR("failed to lookup gem object:h[%d]\n", args->handle);
919 obj->filp->f_op = &sprd_drm_gem_fops;
920 obj->filp->private_data = obj;
922 addr = vm_mmap(obj->filp, 0, args->size,
923 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
925 drm_gem_object_unreference_unlocked(obj);
927 if (IS_ERR_VALUE(addr))
932 DRM_DEBUG("%s:h[%d]s[%d]o[0x%x]mapped[0x%x]\n", __func__,
933 args->handle, (int)args->size, (int)obj, (int)args->mapped);
/*
 * sprd_drm_gem_mmap_iommu_ioctl - map a non-contiguous buffer into the
 * MM or GSP IOMMU domain and return the device address.  Contiguous
 * buffers are rejected.  The lookup reference is kept on success; the
 * matching unreference happens in sprd_drm_gem_unmap_iommu_ioctl().
 */
938 int sprd_drm_gem_mmap_iommu_ioctl(struct drm_device *dev, void *data,
939 struct drm_file *file_priv)
941 struct drm_sprd_gem_mmap *args = data;
942 struct drm_gem_object *obj;
943 struct ion_handle *ion_handle;
945 struct sprd_drm_gem_obj *sprd_gem_obj;
946 struct sprd_drm_gem_buf *buf;
949 if (!(dev->driver->driver_features & DRIVER_GEM)) {
950 DRM_ERROR("does not support GEM.\n");
954 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
956 DRM_ERROR("failed to lookup gem object.\n");
960 sprd_gem_obj = to_sprd_gem_obj(obj);
961 buf = sprd_gem_obj->buffer;
962 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
963 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
964 domain_num = IOMMU_MM;
965 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
966 domain_num = IOMMU_GSP;
968 ion_handle = buf->ion_handle;
969 sprd_map_iommu(ion_handle, domain_num, &addr);
971 DRM_ERROR("MMAP_IOMMU not applicable on CONTIG HEAP\n");
972 drm_gem_object_unreference_unlocked(obj);
/*
 * sprd_drm_gem_unmap_iommu_ioctl - counterpart of the mmap_iommu ioctl:
 * unmaps the IOMMU mapping for a non-contiguous buffer and drops two
 * references (this lookup's, plus the one retained by mmap_iommu).
 */
980 int sprd_drm_gem_unmap_iommu_ioctl(struct drm_device *dev, void *data,
981 struct drm_file *file_priv)
983 struct drm_sprd_gem_mmap *args = data;
984 struct drm_gem_object *obj;
985 struct ion_handle *ion_handle;
986 struct sprd_drm_gem_obj *sprd_gem_obj;
987 struct sprd_drm_gem_buf *buf;
988 int ret = 0, domain_num = 0;
990 if (!(dev->driver->driver_features & DRIVER_GEM)) {
991 DRM_ERROR("does not support GEM.\n");
995 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
997 DRM_ERROR("failed to lookup gem object.\n");
1001 sprd_gem_obj = to_sprd_gem_obj(obj);
1002 buf = sprd_gem_obj->buffer;
1003 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
1004 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
1005 domain_num = IOMMU_MM;
1006 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
1007 domain_num = IOMMU_GSP;
1009 ion_handle = buf->ion_handle;
1010 sprd_unmap_iommu(ion_handle, domain_num);
1012 DRM_ERROR("UNMAP_IOMMU not applicable on CONTIG HEAP\n");
1016 drm_gem_object_unreference_unlocked(obj);
1018 * decrease obj->refcount one more time because we has already
1019 * increased it at sprd_drm_gem_mmap_iommu_ioctl().
1021 drm_gem_object_unreference_unlocked(obj);
/*
 * sprd_drm_gem_get_ioctl - report an object's flags and size back to
 * userspace.  Runs under dev->struct_mutex and drops the lookup
 * reference before returning.
 */
1025 int sprd_drm_gem_get_ioctl(struct drm_device *dev, void *data,
1026 struct drm_file *file_priv)
1027 { struct sprd_drm_gem_obj *sprd_gem_obj;
1028 struct drm_sprd_gem_info *args = data;
1029 struct drm_gem_object *obj;
1031 mutex_lock(&dev->struct_mutex);
1033 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1035 DRM_ERROR("failed to lookup gem object.\n");
1036 mutex_unlock(&dev->struct_mutex);
1040 sprd_gem_obj = to_sprd_gem_obj(obj);
1042 args->flags = sprd_gem_obj->flags;
1043 args->size = sprd_gem_obj->size;
1045 drm_gem_object_unreference(obj);
1046 mutex_unlock(&dev->struct_mutex);
/* Per-object init callback; no per-object setup is needed here. */
1051 int sprd_drm_gem_init_object(struct drm_gem_object *obj)
/*
 * sprd_drm_gem_free_object - last-reference destructor.  Objects that
 * were imported via PRIME first detach from the dma-buf, then the
 * common destroy path releases the buffer and object.
 */
1056 void sprd_drm_gem_free_object(struct drm_gem_object *obj)
1058 struct sprd_drm_gem_obj *sprd_gem_obj;
1059 struct sprd_drm_gem_buf *buf;
1061 sprd_gem_obj = to_sprd_gem_obj(obj);
1062 buf = sprd_gem_obj->buffer;
1064 if (obj->import_attach)
1065 drm_prime_gem_destroy(obj, buf->sgt);
1067 sprd_drm_gem_destroy(to_sprd_gem_obj(obj));
/*
 * sprd_drm_gem_dumb_create - DRM dumb-buffer allocation callback.
 * Computes pitch and page-aligned size from width/height/bpp, then
 * reuses the single-index create path and returns a handle.
 */
1070 int sprd_drm_gem_dumb_create(struct drm_file *file_priv,
1071 struct drm_device *dev,
1072 struct drm_mode_create_dumb *args)
1074 struct sprd_drm_gem_obj *sprd_gem_obj;
1075 struct sprd_drm_gem_index gem_idx;
1079 * alocate memory to be used for framebuffer.
1080 * - this callback would be called by user application
1081 * with DRM_IOCTL_MODE_CREATE_DUMB command.
1084 args->pitch = args->width * args->bpp >> 3;
1085 args->size = PAGE_ALIGN(args->pitch * args->height);
1087 gem_idx.bufcount= 1;
1088 gem_idx.idx_size[0] = args->size;
1089 gem_idx.flags = args->flags;
1091 sprd_gem_obj = sprd_drm_gem_create(dev, &gem_idx);
1092 if (IS_ERR(sprd_gem_obj))
1093 return PTR_ERR(sprd_gem_obj);
1095 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
1098 sprd_drm_gem_destroy(sprd_gem_obj);
/*
 * sprd_drm_gem_dumb_map_offset - return (creating on first use) the fake
 * mmap offset userspace passes to mmap() for this handle.  Runs under
 * dev->struct_mutex.
 */
1105 int sprd_drm_gem_dumb_map_offset(struct drm_file *file_priv,
1106 struct drm_device *dev, uint32_t handle,
1109 struct drm_gem_object *obj;
1112 mutex_lock(&dev->struct_mutex);
1115 * get offset of memory allocated for drm framebuffer.
1116 * - this callback would be called by user application
1117 * with DRM_IOCTL_MODE_MAP_DUMB command.
1120 obj = drm_gem_object_lookup(dev, file_priv, handle);
1122 DRM_ERROR("failed to lookup gem object.\n");
1127 if (!obj->map_list.map) {
1128 ret = drm_gem_create_mmap_offset(obj);
1133 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
1134 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
1137 drm_gem_object_unreference(obj);
1139 mutex_unlock(&dev->struct_mutex);
/*
 * sprd_drm_gem_dumb_destroy - delete the userspace handle; the object is
 * freed via sprd_drm_gem_free_object() once all references are gone.
 */
1143 int sprd_drm_gem_dumb_destroy(struct drm_file *file_priv,
1144 struct drm_device *dev,
1145 unsigned int handle)
1150 * obj->refcount and obj->handle_count are decreased and
1151 * if both them are 0 then sprd_drm_gem_free_object()
1152 * would be called by callback to release resources.
1154 ret = drm_gem_handle_delete(file_priv, handle);
1156 DRM_ERROR("failed to delete drm_gem_handle.\n");
/*
 * sprd_drm_gem_fault - page-fault handler for GEM vmas set up through
 * sprd_drm_gem_mmap().  Maps the faulting page under dev->struct_mutex
 * and converts the result to a VM_FAULT_* code.
 */
1163 int sprd_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1165 struct drm_gem_object *obj = vma->vm_private_data;
1166 struct drm_device *dev = obj->dev;
1167 unsigned long f_vaddr;
1168 pgoff_t page_offset;
1171 page_offset = ((unsigned long)vmf->virtual_address -
1172 vma->vm_start) >> PAGE_SHIFT;
1173 f_vaddr = (unsigned long)vmf->virtual_address;
1175 mutex_lock(&dev->struct_mutex);
1177 ret = sprd_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
1179 DRM_ERROR("failed to map pages.\n");
1181 mutex_unlock(&dev->struct_mutex);
1183 return convert_to_vm_err_msg(ret);
/*
 * sprd_drm_gem_mmap - driver mmap entry point.  Delegates to
 * drm_gem_mmap(), re-validates the object's flags (undoing the mapping
 * on failure), switches the vma to VM_MIXEDMAP so the fault handler can
 * use vm_insert_mixed(), and applies the cache attributes.
 */
1186 int sprd_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1188 struct sprd_drm_gem_obj *sprd_gem_obj;
1189 struct drm_gem_object *obj;
1192 /* set vm_area_struct. */
1193 ret = drm_gem_mmap(filp, vma);
1195 DRM_ERROR("failed to mmap.\n");
1199 obj = vma->vm_private_data;
1200 sprd_gem_obj = to_sprd_gem_obj(obj);
1202 ret = check_gem_flags(sprd_gem_obj->flags);
1204 drm_gem_vm_close(vma);
1205 drm_gem_free_mmap_offset(obj);
1209 vma->vm_flags &= ~VM_PFNMAP;
1210 vma->vm_flags |= VM_MIXEDMAP;
1212 update_vm_cache_attr(sprd_gem_obj, vma);
/*
 * sprd_gem_lock_handle_ioctl - per-object advisory lock.  If another pid
 * holds the lock, the caller claims a free wait-entry slot, joins the
 * wait list, and blocks (up to 20 s) until lockpid returns to 0; it then
 * takes ownership by recording its pid.  Runs under dev->struct_mutex,
 * released across the wait.
 */
1217 int sprd_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
1218 struct drm_file *file_priv)
1220 struct drm_sprd_gem_lock_handle *args = data;
1221 struct drm_gem_object *obj;
1222 struct sprd_drm_gem_obj *sprd_gem_obj;
1223 struct drm_sprd_gem_object_wait_list_entry *lock_item;
1227 DRM_DEBUG_DRIVER("%s line:%d\n", __func__, __LINE__);
1228 mutex_lock(&dev->struct_mutex);
1230 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1233 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1238 sprd_gem_obj = to_sprd_gem_obj(obj);
1240 if (sprd_gem_obj->lockpid) {
1241 /* if a pid already had it locked */
1242 /* create and add to wait list */
1243 for (i = 0; i < DRM_SPRD_HANDLE_WAIT_ENTRIES; i++) {
1244 if (sprd_gem_obj->wait_entries[i].in_use == 0) {
1245 /* this one is empty */
1246 lock_item = &sprd_gem_obj->wait_entries[i];
1247 lock_item->in_use = 1;
1248 lock_item->pid = args->pid;
1249 INIT_LIST_HEAD((struct list_head *)
1250 &sprd_gem_obj->wait_entries[i]);
/* All wait slots busy: give up and drop the lookup reference. */
1255 if (i == DRM_SPRD_HANDLE_WAIT_ENTRIES) {
1258 drm_gem_object_unreference(obj);
1261 list_add_tail((struct list_head *)&lock_item->list,
1262 &sprd_gem_obj->wait_list);
1263 mutex_unlock(&dev->struct_mutex);
1264 /* here we need to block */
1265 wait_event_interruptible_timeout(
1266 sprd_gem_obj->wait_entries[i].process_wait_q,
1267 (sprd_gem_obj->lockpid == 0),
1268 msecs_to_jiffies(20000));
1269 mutex_lock(&dev->struct_mutex);
1270 lock_item->in_use = 0;
1272 sprd_gem_obj->lockpid = args->pid;
1273 DRM_DEBUG_DRIVER("%s lockpid:%d\n", __func__, sprd_gem_obj->lockpid);
1276 mutex_unlock(&dev->struct_mutex);
/*
 * sprd_gem_unlock_handle_ioctl - release the advisory lock.  Clears
 * lockpid and, if waiters exist, removes the tail wait entry and wakes
 * its process.  Drops both this lookup's reference and the one retained
 * by the lock ioctl.
 */
1281 int sprd_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
1282 struct drm_file *file_priv)
1285 struct drm_sprd_gem_unlock_handle *args = data;
1286 struct drm_gem_object *obj;
1287 struct sprd_drm_gem_obj *unlock_obj;
1288 struct drm_sprd_gem_object_wait_list_entry *lock_next;
1291 DRM_DEBUG_DRIVER("%s line:%d\n", __func__, __LINE__);
1292 mutex_lock(&dev->struct_mutex);
1294 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1297 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1302 unlock_obj = to_sprd_gem_obj(obj);
1303 if (!list_empty(&unlock_obj->wait_list)) {
1305 (struct drm_sprd_gem_object_wait_list_entry *)
1306 unlock_obj->wait_list.prev;
1308 list_del((struct list_head *)&lock_next->list);
1310 unlock_obj->lockpid = 0;
1311 wake_up_interruptible(
1312 &lock_next->process_wait_q);
1316 /* List is empty so set pid to 0 */
1317 unlock_obj->lockpid = 0;
1319 drm_gem_object_unreference(obj);
1321 drm_gem_object_unreference(obj);
1323 mutex_unlock(&dev->struct_mutex);
/*
 * sprd_gem_cache_op_ioctl - cache maintenance on a cacheable buffer:
 * invalidate (sync for CPU), clean (sync for device), or flush (both).
 * Rejects non-cacheable buffers and unknown ops.
 * NOTE(review): truncated view — the assignment of `sgt` (presumably
 * buf->sgt) and the switch statement header are not visible; confirm
 * sgt is set before the dma_sync_sg_* calls.
 */
1328 int sprd_gem_cache_op_ioctl(struct drm_device *dev, void *data,
1329 struct drm_file *file_priv)
1331 struct drm_sprd_gem_cache_op *args = data;
1333 struct drm_gem_object *obj;
1334 struct sprd_drm_gem_obj *sprd_gem_obj;
1335 struct sprd_drm_gem_buf *buf;
1336 struct sg_table *sgt;
1337 unsigned int cache_op = args->flags &(~SPRD_DRM_ALL_CACHE);
1339 mutex_lock(&dev->struct_mutex);
1340 obj = drm_gem_object_lookup(dev, file_priv, args->gem_handle);
1343 DRM_ERROR("invalid handle[%d]\n", args->gem_handle);
1345 goto err_invalid_handle;
1348 sprd_gem_obj = to_sprd_gem_obj(obj);
1349 buf = sprd_gem_obj->buffer;
1352 DRM_DEBUG("%s:h[%d]s[%d]f[0x%x]a[0x%x]o[0x%x]\n",
1353 "gc",args->gem_handle, (int)args->size, args->flags,
1354 (int)args->usr_addr, (int)obj);
1356 if (!IS_CACHABLE_BUFFER(sprd_gem_obj->flags)) {
1357 DRM_ERROR("invalid flags[0x%x]for h[%d]\n",
1358 sprd_gem_obj->flags, args->gem_handle);
1363 case SPRD_DRM_CACHE_INV:
1364 dma_sync_sg_for_cpu(NULL, sgt->sgl, sgt->nents,
1367 case SPRD_DRM_CACHE_CLN:
1368 dma_sync_sg_for_device(NULL, sgt->sgl, sgt->nents,
1371 case SPRD_DRM_CACHE_FSH:
1372 dma_sync_sg_for_device(NULL, sgt->sgl, sgt->nents,
1374 dma_sync_sg_for_cpu(NULL, sgt->sgl, sgt->nents,
1378 DRM_ERROR("invalid op[0x%x]for h[%d]\n", cache_op, args->gem_handle);
1384 drm_gem_object_unreference(obj);
1387 mutex_unlock(&dev->struct_mutex);