3 * Copyright (c) 2014 Spreadtrum Communications, Inc.
5 * This program is free software: you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
19 #include <linux/shmem_fs.h>
20 #include <drm/sprd_drm.h>
21 #include <linux/sprd_iommu.h>
23 #include "video/ion_sprd.h"
24 #include "sprd_drm_drv.h"
25 #include "sprd_drm_gem.h"
26 #include "sprd_drm_buf.h"
/*
 * convert_to_vm_err_msg() - translate an errno from the fault path into a
 * VM_FAULT_* code for the MM core.
 * NOTE(review): the switch cases are elided in this extract; confirm which
 * errno values select NOPAGE vs OOM vs SIGBUS.
 */
28 static unsigned int convert_to_vm_err_msg(int msg)
36 out_msg = VM_FAULT_NOPAGE;
40 out_msg = VM_FAULT_OOM;
44 out_msg = VM_FAULT_SIGBUS;
/*
 * check_gem_flags() - validate user-supplied GEM allocation flags.
 * Rejects any bit outside SPRD_BO_MASK | SPRD_BO_DEV_MASK; with
 * CONFIG_SPRD_IOMMU it additionally screens non-contiguous buffers against
 * the OVERLAY/SYSTEM device heaps (accept/reject branches elided here).
 */
51 static int check_gem_flags(unsigned int flags)
53 if (flags & ~(SPRD_BO_MASK | SPRD_BO_DEV_MASK))
56 #ifdef CONFIG_SPRD_IOMMU
57 if (IS_NONCONTIG_BUFFER(flags)) {
58 if (IS_DEV_OVERLAY_BUFFER(flags))
61 if (IS_DEV_SYSTEM_BUFFER(flags))
/* Fall-through: any path that reaches here logs and fails. */
68 DRM_ERROR("invalid flags[0x%x]\n", flags);
/*
 * update_vm_cache_attr() - set vma->vm_page_prot from the object's cache
 * flags: CACHABLE keeps the default protection, WC applies write-combining,
 * anything else is mapped non-cached.
 */
72 static void update_vm_cache_attr(struct sprd_drm_gem_obj *obj,
73 struct vm_area_struct *vma)
75 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
77 /* non-cachable as default. */
78 if (obj->flags & SPRD_BO_CACHABLE)
79 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
80 else if (obj->flags & SPRD_BO_WC)
82 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/* default branch (elided): non-cached mapping */
85 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
/*
 * roundup_gem_size() - round a requested allocation size up to the
 * granularity of the backing allocator: SECTION_SIZE (or SZ_64K when CMA
 * alignment is configured) for physically contiguous buffers, PAGE_SIZE
 * for non-contiguous ones.
 */
88 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
90 if (!IS_NONCONTIG_BUFFER(flags)) {
91 #ifndef CONFIG_CMA_ALIGNMENT
93 return roundup(size, SECTION_SIZE);
95 /* ToDo: need to sync with additional align size */
97 return roundup(size, SZ_64K);
102 return roundup(size, PAGE_SIZE);
/*
 * sprd_gem_get_pages() - populate a page array from the shmem file backing
 * the GEM object, one page per PAGE_SIZE of obj->size.
 * Returns the array or an ERR_PTR; on a failed read the error path (partly
 * elided) drops the pages already taken and frees the array.
 */
105 struct page **sprd_gem_get_pages(struct drm_gem_object *obj,
109 struct address_space *mapping;
110 struct page *p, **pages;
113 /* This is the shared memory object that backs the GEM resource */
114 inode = obj->filp->f_path.dentry->d_inode;
115 mapping = inode->i_mapping;
117 npages = obj->size >> PAGE_SHIFT;
119 pages = drm_malloc_ab(npages, sizeof(struct page *));
121 return ERR_PTR(-ENOMEM);
/* honor the mapping's own GFP constraints on top of the caller's mask */
123 gfpmask |= mapping_gfp_mask(mapping);
125 for (i = 0; i < npages; i++) {
126 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
/* error unwind: release pages acquired so far, then the array */
136 page_cache_release(pages[i]);
138 drm_free_large(pages);
139 return ERR_PTR(PTR_ERR(p));
/*
 * sprd_gem_put_pages() - release every page previously obtained by
 * sprd_gem_get_pages(), optionally marking pages dirty and/or accessed
 * first, then free the page-pointer array.
 */
142 static void sprd_gem_put_pages(struct drm_gem_object *obj,
144 bool dirty, bool accessed)
148 npages = obj->size >> PAGE_SHIFT;
150 for (i = 0; i < npages; i++) {
152 set_page_dirty(pages[i]);
155 mark_page_accessed(pages[i]);
157 /* Undo the reference we took when populating the table */
158 page_cache_release(pages[i]);
161 drm_free_large(pages);
/*
 * sprd_drm_gem_map_pages() - insert one page of the buffer into the vma at
 * f_vaddr for the fault handler. Non-contig buffers take the pfn from the
 * page array; contig buffers compute it from dma_addr plus the page offset.
 */
164 static int sprd_drm_gem_map_pages(struct drm_gem_object *obj,
165 struct vm_area_struct *vma,
166 unsigned long f_vaddr,
169 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
170 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
173 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
177 pfn = page_to_pfn(buf->pages[page_offset++]);
179 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
181 return vm_insert_mixed(vma, f_vaddr, pfn);
/*
 * sprd_drm_gem_get_pages() - back a non-contiguous GEM object with shmem
 * pages and build an sg_table over them (one PAGE_SIZE entry per page).
 * Idempotent: bails early if the buffer is already populated. On sg-table
 * failure the pages are returned via sprd_gem_put_pages() (unwind partly
 * elided in this extract).
 */
184 static int sprd_drm_gem_get_pages(struct drm_gem_object *obj)
186 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
187 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
188 struct scatterlist *sgl;
190 unsigned int npages, i = 0;
194 DRM_DEBUG_KMS("already allocated.\n");
198 pages = sprd_gem_get_pages(obj, GFP_KERNEL);
200 DRM_ERROR("failed to get pages.\n");
201 return PTR_ERR(pages);
204 npages = obj->size >> PAGE_SHIFT;
205 buf->page_size = PAGE_SIZE;
207 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
209 DRM_ERROR("failed to allocate sg table.\n");
214 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
216 DRM_ERROR("failed to initialize sg table.\n");
223 /* set all pages to sg list. */
225 sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
/* NOTE(review): sg_dma_address is set to the phys addr without a DMA
 * mapping step — relies on a 1:1 phys/DMA view; confirm. */
226 sg_dma_address(sgl) = page_to_phys(pages[i]);
231 /* add some codes for UNCACHED type here. TODO */
239 sprd_gem_put_pages(obj, pages, true, false);
/*
 * sprd_drm_gem_put_pages() - tear down what sprd_drm_gem_get_pages() built:
 * free the sg_table and drop the shmem pages (marked dirty, not accessed).
 */
244 static void sprd_drm_gem_put_pages(struct drm_gem_object *obj)
246 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
247 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
250 * if buffer typs is SPRD_BO_NONCONTIG then release all pages
251 * allocated at gem fault handler.
253 sg_free_table(buf->sgt);
257 sprd_gem_put_pages(obj, buf->pages, true, false);
260 /* add some codes for UNCACHED type here. TODO */
/*
 * sprd_drm_gem_handle_create() - register the object in the file's handle
 * idr and hand the userspace-visible handle back via *handle. The creation
 * reference is dropped here; the handle keeps the object alive.
 */
263 static int sprd_drm_gem_handle_create(struct drm_gem_object *obj,
264 struct drm_file *file_priv,
265 unsigned int *handle)
270 * allocate a id of idr table where the obj is registered
271 * and handle has the id what user can see.
273 ret = drm_gem_handle_create(file_priv, obj, handle);
277 DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
279 /* drop reference from allocate - handle holds it now. */
280 drm_gem_object_unreference_unlocked(obj);
/*
 * sprd_drm_gem_register_pid() - record the calling task's pid/tgid in the
 * DRM file private data on first use; on later calls only warn (debug log)
 * when a different task uses the same file descriptor.
 */
285 static void sprd_drm_gem_register_pid(struct drm_file *file_priv)
287 struct drm_sprd_file_private *driver_priv = file_priv->driver_priv;
289 if (!driver_priv->pid && !driver_priv->tgid) {
290 driver_priv->pid = task_pid_nr(current);
291 driver_priv->tgid = task_tgid_nr(current);
293 if (driver_priv->pid != task_pid_nr(current))
294 DRM_DEBUG_KMS("wrong pid: %ld, %ld\n",
295 (unsigned long)driver_priv->pid,
296 (unsigned long)task_pid_nr(current));
297 if (driver_priv->tgid != task_tgid_nr(current))
298 DRM_DEBUG_KMS("wrong tgid: %ld, %ld\n",
299 (unsigned long)driver_priv->tgid,
300 (unsigned long)task_tgid_nr(current));
/*
 * sprd_drm_gem_destroy() - full teardown of a sprd GEM object: free the
 * backing buffer, drop the mmap offset if one was created, and release the
 * base GEM object (kfree of sprd_gem_obj elided in this extract).
 * NOTE(review): (int) casts of pointers in the debug print truncate on
 * 64-bit — cosmetic only.
 */
304 void sprd_drm_gem_destroy(struct sprd_drm_gem_obj *sprd_gem_obj)
306 struct drm_gem_object *obj;
307 struct sprd_drm_gem_buf *buf;
309 obj = &sprd_gem_obj->base;
310 buf = sprd_gem_obj->buffer;
315 DRM_DEBUG("%s:o[0x%x]a[0x%x]\n", "gf",
316 (int)obj, (int)sprd_gem_obj->buffer->dma_addr);
318 sprd_drm_free_buf(obj->dev, sprd_gem_obj->flags, buf);
320 sprd_drm_fini_buf(obj->dev, buf);
321 sprd_gem_obj->buffer = NULL;
323 if (obj->map_list.map)
324 drm_gem_free_mmap_offset(obj);
326 /* release file pointer to gem object. */
327 drm_gem_object_release(obj);
/*
 * sprd_drm_gem_init() - allocate a sprd_drm_gem_obj wrapper, initialize its
 * embedded drm_gem_object (which creates the backing shmem file) and stamp
 * it with the creating task's pid/tgid. Returns the new object (error
 * returns elided in this extract).
 */
333 struct sprd_drm_gem_obj *sprd_drm_gem_init(struct drm_device *dev,
336 struct sprd_drm_gem_obj *sprd_gem_obj;
337 struct drm_gem_object *obj;
340 sprd_gem_obj = kzalloc(sizeof(*sprd_gem_obj), GFP_KERNEL);
342 DRM_ERROR("failed to allocate sprd gem object\n");
346 sprd_gem_obj->size = size;
347 obj = &sprd_gem_obj->base;
349 ret = drm_gem_object_init(dev, obj, size);
351 DRM_ERROR("failed to initialize gem object\n");
356 sprd_gem_obj->pid = task_pid_nr(current);
357 sprd_gem_obj->tgid = task_tgid_nr(current);
359 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
/*
 * sprd_drm_gem_create() - create a GEM object sized as the sum of the
 * per-index sizes in @args, allocate its buffer, record the start address
 * of each sub-buffer in idx_addr[], and initialize the handle-lock wait
 * machinery. Error unwind releases the GEM object and buffer.
 * NOTE(review): tsize is int — a large or malicious idx_size[] sum could
 * overflow before the size check (check itself elided here); confirm.
 */
364 struct sprd_drm_gem_obj *sprd_drm_gem_create(struct drm_device *dev,
365 struct sprd_drm_gem_index *args)
367 struct sprd_drm_gem_obj *sprd_gem_obj;
368 struct sprd_drm_gem_buf *buf;
369 int ret, i=0, j, tsize = 0;
371 ret = check_gem_flags(args->flags);
375 /* ToDo: need to check align */
376 for (i = 0; i < args->bufcount; i++)
377 tsize += args->idx_size[i];
380 DRM_ERROR("invalid size.\n");
381 return ERR_PTR(-EINVAL);
384 tsize = roundup_gem_size(tsize, args->flags);
386 buf = sprd_drm_init_buf(dev, tsize);
388 return ERR_PTR(-ENOMEM);
390 sprd_gem_obj = sprd_drm_gem_init(dev, tsize);
396 sprd_gem_obj->buffer = buf;
398 /* set memory type and cache attribute from user side. */
399 sprd_gem_obj->flags = args->flags;
401 buf->obj = &sprd_gem_obj->base;
403 ret = sprd_drm_alloc_buf(dev, buf, args->flags);
/* idx_addr[i+1] = idx_addr[i] + idx_size[i]: running start offsets */
407 memset(buf->idx_addr, 0x00, sizeof(buf->idx_addr));
408 buf->idx_addr[0] = buf->dma_addr;
409 buf->bufcount = args->bufcount;
411 for (i = 0; i < buf->bufcount; i++) {
413 if (buf->bufcount > j)
414 buf->idx_addr[j] = buf->idx_addr[i] + args->idx_size[i];
417 sprd_gem_obj->lockpid=0;
418 INIT_LIST_HEAD(&sprd_gem_obj->wait_list);
420 for (i = 0; i < DRM_SPRD_HANDLE_WAIT_ENTRIES; i++) {
421 INIT_LIST_HEAD((struct list_head *) &sprd_gem_obj->wait_entries[i]);
422 sprd_gem_obj->wait_entries[i].pid = 0;
423 init_waitqueue_head(&sprd_gem_obj->wait_entries[i].process_wait_q);
/* error unwind labels (elided): release GEM object, then the buffer */
429 drm_gem_object_release(&sprd_gem_obj->base);
432 sprd_drm_fini_buf(dev, buf);
/*
 * sprd_drm_gem_create_ioctl() - DRM_SPRD_GEM_CREATE handler: wraps the
 * single-buffer create in a one-entry sprd_drm_gem_index, creates the
 * userspace handle, registers the caller's pid, and logs the elapsed time.
 * NOTE(review): tv_sec * 1000000 is computed in the timeval's native type
 * before the u64 cast — overflows on 32-bit tv_sec; confirm acceptable for
 * a debug metric.
 */
436 int sprd_drm_gem_create_ioctl(struct drm_device *dev, void *data,
437 struct drm_file *file_priv)
439 struct drm_sprd_gem_create *args = data;
440 struct sprd_drm_gem_obj *sprd_gem_obj;
441 struct sprd_drm_gem_index gem_idx;
442 struct timeval val_start, val_end;
443 uint64_t time_start, time_end;
446 do_gettimeofday(&val_start);
447 time_start = (uint64_t)(val_start.tv_sec * 1000000 + val_start.tv_usec);
450 gem_idx.idx_size[0] = args->size;
451 gem_idx.flags = args->flags;
453 sprd_gem_obj = sprd_drm_gem_create(dev, &gem_idx);
454 if (IS_ERR(sprd_gem_obj)) {
455 DRM_ERROR("failed to sprd_drm_gem_create:s[%d]f[0x%x]\n",
456 (int)args->size, args->flags);
457 return PTR_ERR(sprd_gem_obj);
460 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
463 DRM_ERROR("failed to sprd_drm_gem_handle_create:s[%d]f[0x%x]\n",
464 (int)args->size, args->flags);
/* handle creation failed: destroy the freshly created object */
465 sprd_drm_gem_destroy(sprd_gem_obj);
469 sprd_drm_gem_register_pid(file_priv);
471 do_gettimeofday(&val_end);
472 time_end = (uint64_t)(val_end.tv_sec * 1000000 + val_end.tv_usec);
474 DRM_DEBUG("%s:h[%d]s[%d]f[0x%x]o[0x%x]a[0x%x][%lld us]\n",
475 "ga",args->handle, (int)args->size, args->flags,
476 (int)&sprd_gem_obj->base,
477 (int)sprd_gem_obj->buffer->dma_addr, time_end - time_start);
/*
 * sprd_drm_gem_create_index_ioctl() - multi-index create handler. Rejects
 * non-contiguous requests (indexed sub-buffers require contiguous memory),
 * then creates the object and its handle and registers the caller's pid.
 */
482 int sprd_drm_gem_create_index_ioctl(struct drm_device *dev, void *data,
483 struct drm_file *file_priv)
485 struct sprd_drm_gem_index *args = data;
486 struct sprd_drm_gem_obj *sprd_gem_obj;
489 if (args->flags & SPRD_BO_NONCONTIG) {
490 DRM_ERROR("does not support non-contig memory\n");
494 sprd_gem_obj = sprd_drm_gem_create(dev, args);
495 if (IS_ERR(sprd_gem_obj))
496 return PTR_ERR(sprd_gem_obj);
498 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
501 sprd_drm_gem_destroy(sprd_gem_obj);
505 sprd_drm_gem_register_pid(file_priv);
507 DRM_INFO("%s:h[%d]cnt[%d]sz[%d %d %d]f[0x%x]o[0x%x]a[0x%x]\n",
508 __func__,args->handle, args->bufcount,
509 (int)args->idx_size[0], (int)args->idx_size[1], (int)args->idx_size[2],
510 args->flags, (int)&sprd_gem_obj->base,
511 (int)sprd_gem_obj->buffer->dma_addr);
/*
 * sprd_prime_export() - PRIME export hook: hand out the ION dma_buf that
 * already backs this GEM object rather than building a new one.
 */
516 struct dma_buf *sprd_prime_export(struct drm_device *dev,
517 struct drm_gem_object *obj, int flags)
519 struct sprd_drm_private *private = dev->dev_private;
520 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
521 struct sprd_drm_gem_buf *buf = sprd_gem_obj->buffer;
522 struct dma_buf *dmabuf;
524 dmabuf = ion_share_dma_buf(private->sprd_drm_ion_client,
527 pr_err("%s: dmabuf is error and dmabuf is %p!\n",
/*
 * sprd_prime_import() - PRIME import hook. Resolves the dma_buf to an ION
 * handle; if a GEM object already wraps that handle, takes a reference and
 * returns it (dropping the extra ION ref). Otherwise builds a new GEM
 * object around the ION buffer: derives flags from physical contiguity,
 * cacheability and heap id, adopts ION's sg_table, and mirrors it into a
 * page-pointer array.
 */
533 struct drm_gem_object *sprd_prime_import(struct drm_device *dev,
534 struct dma_buf *dma_buf)
536 struct ion_handle *ion_handle;
537 struct sprd_drm_gem_obj *sprd_gem_obj;
539 struct sprd_drm_gem_buf *buf = NULL;
540 unsigned int i = 0, nr_pages = 0, heap_id;
542 struct sprd_drm_private *private;
543 struct scatterlist *sg = NULL;
544 struct drm_gem_object *obj;
545 unsigned long sgt_size;
547 private = dev->dev_private;
548 ion_handle = get_ion_handle_from_dmabuf(private->sprd_drm_ion_client, dma_buf);
549 if (IS_ERR_OR_NULL(ion_handle)) {
550 DRM_ERROR("Unable to import dmabuf\n");
551 return ERR_PTR(-EINVAL);
554 ion_handle_get_size(private->sprd_drm_ion_client,
555 ion_handle, &size, &heap_id);
558 "cannot create GEM object from zero size ION buffer\n");
/* fast path: ION buffer already wrapped by one of our GEM objects */
563 obj = ion_get_gem(ion_handle);
565 sprd_gem_obj = to_sprd_gem_obj(obj);
566 if (sprd_gem_obj->buffer->ion_handle != ion_handle) {
567 DRM_ERROR("Unable get GEM object from ion\n");
/* reuse existing object; drop the extra ION reference taken above */
572 drm_gem_object_reference(obj);
573 ion_free(private->sprd_drm_ion_client, ion_handle);
578 buf = sprd_drm_init_buf(dev, size);
580 DRM_ERROR("Unable to allocate the GEM buffer\n");
585 sprd_gem_obj = sprd_drm_gem_init(dev, size);
587 DRM_ERROR("Unable to initialize GEM object\n");
591 sprd_gem_obj->buffer = buf;
592 obj = &sprd_gem_obj->base;
/* physically contiguous ION buffers map to SPRD_BO_CONTIG */
594 ret = ion_is_phys(private->sprd_drm_ion_client, ion_handle);
596 sprd_gem_obj->flags = SPRD_BO_NONCONTIG;
598 sprd_gem_obj->flags = SPRD_BO_CONTIG;
600 DRM_ERROR("Unable to get flag, Invalid handle\n");
604 /* ion_handle is validated in ion_is_phys, no need to check again */
605 ret = ion_is_cached(private->sprd_drm_ion_client, ion_handle);
607 sprd_gem_obj->flags |= SPRD_BO_CACHABLE;
/* translate the ION heap id into a SPRD device-heap flag */
609 if ((heap_id == ION_HEAP_ID_MASK_GSP) || (heap_id == ION_HEAP_ID_MASK_GSP_IOMMU))
610 sprd_gem_obj->flags |= SPRD_BO_DEV_GSP;
611 else if ((heap_id == ION_HEAP_ID_MASK_MM) || (heap_id == ION_HEAP_ID_MASK_MM_IOMMU))
612 sprd_gem_obj->flags |= SPRD_BO_DEV_MM;
613 else if (heap_id == ION_HEAP_ID_MASK_OVERLAY)
614 sprd_gem_obj->flags |= SPRD_BO_DEV_OVERLAY;
615 else if (heap_id == ION_HEAP_ID_MASK_SYSTEM)
616 sprd_gem_obj->flags |= SPRD_BO_DEV_SYSTEM;
618 DRM_ERROR("Heap id not supported\n");
623 buf->ion_handle = ion_handle;
624 buf->sgt = ion_sg_table(private->sprd_drm_ion_client, buf->ion_handle);
626 DRM_ERROR("failed to allocate sg table.\n");
631 buf->dma_addr = sg_dma_address(buf->sgt->sgl);
632 for_each_sg(buf->sgt->sgl, sg, buf->sgt->nents, i)
/* NOTE(review): array holds struct page * entries, but is sized with
 * sizeof(struct page) — over-allocates considerably; confirm intent. */
635 sgt_size = sizeof(struct page) * nr_pages;
636 buf->pages = kzalloc(sgt_size, GFP_KERNEL | __GFP_NOWARN);
/* kzalloc fallback for large tables: retry with vzalloc */
639 order = get_order(sgt_size);
640 DRM_ERROR("%s: kzalloc failed for sg list: order:%d\n",
642 buf->pages = vzalloc(sgt_size);
644 DRM_ERROR("failed to allocate pages.\n");
650 for_each_sg(buf->sgt->sgl, sg, buf->sgt->nents, i)
651 buf->pages[i] = phys_to_page(sg_dma_address(sg));
653 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
654 (unsigned long)buf->dma_addr, buf->size);
/* error unwind (labels elided): undo in reverse order of setup */
659 buf->dma_addr = (dma_addr_t)NULL;
662 sprd_gem_obj->buffer = NULL;
663 /* release file pointer to gem object. */
664 drm_gem_object_release(obj);
668 sprd_drm_fini_buf(dev, buf);
670 ion_free(private->sprd_drm_ion_client, ion_handle);
/*
 * sprd_drm_gem_prime_fd_to_handle() - thin wrapper over the DRM core
 * PRIME fd->handle path that also records the caller's pid on success.
 */
675 int sprd_drm_gem_prime_fd_to_handle(struct drm_device *dev,
676 struct drm_file *file_priv, int prime_fd, uint32_t *handle)
680 ret = drm_gem_prime_fd_to_handle(dev, file_priv, prime_fd, handle);
684 sprd_drm_gem_register_pid(file_priv);
/*
 * sprd_drm_gem_get_dma_addr() - look up a GEM handle and return a pointer
 * to its buffer's dma_addr. For non-contiguous buffers the address is first
 * produced by mapping the ION handle through the MM or GSP IOMMU domain.
 * Takes an object reference that sprd_drm_gem_put_dma_addr() must drop.
 */
691 void *sprd_drm_gem_get_dma_addr(struct drm_device *dev,
692 unsigned int gem_handle,
693 struct drm_file *file_priv)
695 struct sprd_drm_gem_obj *sprd_gem_obj;
696 struct drm_gem_object *obj;
697 struct ion_handle *ion_handle;
698 struct sprd_drm_gem_buf *buf;
701 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
703 DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
704 return ERR_PTR(-EINVAL);
707 sprd_gem_obj = to_sprd_gem_obj(obj);
709 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
710 buf = sprd_gem_obj->buffer;
711 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
712 domain_num = IOMMU_MM;
713 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
714 domain_num = IOMMU_GSP;
716 ion_handle = buf->ion_handle;
717 if (sprd_map_iommu(ion_handle, domain_num,
718 (unsigned long *)&sprd_gem_obj->buffer->dma_addr)) {
719 DRM_ERROR("failed to map iommu:h[%d]o[0x%x]\n",
720 gem_handle, (int)obj);
721 drm_gem_object_unreference_unlocked(obj);
722 return ERR_PTR(-EINVAL);
726 DRM_DEBUG("%s:h[%d]o[0x%x]a[0x%x]\n",
727 __func__,gem_handle, (int)obj,
728 (int)sprd_gem_obj->buffer->dma_addr);
/* caller gets a pointer into the (still-referenced) buffer struct */
730 return &sprd_gem_obj->buffer->dma_addr;
/*
 * sprd_drm_gem_put_dma_addr() - counterpart of sprd_drm_gem_get_dma_addr():
 * unmap the IOMMU mapping for non-contiguous buffers, then drop both the
 * lookup reference taken here and the one held since get_dma_addr().
 */
733 void sprd_drm_gem_put_dma_addr(struct drm_device *dev,
734 unsigned int gem_handle,
735 struct drm_file *file_priv)
737 struct sprd_drm_gem_obj *sprd_gem_obj;
738 struct drm_gem_object *obj;
739 struct ion_handle *ion_handle;
740 struct sprd_drm_gem_buf *buf;
743 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
745 DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
749 sprd_gem_obj = to_sprd_gem_obj(obj);
751 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
752 buf = sprd_gem_obj->buffer;
753 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
754 domain_num = IOMMU_MM;
755 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
756 domain_num = IOMMU_GSP;
758 ion_handle = buf->ion_handle;
759 if (sprd_unmap_iommu(ion_handle, domain_num))
760 DRM_ERROR("failed to unmap iommu:h[%d]o[0x%x]\n",
761 gem_handle, (int)obj);
/* drop the reference taken by the lookup above */
764 drm_gem_object_unreference_unlocked(obj);
766 DRM_DEBUG("%s:h[%d]o[0x%x]\n",
767 __func__,gem_handle, (int)obj);
769 * decrease obj->refcount one more time because we has already
770 * increased it at sprd_drm_gem_get_dma_addr().
772 drm_gem_object_unreference_unlocked(obj);
/*
 * sprd_drm_gem_get_size() - resolve a handle and return the backing
 * buffer's size; the lookup reference is dropped before returning.
 */
775 unsigned long sprd_drm_gem_get_size(struct drm_device *dev,
776 unsigned int gem_handle,
777 struct drm_file *file_priv)
779 struct sprd_drm_gem_obj *sprd_gem_obj;
780 struct drm_gem_object *obj;
782 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
784 DRM_ERROR("failed to lookup gem object:h[%d]\n", gem_handle);
788 sprd_gem_obj = to_sprd_gem_obj(obj);
790 drm_gem_object_unreference_unlocked(obj);
792 return sprd_gem_obj->buffer->size;
/*
 * sprd_drm_gem_get_obj_addr() - in-kernel helper (exported): resolve a
 * global GEM name to an object and return a pointer to the device address
 * of sub-buffer @index. Non-contiguous buffers are IOMMU-mapped first.
 * NOTE(review): unlike the handle-based lookups above, no object reference
 * is taken here — lifetime relies on the caller; confirm.
 */
795 void *sprd_drm_gem_get_obj_addr(unsigned int name, unsigned int index)
797 struct sprd_drm_gem_obj *sprd_gem_obj;
798 struct drm_gem_object *obj;
799 struct ion_handle *ion_handle;
800 struct sprd_drm_gem_buf *buf;
803 mutex_lock(&sprd_drm_dev->object_name_lock);
804 obj = idr_find(&sprd_drm_dev->object_name_idr, (int) name);
805 mutex_unlock(&sprd_drm_dev->object_name_lock);
808 DRM_ERROR("name[%d]failed to lookup gem object.\n", name);
809 return ERR_PTR(-EFAULT);
812 sprd_gem_obj = to_sprd_gem_obj(obj);
813 buf = sprd_gem_obj->buffer;
815 if (index >= buf->bufcount) {
816 DRM_ERROR("invalid index[%d],bufcount[%d]\n",
817 index, buf->bufcount);
818 return ERR_PTR(-EINVAL);
821 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
822 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
823 domain_num = IOMMU_MM;
824 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
825 domain_num = IOMMU_GSP;
827 ion_handle = buf->ion_handle;
828 if (sprd_map_iommu(ion_handle, domain_num,
829 (unsigned long *)&sprd_gem_obj->buffer->dma_addr)) {
830 DRM_ERROR("failed to map iommu\n");
831 return ERR_PTR(-EINVAL);
835 DRM_DEBUG("%s:name[%d]o[0x%x]idx[%d]a[0x%x]\n",
836 __func__, name, (int)obj, index, (int)buf->idx_addr[index]);
838 return &buf->idx_addr[index];
840 EXPORT_SYMBOL(sprd_drm_gem_get_obj_addr);
/*
 * sprd_drm_gem_map_offset_ioctl() - return the fake mmap offset for a
 * handle; delegates to the dumb map-offset helper after a DRIVER_GEM check.
 */
842 int sprd_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
843 struct drm_file *file_priv)
845 struct drm_sprd_gem_map_off *args = data;
847 DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
848 args->handle, (unsigned long)args->offset);
850 if (!(dev->driver->driver_features & DRIVER_GEM)) {
851 DRM_ERROR("does not support GEM.\n");
855 return sprd_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
/*
 * sprd_drm_gem_mmap_buffer() - mmap handler installed on the GEM object's
 * shmem file by sprd_drm_gem_mmap_ioctl(). Maps the whole buffer into the
 * vma: non-contiguous buffers are walked sg-entry by sg-entry with
 * remap_pfn_range(); contiguous buffers map dma_addr in one call.
 * NOTE(review): the per-sg remap_pfn_range() return value is discarded —
 * a partial failure would go unnoticed; confirm.
 */
859 static int sprd_drm_gem_mmap_buffer(struct file *filp,
860 struct vm_area_struct *vma)
862 struct drm_gem_object *obj = filp->private_data;
863 struct sprd_drm_gem_obj *sprd_gem_obj = to_sprd_gem_obj(obj);
864 struct sprd_drm_gem_buf *buffer;
865 unsigned long pfn, vm_size;
867 vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
869 update_vm_cache_attr(sprd_gem_obj, vma);
871 vm_size = vma->vm_end - vma->vm_start;
874 * a buffer contains information to physically continuous memory
875 * allocated by user request or at framebuffer creation.
877 buffer = sprd_gem_obj->buffer;
879 /* check if user-requested size is valid. */
880 if (vm_size > buffer->size)
883 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
884 unsigned long addr = vma->vm_start;
885 unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
886 struct scatterlist *sg;
889 for_each_sg(buffer->sgt->sgl, sg, buffer->sgt->nents, i) {
890 struct page *page = sg_page(sg);
891 unsigned long remainder = vma->vm_end - addr;
892 unsigned long len = sg_dma_len(sg);
/* skip whole sg entries that lie before the requested offset */
894 if (offset >= sg_dma_len(sg)) {
895 offset -= sg_dma_len(sg);
898 page += offset / PAGE_SIZE;
899 len = sg_dma_len(sg) - offset;
/* clamp to what is left of the vma */
902 len = min(len, remainder);
903 remap_pfn_range(vma, addr, page_to_pfn(page), len,
906 if (addr >= vma->vm_end) {
912 * get page frame number to physical memory to be mapped
915 pfn = ((unsigned long)sprd_gem_obj->buffer->dma_addr) >>
918 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
920 if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
921 vma->vm_page_prot)) {
922 DRM_ERROR("failed to remap pfn range.\n");
/* file_operations substituted onto the GEM shmem file so that userspace
 * mmap of that file goes through sprd_drm_gem_mmap_buffer(). */
930 static const struct file_operations sprd_drm_gem_fops = {
931 .mmap = sprd_drm_gem_mmap_buffer,
/*
 * sprd_drm_gem_mmap_ioctl() - map a GEM object into the caller's address
 * space by re-pointing the object's shmem file at sprd_drm_gem_fops and
 * vm_mmap()ing it; the resulting address is returned in args->mapped.
 * NOTE(review): overwriting obj->filp->f_op/private_data affects every
 * user of that file — assumes exclusive use per object; confirm.
 */
934 int sprd_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
935 struct drm_file *file_priv)
937 struct drm_sprd_gem_mmap *args = data;
938 struct drm_gem_object *obj;
941 if (!(dev->driver->driver_features & DRIVER_GEM)) {
942 DRM_ERROR("does not support GEM.\n");
946 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
948 DRM_ERROR("failed to lookup gem object:h[%d]\n", args->handle);
952 obj->filp->f_op = &sprd_drm_gem_fops;
953 obj->filp->private_data = obj;
955 addr = vm_mmap(obj->filp, 0, args->size,
956 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
958 drm_gem_object_unreference_unlocked(obj);
960 if (IS_ERR_VALUE(addr))
965 DRM_DEBUG("%s:h[%d]s[%d]o[0x%x]mapped[0x%x]\n", __func__,
966 args->handle, (int)args->size, (int)obj, (int)args->mapped);
/*
 * sprd_drm_gem_mmap_iommu_ioctl() - map a non-contiguous GEM buffer into
 * the MM or GSP IOMMU domain and return the device address in
 * args->mapped. Contiguous buffers are rejected. The lookup reference is
 * kept and dropped later by the unmap ioctl.
 * NOTE(review): sprd_map_iommu()'s return value is not checked here —
 * args->mapped may be set from an unmapped addr; confirm.
 */
971 int sprd_drm_gem_mmap_iommu_ioctl(struct drm_device *dev, void *data,
972 struct drm_file *file_priv)
974 struct drm_sprd_gem_mmap *args = data;
975 struct drm_gem_object *obj;
976 struct ion_handle *ion_handle;
978 struct sprd_drm_gem_obj *sprd_gem_obj;
979 struct sprd_drm_gem_buf *buf;
982 if (!(dev->driver->driver_features & DRIVER_GEM)) {
983 DRM_ERROR("does not support GEM.\n");
987 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
989 DRM_ERROR("failed to lookup gem object.\n");
993 sprd_gem_obj = to_sprd_gem_obj(obj);
994 buf = sprd_gem_obj->buffer;
995 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
996 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
997 domain_num = IOMMU_MM;
998 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
999 domain_num = IOMMU_GSP;
1001 ion_handle = buf->ion_handle;
1002 sprd_map_iommu(ion_handle, domain_num, &addr);
1004 DRM_ERROR("MMAP_IOMMU not applicable on CONTIG HEAP\n");
1005 drm_gem_object_unreference_unlocked(obj);
1009 args->mapped = addr;
/*
 * sprd_drm_gem_unmap_iommu_ioctl() - counterpart of the mmap_iommu ioctl:
 * unmap the buffer from its IOMMU domain, then drop both the lookup
 * reference taken here and the one held since mmap_iommu.
 */
1013 int sprd_drm_gem_unmap_iommu_ioctl(struct drm_device *dev, void *data,
1014 struct drm_file *file_priv)
1016 struct drm_sprd_gem_mmap *args = data;
1017 struct drm_gem_object *obj;
1018 struct ion_handle *ion_handle;
1019 struct sprd_drm_gem_obj *sprd_gem_obj;
1020 struct sprd_drm_gem_buf *buf;
1021 int ret = 0, domain_num = 0;
1023 if (!(dev->driver->driver_features & DRIVER_GEM)) {
1024 DRM_ERROR("does not support GEM.\n");
1028 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1030 DRM_ERROR("failed to lookup gem object.\n");
1034 sprd_gem_obj = to_sprd_gem_obj(obj);
1035 buf = sprd_gem_obj->buffer;
1036 if (sprd_gem_obj->flags & SPRD_BO_NONCONTIG) {
1037 if (IS_DEV_MM_BUFFER(sprd_gem_obj->flags))
1038 domain_num = IOMMU_MM;
1039 else if (IS_DEV_GSP_BUFFER(sprd_gem_obj->flags))
1040 domain_num = IOMMU_GSP;
1042 ion_handle = buf->ion_handle;
1043 sprd_unmap_iommu(ion_handle, domain_num);
1045 DRM_ERROR("UNMAP_IOMMU not applicable on CONTIG HEAP\n");
/* drop the lookup reference from this call */
1049 drm_gem_object_unreference_unlocked(obj);
1051 * decrease obj->refcount one more time because we has already
1052 * increased it at sprd_drm_gem_mmap_iommu_ioctl().
1054 drm_gem_object_unreference_unlocked(obj);
/*
 * sprd_drm_gem_get_ioctl() - report an object's flags and size back to
 * userspace; lookup and unreference are done under dev->struct_mutex.
 */
1058 int sprd_drm_gem_get_ioctl(struct drm_device *dev, void *data,
1059 struct drm_file *file_priv)
1060 { struct sprd_drm_gem_obj *sprd_gem_obj;
1061 struct drm_sprd_gem_info *args = data;
1062 struct drm_gem_object *obj;
1064 mutex_lock(&dev->struct_mutex);
1066 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1068 DRM_ERROR("failed to lookup gem object.\n");
1069 mutex_unlock(&dev->struct_mutex);
1073 sprd_gem_obj = to_sprd_gem_obj(obj);
1075 args->flags = sprd_gem_obj->flags;
1076 args->size = sprd_gem_obj->size;
1078 drm_gem_object_unreference(obj);
1079 mutex_unlock(&dev->struct_mutex);
/* DRM driver .gem_init_object callback — body elided in this extract
 * (presumably a no-op returning 0; confirm). */
1084 int sprd_drm_gem_init_object(struct drm_gem_object *obj)
/*
 * sprd_drm_gem_free_object() - DRM .gem_free_object callback: for imported
 * (PRIME) objects detach via drm_prime_gem_destroy() first, then run the
 * common destroy path.
 */
1089 void sprd_drm_gem_free_object(struct drm_gem_object *obj)
1091 struct sprd_drm_gem_obj *sprd_gem_obj;
1092 struct sprd_drm_gem_buf *buf;
1094 sprd_gem_obj = to_sprd_gem_obj(obj);
1095 buf = sprd_gem_obj->buffer;
1097 if (obj->import_attach)
1098 drm_prime_gem_destroy(obj, buf->sgt);
1100 sprd_drm_gem_destroy(to_sprd_gem_obj(obj));
/*
 * sprd_drm_gem_dumb_create() - DRM dumb-buffer create callback: derive
 * pitch/size from width*bpp and height, then reuse the single-index create
 * path and hand back a handle.
 */
1103 int sprd_drm_gem_dumb_create(struct drm_file *file_priv,
1104 struct drm_device *dev,
1105 struct drm_mode_create_dumb *args)
1107 struct sprd_drm_gem_obj *sprd_gem_obj;
1108 struct sprd_drm_gem_index gem_idx;
1112 * alocate memory to be used for framebuffer.
1113 * - this callback would be called by user application
1114 * with DRM_IOCTL_MODE_CREATE_DUMB command.
1117 args->pitch = args->width * args->bpp >> 3;
1118 args->size = PAGE_ALIGN(args->pitch * args->height);
1120 gem_idx.bufcount= 1;
1121 gem_idx.idx_size[0] = args->size;
1122 gem_idx.flags = args->flags;
1124 sprd_gem_obj = sprd_drm_gem_create(dev, &gem_idx);
1125 if (IS_ERR(sprd_gem_obj))
1126 return PTR_ERR(sprd_gem_obj);
1128 ret = sprd_drm_gem_handle_create(&sprd_gem_obj->base, file_priv,
1131 sprd_drm_gem_destroy(sprd_gem_obj);
1135 sprd_drm_gem_register_pid(file_priv);
/*
 * sprd_drm_gem_dumb_map_offset() - return (creating on first use) the fake
 * mmap offset for a handle, under dev->struct_mutex.
 */
1140 int sprd_drm_gem_dumb_map_offset(struct drm_file *file_priv,
1141 struct drm_device *dev, uint32_t handle,
1144 struct drm_gem_object *obj;
1147 mutex_lock(&dev->struct_mutex);
1150 * get offset of memory allocated for drm framebuffer.
1151 * - this callback would be called by user application
1152 * with DRM_IOCTL_MODE_MAP_DUMB command.
1155 obj = drm_gem_object_lookup(dev, file_priv, handle);
1157 DRM_ERROR("failed to lookup gem object.\n");
/* lazily create the mmap offset on first request */
1162 if (!obj->map_list.map) {
1163 ret = drm_gem_create_mmap_offset(obj);
1168 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
1169 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
1172 drm_gem_object_unreference(obj);
1174 mutex_unlock(&dev->struct_mutex);
/*
 * sprd_drm_gem_dumb_destroy() - delete the dumb buffer's handle; final
 * cleanup happens through sprd_drm_gem_free_object() when all references
 * and handles are gone.
 */
1178 int sprd_drm_gem_dumb_destroy(struct drm_file *file_priv,
1179 struct drm_device *dev,
1180 unsigned int handle)
1185 * obj->refcount and obj->handle_count are decreased and
1186 * if both them are 0 then sprd_drm_gem_free_object()
1187 * would be called by callback to release resources.
1189 ret = drm_gem_handle_delete(file_priv, handle);
1191 DRM_ERROR("failed to delete drm_gem_handle.\n");
/*
 * sprd_drm_gem_fault() - vm_ops fault handler: compute the page offset of
 * the faulting address inside the vma, insert the matching buffer page
 * under dev->struct_mutex, and translate the result to a VM_FAULT_* code.
 */
1198 int sprd_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1200 struct drm_gem_object *obj = vma->vm_private_data;
1201 struct drm_device *dev = obj->dev;
1202 unsigned long f_vaddr;
1203 pgoff_t page_offset;
1206 page_offset = ((unsigned long)vmf->virtual_address -
1207 vma->vm_start) >> PAGE_SHIFT;
1208 f_vaddr = (unsigned long)vmf->virtual_address;
1210 mutex_lock(&dev->struct_mutex);
1212 ret = sprd_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
1214 DRM_ERROR("failed to map pages.\n");
1216 mutex_unlock(&dev->struct_mutex);
1218 return convert_to_vm_err_msg(ret);
/*
 * sprd_drm_gem_mmap() - file_operations .mmap for the DRM device node:
 * defer to drm_gem_mmap(), re-validate the object's flags (undoing the vma
 * on failure), switch the vma to MIXEDMAP for the fault handler, and apply
 * the object's cache attributes.
 */
1221 int sprd_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1223 struct sprd_drm_gem_obj *sprd_gem_obj;
1224 struct drm_gem_object *obj;
1227 /* set vm_area_struct. */
1228 ret = drm_gem_mmap(filp, vma);
1230 DRM_ERROR("failed to mmap.\n");
1234 obj = vma->vm_private_data;
1235 sprd_gem_obj = to_sprd_gem_obj(obj);
1237 ret = check_gem_flags(sprd_gem_obj->flags);
1239 drm_gem_vm_close(vma);
1240 drm_gem_free_mmap_offset(obj);
/* fault handler uses vm_insert_mixed(); PFNMAP would conflict */
1244 vma->vm_flags &= ~VM_PFNMAP;
1245 vma->vm_flags |= VM_MIXEDMAP;
1247 update_vm_cache_attr(sprd_gem_obj, vma);
/*
 * sprd_gem_lock_handle_ioctl() - cooperative per-object lock keyed by pid.
 * If the object is unlocked, record args->pid as owner. If it is locked,
 * claim a free wait entry, append it to the object's wait list and sleep
 * (up to 20 s, interruptible) until lockpid drops to 0, dropping
 * dev->struct_mutex around the wait.
 * NOTE(review): INIT_LIST_HEAD on the entry (line 1284) re-initializes the
 * node that is then list_add_tail'ed via lock_item->list — layout-dependent
 * casting; confirm the entry struct embeds the list head first.
 */
1252 int sprd_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
1253 struct drm_file *file_priv)
1255 struct drm_sprd_gem_lock_handle *args = data;
1256 struct drm_gem_object *obj;
1257 struct sprd_drm_gem_obj *sprd_gem_obj;
1258 struct drm_sprd_gem_object_wait_list_entry *lock_item;
1262 DRM_DEBUG_DRIVER("%s line:%d\n", __func__, __LINE__);
1263 mutex_lock(&dev->struct_mutex);
1265 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1268 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1273 sprd_gem_obj = to_sprd_gem_obj(obj);
1275 if (sprd_gem_obj->lockpid) {
1276 /* if a pid already had it locked */
1277 /* create and add to wait list */
1278 for (i = 0; i < DRM_SPRD_HANDLE_WAIT_ENTRIES; i++) {
1279 if (sprd_gem_obj->wait_entries[i].in_use == 0) {
1280 /* this one is empty */
1281 lock_item = &sprd_gem_obj->wait_entries[i];
1282 lock_item->in_use = 1;
1283 lock_item->pid = args->pid;
1284 INIT_LIST_HEAD((struct list_head *)
1285 &sprd_gem_obj->wait_entries[i]);
/* no free wait slot: fail the lock request */
1290 if (i == DRM_SPRD_HANDLE_WAIT_ENTRIES) {
1293 drm_gem_object_unreference(obj);
1296 list_add_tail((struct list_head *)&lock_item->list,
1297 &sprd_gem_obj->wait_list);
1298 mutex_unlock(&dev->struct_mutex);
1299 /* here we need to block */
1300 wait_event_interruptible_timeout(
1301 sprd_gem_obj->wait_entries[i].process_wait_q,
1302 (sprd_gem_obj->lockpid == 0),
1303 msecs_to_jiffies(20000));
1304 mutex_lock(&dev->struct_mutex);
1305 lock_item->in_use = 0;
1307 sprd_gem_obj->lockpid = args->pid;
1308 DRM_DEBUG_DRIVER("%s lockpid:%d\n", __func__, sprd_gem_obj->lockpid);
1311 mutex_unlock(&dev->struct_mutex);
/*
 * sprd_gem_unlock_handle_ioctl() - release the cooperative lock taken by
 * sprd_gem_lock_handle_ioctl(). If waiters exist, remove the next entry
 * from the wait list and wake its wait queue; otherwise just clear lockpid.
 * Two unreferences: the lookup here plus the reference held by the lock
 * path (second unref line partly elided; confirm pairing).
 */
1316 int sprd_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
1317 struct drm_file *file_priv)
1320 struct drm_sprd_gem_unlock_handle *args = data;
1321 struct drm_gem_object *obj;
1322 struct sprd_drm_gem_obj *unlock_obj;
1323 struct drm_sprd_gem_object_wait_list_entry *lock_next;
1326 DRM_DEBUG_DRIVER("%s line:%d\n", __func__, __LINE__);
1327 mutex_lock(&dev->struct_mutex);
1329 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1332 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1337 unlock_obj = to_sprd_gem_obj(obj);
1338 if (!list_empty(&unlock_obj->wait_list)) {
/* oldest waiter sits at wait_list.prev (tail) */
1340 (struct drm_sprd_gem_object_wait_list_entry *)
1341 unlock_obj->wait_list.prev;
1343 list_del((struct list_head *)&lock_next->list);
1345 unlock_obj->lockpid = 0;
1346 wake_up_interruptible(
1347 &lock_next->process_wait_q);
1351 /* List is empty so set pid to 0 */
1352 unlock_obj->lockpid = 0;
1354 drm_gem_object_unreference(obj);
1356 drm_gem_object_unreference(obj);
1358 mutex_unlock(&dev->struct_mutex);
1363 int sprd_gem_cache_op_ioctl(struct drm_device *dev, void *data,
1364 struct drm_file *file_priv)
1366 struct drm_sprd_gem_cache_op *args = data;
1368 struct drm_gem_object *obj;
1369 struct sprd_drm_gem_obj *sprd_gem_obj;
1370 struct sprd_drm_gem_buf *buf;
1371 struct sg_table *sgt;
1372 unsigned int cache_op = args->flags &(~SPRD_DRM_ALL_CACHE);
1374 mutex_lock(&dev->struct_mutex);
1375 obj = drm_gem_object_lookup(dev, file_priv, args->gem_handle);
1378 DRM_ERROR("invalid handle[%d]\n", args->gem_handle);
1380 goto err_invalid_handle;
1383 sprd_gem_obj = to_sprd_gem_obj(obj);
1384 buf = sprd_gem_obj->buffer;
1387 DRM_DEBUG("%s:h[%d]s[%d]f[0x%x]a[0x%x]o[0x%x]\n",
1388 "gc",args->gem_handle, (int)args->size, args->flags,
1389 (int)args->usr_addr, (int)obj);
1391 if (!IS_CACHABLE_BUFFER(sprd_gem_obj->flags)) {
1392 DRM_ERROR("invalid flags[0x%x]for h[%d]\n",
1393 sprd_gem_obj->flags, args->gem_handle);
1398 case SPRD_DRM_CACHE_INV:
1399 dma_sync_sg_for_cpu(NULL, sgt->sgl, sgt->nents,
1402 case SPRD_DRM_CACHE_CLN:
1403 dma_sync_sg_for_device(NULL, sgt->sgl, sgt->nents,
1406 case SPRD_DRM_CACHE_FSH:
1407 dma_sync_sg_for_device(NULL, sgt->sgl, sgt->nents,
1409 dma_sync_sg_for_cpu(NULL, sgt->sgl, sgt->nents,
1413 DRM_ERROR("invalid op[0x%x]for h[%d]\n", cache_op, args->gem_handle);
1419 drm_gem_object_unreference(obj);
1422 mutex_unlock(&dev->struct_mutex);