/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

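/*
 * Buffer objects are backed in one of two ways:
 *
 *  - shmem pages allocated via drm_gem_get_pages(), when an IOMMU is
 *    present (use_pages() returns true), or
 *  - a contiguous chunk of the VRAM carveout (priv->vram), tracked with
 *    a drm_mm_node, when there is no IOMMU.
 *
 * physaddr() and get_pages_vram() below implement the carveout case.
 */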
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj)) {
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		} else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

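/* Get (and implicitly pin) the backing pages, taking struct_mutex
 * internally.  There is no pin-count yet, so the pages stay attached
 * until the object is purged or freed and msm_gem_put_pages() is
 * currently a no-op.
 */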
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;

	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

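/* Fault handler for userspace mappings: attach (or look up) the backing
 * pages under struct_mutex and insert the single faulting pfn into the
 * VMA with vm_insert_mixed().  Faults taken while the submit ioctl
 * already holds struct_mutex are rejected with SIGBUS (see below).
 */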
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}

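/* Tear down the GPU-side mappings: walk all per-MMU address spaces and
 * unmap any iova this object holds.  Called with struct_mutex held,
 * e.g. when the object is purged or freed.
 */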
static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
			msm_obj->domain[id].iova = 0;
		}
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

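/* Dumb-buffer support: allocate a linear, CPU-mappable scanout buffer
 * for userspace that only needs unaccelerated display (e.g. fbdev or
 * simple KMS clients).
 */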
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

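/* Kernel virtual mapping: the first caller vmap()s the backing pages
 * (write-combined) and the mapping is cached in msm_obj->vaddr with a
 * vmap_count reference, so nested get/put pairs are cheap.  A typical
 * user does something like:
 *
 *	ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, size);
 *	msm_gem_put_vaddr(obj);
 */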
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

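/* Drop all backing storage for a purgeable (MSM_MADV_DONTNEED) object:
 * release its iovas, kernel mapping and pages, then truncate the shmem
 * file so the memory actually goes back to the system.  Typically
 * invoked from the shrinker under memory pressure.
 */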
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

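/* Fence synchronization for submits: an exclusive (write) submit waits
 * for all fences on the object's reservation, while a shared (read)
 * submit only needs to wait for the current exclusive fence.  Fences
 * from our own fence context are skipped since the ring is FIFO.
 */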
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

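/* Prepare for CPU access: wait (or, with MSM_PREP_NOSYNC, just test)
 * for outstanding GPU work tracked on the reservation object, honoring
 * the read/write intent in 'op'.
 */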
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

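/* Common allocation path shared by msm_gem_new() and msm_gem_import():
 * validates the cache flags, decides between shmem and VRAM-carveout
 * backing, allocates the msm_gem_object (with an embedded drm_mm_node
 * for the carveout case) and hooks it onto the inactive list.
 */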
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

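/* Import a dma-buf: the backing pages belong to the exporter, so we
 * only build the page array from the provided sg_table and never
 * allocate or free the pages ourselves.
 */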
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}