// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>

#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

21 #include "ivpu_mmu_context.h"
23 MODULE_IMPORT_NS(DMA_BUF);
static const struct drm_gem_object_funcs ivpu_gem_funcs;

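/*
 * Prime BOs get their own lockdep class: their bo->lock can be taken from
 * dma-buf paths, so keeping it in a separate class avoids false-positive
 * lock-inversion reports (rationale inferred, not stated in the source).
 */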
static struct lock_class_key prime_bo_lock_class_key;

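/*
 * Each BO type (prime, shmem, internal) provides an ivpu_bo_ops vtable with
 * alloc/free/map/unmap callbacks; the _locked suffix means the callback must
 * run with bo->lock held.
 */
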
static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
	return 0;
}

static void prime_free_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
}

static int prime_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
		return PTR_ERR(sgt);
	}

	bo->sgt = sgt;
	return 0;
}

static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
	dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops prime_ops = {
	.type = IVPU_BO_TYPE_PRIME,
	.name = "prime",
	.alloc_pages = prime_alloc_pages_locked,
	.free_pages = prime_free_pages_locked,
	.map_pages = prime_map_pages_locked,
	.unmap_pages = prime_unmap_pages_locked,
};

static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;

	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(pages, npages);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(pages, npages);

	bo->pages = pages;
	return 0;
}

static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);

	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}

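/*
 * Helpers shared by shmem and internal BOs: build an sg_table from bo->pages
 * and map it for device DMA through the IOMMU.
 */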
static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;
	int ret;

	sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to allocate sgtable\n");
		return PTR_ERR(sgt);
	}

	ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
		goto err_free_sgt;
	}

	bo->sgt = sgt;
	return 0;

err_free_sgt:
	kfree(sgt);
	return ret;
}

static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops shmem_ops = {
	.type = IVPU_BO_TYPE_SHMEM,
	.name = "shmem",
	.alloc_pages = shmem_alloc_pages_locked,
	.free_pages = shmem_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto err_free_pages;
		}
		cond_resched();
	}

	bo->pages = pages;
	return 0;

err_free_pages:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return ret;
}

static void internal_free_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++)
		put_page(bo->pages[i]);

	kvfree(bo->pages);
	bo->pages = NULL;
}

static const struct ivpu_bo_ops internal_ops = {
	.type = IVPU_BO_TYPE_INTERNAL,
	.name = "internal",
	.alloc_pages = internal_alloc_pages_locked,
	.free_pages = internal_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

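/*
 * Backing pages are allocated lazily: the first pin, CPU page fault, or
 * sg-table export triggers alloc_pages + map_pages under bo->lock.
 */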
static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	lockdep_assert_held(&bo->lock);
	drm_WARN_ON(&vdev->drm, bo->sgt);

	ret = bo->ops->alloc_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
		return ret;
	}

	ret = bo->ops->map_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
		goto err_free_pages;
	}
	return ret;

err_free_pages:
	bo->ops->free_pages(bo);
	return ret;
}

static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
	mutex_lock(&bo->lock);

	WARN_ON(!bo->sgt);
	bo->ops->unmap_pages(bo);
	WARN_ON(bo->sgt);
	bo->ops->free_pages(bo);
	WARN_ON(bo->pages);

	mutex_unlock(&bo->lock);
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 *
 * This function pins physical memory pages, then maps the physical pages
 * to IOMMU address space and finally updates the VPU MMU page tables
 * to allow the VPU to translate VPU address to IOMMU address.
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->vpu_addr) {
		ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
			 bo->ctx->id, bo->handle);
		ret = -EINVAL;
		goto unlock;
	}

	if (!bo->sgt) {
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->mmu_mapped) {
		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
					       ivpu_bo_is_snooped(bo));
		if (ret) {
			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
			goto unlock;
		}
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

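/*
 * VPU address management: a BO gets a VPU address range either explicitly
 * from the caller or derived from its flags (SHAVE, DMA, or the general
 * user range).
 */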
static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       const struct ivpu_addr_range *range)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	if (!range) {
		if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
			range = &vdev->hw->ranges.shave;
		else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
			range = &vdev->hw->ranges.dma;
		else
			range = &vdev->hw->ranges.user;
	}

	mutex_lock(&ctx->lock);
	ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
		list_add_tail(&bo->ctx_node, &ctx->bo_list);
	}
	mutex_unlock(&ctx->lock);

	return ret;
}

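/* Lock ordering: bo->lock is taken before ctx->lock. */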
static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct ivpu_mmu_context *ctx = bo->ctx;

	ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
		 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);

	mutex_lock(&bo->lock);

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->sgt);
		ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
		bo->mmu_mapped = false;
	}

	mutex_lock(&ctx->lock);
	list_del(&bo->ctx_node);
	bo->vpu_addr = 0;
	bo->ctx = NULL;
	ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
	mutex_unlock(&ctx->lock);

	mutex_unlock(&bo->lock);
}

void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo, *tmp;

	list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
		ivpu_bo_free_vpu_addr(bo);
}

static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
	      u64 size, u32 flags, const struct ivpu_bo_ops *ops,
	      const struct ivpu_addr_range *range, u64 user_ptr)
{
	struct ivpu_bo *bo;
	int ret = 0;

	if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
		return ERR_PTR(-EINVAL);

	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
	case DRM_IVPU_BO_CACHED:
	case DRM_IVPU_BO_UNCACHED:
	case DRM_IVPU_BO_WC:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	bo->base.funcs = &ivpu_gem_funcs;
	bo->flags = flags;
	bo->ops = ops;
	bo->user_ptr = user_ptr;

	if (ops->type == IVPU_BO_TYPE_SHMEM)
		ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
	else
		drm_gem_private_object_init(&vdev->drm, &bo->base, size);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize drm object\n");
		goto err_free;
	}

	if (flags & DRM_IVPU_BO_MAPPABLE) {
		ret = drm_gem_create_mmap_offset(&bo->base);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate mmap offset\n");
			goto err_release;
		}
	}

	if (mmu_context) {
		ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
		if (ret) {
			ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
			goto err_release;
		}
	}

	return bo;

err_release:
	drm_gem_object_release(&bo->base);
err_free:
	kfree(bo);
	return ERR_PTR(ret);
}

static void ivpu_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	if (bo->ctx)
		ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
			 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
	else
		ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
			 (bool)bo->sgt, bo->mmu_mapped);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));

	vunmap(bo->kvaddr);

	if (bo->ctx)
		ivpu_bo_free_vpu_addr(bo);

	if (bo->sgt)
		ivpu_bo_unmap_and_free_pages(bo);

	if (bo->base.import_attach)
		drm_prime_gem_destroy(&bo->base, bo->sgt);

	drm_gem_object_release(&bo->base);

	mutex_destroy(&bo->lock);
	kfree(bo);
}

static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
		 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;
		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
	vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));

	return 0;
}

static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->sgt)
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);

	mutex_unlock(&bo->lock);

	if (ret)
		return ERR_PTR(ret);

	return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}

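/*
 * CPU page-fault handler: populate the backing pages on first access and
 * insert the faulting PFN into the user's VMA.
 */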
static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	pgoff_t page_offset;
	struct page *page;
	vm_fault_t ret;
	int err;

	mutex_lock(&bo->lock);

	if (!bo->sgt) {
		err = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (err) {
			ret = vmf_error(err);
			goto unlock;
		}
	}

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	if (page_offset >= npages) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = bo->pages[page_offset];
		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static const struct vm_operations_struct ivpu_vm_ops = {
	.fault = ivpu_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs ivpu_gem_funcs = {
	.free = ivpu_bo_free,
	.mmap = ivpu_bo_mmap,
	.vm_ops = &ivpu_vm_ops,
	.get_sg_table = ivpu_bo_get_sg_table,
};

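/*
 * DRM_IVPU_BO_CREATE ioctl: allocate a shmem BO in the caller's MMU context
 * and return a GEM handle plus the assigned VPU address.
 */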
int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_bo_create *args = data;
	u64 size = PAGE_ALIGN(args->size);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
	if (!ret) {
		args->vpu_addr = bo->vpu_addr;
		args->handle = bo->handle;

		/* Log before dropping our reference so bo is not used after put */
		ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
			 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);
	}

	drm_gem_object_put(&bo->base);

	return ret;
}

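/*
 * Internal BOs are kernel-only allocations in the global context (gctx).
 * A non-zero vpu_addr requests a fixed VPU address; otherwise the global
 * range is used.
 */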
struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
{
	const struct ivpu_addr_range *range;
	struct ivpu_addr_range fixed_range;
	struct ivpu_bo *bo;
	pgprot_t prot;
	int ret;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	if (vpu_addr) {
		fixed_range.start = vpu_addr;
		fixed_range.end = vpu_addr + size;
		range = &fixed_range;
	} else {
		range = &vdev->hw->ranges.global;
	}

	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
			 bo, vpu_addr, size, flags);
		return NULL;
	}

	ret = ivpu_bo_pin(bo);
	if (ret)
		goto err_put;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);

	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
	bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
	if (!bo->kvaddr) {
		ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
		goto err_put;
	}

	ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
		 bo->vpu_addr, bo->base.size, flags);

	return bo;

err_put:
	drm_gem_object_put(&bo->base);
	return NULL;
}

void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
	drm_gem_object_put(&bo->base);
}

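/*
 * Illustrative usage of the internal BO API (example, not from the original
 * source):
 *
 *	bo = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
 *	if (!bo)
 *		return -ENOMEM;
 *	... use bo->kvaddr for CPU access, bo->vpu_addr on the VPU side ...
 *	ivpu_bo_free_internal(bo);
 */
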
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct dma_buf_attachment *attach;
	struct ivpu_bo *bo;

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(buf);

	bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
		goto err_detach;
	}

	lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);

	bo->base.import_attach = attach;

	return &bo->base;

err_detach:
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
	return ERR_CAST(bo);
}

int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct drm_ivpu_bo_info *args = data;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);

	if (!bo->ctx) {
		ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
			goto unlock;
		}
	}

	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;

unlock:
	mutex_unlock(&bo->lock);
	drm_gem_object_put(obj);
	return ret;
}

int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_wait *args = data;
	struct drm_gem_object *obj;
	unsigned long timeout;
	long ret;

	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}

	drm_gem_object_put(obj);

	return ret;
}

static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
	unsigned long dma_refcount = 0;

	if (bo->base.dma_buf && bo->base.dma_buf->file)
		dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);

	drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
		   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
		   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;
	struct ivpu_bo *bo;

	drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
		   "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");

	mutex_lock(&vdev->gctx.lock);
	list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->gctx.lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
		if (!file_priv)
			continue;

		mutex_lock(&file_priv->ctx.lock);
		list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
			ivpu_bo_print_info(bo, p);
		mutex_unlock(&file_priv->ctx.lock);

		ivpu_file_priv_put(&file_priv);
	}
}

void ivpu_bo_list_print(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	ivpu_bo_list(dev, &p);
}