// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>

#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

MODULE_IMPORT_NS(DMA_BUF);

static const struct drm_gem_object_funcs ivpu_gem_funcs;

static struct lock_class_key prime_bo_lock_class_key;

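/*
 * Backing storage is managed through per-type ivpu_bo_ops: "prime" BOs
 * imported via dma-buf, "shmem" BOs created by userspace and "internal"
 * BOs allocated by the driver itself. Callbacks with a _locked suffix
 * expect bo->lock to be held by the caller.
 */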
static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
	return 0;
}

static void prime_free_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
}

static int prime_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
		return PTR_ERR(sgt);
	}

	bo->sgt = sgt;
	return 0;
}

static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
	dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops prime_ops = {
	.type = IVPU_BO_TYPE_PRIME,
	.name = "prime",
	.alloc_pages = prime_alloc_pages_locked,
	.free_pages = prime_free_pages_locked,
	.map_pages = prime_map_pages_locked,
	.unmap_pages = prime_unmap_pages_locked,
};

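/* shmem BOs back buffers created by userspace via DRM_IOCTL_IVPU_BO_CREATE */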
static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;

	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(pages, npages);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(pages, npages);

	bo->pages = pages;
	return 0;
}

static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);

	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}

static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;
	int ret;

	sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to allocate sgtable\n");
		return PTR_ERR(sgt);
	}

	ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
		goto err_free_sgt;
	}

	bo->sgt = sgt;
	return 0;

err_free_sgt:
	kfree(sgt);
	return ret;
}

static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops shmem_ops = {
	.type = IVPU_BO_TYPE_SHMEM,
	.name = "shmem",
	.alloc_pages = shmem_alloc_pages_locked,
	.free_pages = shmem_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

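/*
 * Internal BOs are allocated by the driver for its own use and are mapped
 * into the global MMU context (vdev->gctx).
 */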
static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto err_free_pages;
		}
		cond_resched();
	}

	bo->pages = pages;
	return 0;

err_free_pages:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return ret;
}

static void internal_free_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, npages);

	for (i = 0; i < npages; i++)
		put_page(bo->pages[i]);

	kvfree(bo->pages);
	bo->pages = NULL;
}

static const struct ivpu_bo_ops internal_ops = {
	.type = IVPU_BO_TYPE_INTERNAL,
	.name = "internal",
	.alloc_pages = internal_alloc_pages_locked,
	.free_pages = internal_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

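/*
 * Helpers shared by all BO types: allocate the backing pages and map them
 * for DMA in one step, or undo both.
 */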
static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	lockdep_assert_held(&bo->lock);
	drm_WARN_ON(&vdev->drm, bo->sgt);

	ret = bo->ops->alloc_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
		return ret;
	}

	ret = bo->ops->map_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
		bo->ops->free_pages(bo);
	}

	return ret;
}

static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
	mutex_lock(&bo->lock);

	WARN_ON(!bo->sgt);
	bo->ops->unmap_pages(bo);
	WARN_ON(bo->sgt);
	bo->ops->free_pages(bo);
	WARN_ON(bo->pages);

	mutex_unlock(&bo->lock);
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 *
 * This function pins physical memory pages, then maps the physical pages
 * to IOMMU address space and finally updates the VPU MMU page tables
 * to allow the VPU to translate VPU address to IOMMU address.
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->vpu_addr) {
		ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
			 bo->ctx->id, bo->handle);
		ret = -EINVAL;
		goto unlock;
	}

	if (!bo->sgt) {
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->mmu_mapped) {
		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
					       ivpu_bo_is_snooped(bo));
		if (ret) {
			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
			goto unlock;
		}
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

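/*
 * Reserve a VPU address range for the BO and add it to the context BO list.
 * When no explicit range is given, one is picked based on the BO flags
 * (SHAVE memory, DMA memory or the generic user range).
 */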
static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       const struct ivpu_addr_range *range)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	if (!range) {
		if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
			range = &vdev->hw->ranges.shave;
		else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
			range = &vdev->hw->ranges.dma;
		else
			range = &vdev->hw->ranges.user;
	}

	mutex_lock(&ctx->lock);
	ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
		list_add_tail(&bo->ctx_node, &ctx->bo_list);
	}
	mutex_unlock(&ctx->lock);

	return ret;
}

static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct ivpu_mmu_context *ctx = bo->ctx;

	ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
		 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);

	mutex_lock(&bo->lock);

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->sgt);
		ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
		bo->mmu_mapped = false;
	}

	mutex_lock(&ctx->lock);
	list_del(&bo->ctx_node);
	bo->vpu_addr = 0;
	bo->ctx = NULL;
	ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
	mutex_unlock(&ctx->lock);

	mutex_unlock(&bo->lock);
}

void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo, *tmp;

	list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
		ivpu_bo_free_vpu_addr(bo);
}

static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
	      u64 size, u32 flags, const struct ivpu_bo_ops *ops,
	      const struct ivpu_addr_range *range, u64 user_ptr)
{
	struct ivpu_bo *bo;
	int ret = 0;

	if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
		return ERR_PTR(-EINVAL);

	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
	case DRM_IVPU_BO_CACHED:
	case DRM_IVPU_BO_UNCACHED:
	case DRM_IVPU_BO_WC:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	bo->base.funcs = &ivpu_gem_funcs;
	bo->flags = flags;
	bo->ops = ops;
	bo->user_ptr = user_ptr;

	if (ops->type == IVPU_BO_TYPE_SHMEM)
		ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
	else
		drm_gem_private_object_init(&vdev->drm, &bo->base, size);

	if (ret) {
		ivpu_err(vdev, "Failed to initialize drm object\n");
		goto err_free;
	}

	if (flags & DRM_IVPU_BO_MAPPABLE) {
		ret = drm_gem_create_mmap_offset(&bo->base);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate mmap offset\n");
			goto err_release;
		}
	}

	if (mmu_context) {
		ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
		if (ret) {
			ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
			goto err_release;
		}
	}

	return bo;

err_release:
	drm_gem_object_release(&bo->base);
err_free:
	kfree(bo);
	return ERR_PTR(ret);
}

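/*
 * GEM .free callback: unmaps the BO from the kernel address space, the VPU
 * MMU and the IOMMU, then releases the backing pages.
 */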
static void ivpu_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	if (bo->ctx)
		ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
			 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
	else
		ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
			 (bool)bo->sgt, bo->mmu_mapped);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));

	vunmap(bo->kvaddr);

	if (bo->ctx)
		ivpu_bo_free_vpu_addr(bo);

	if (bo->sgt)
		ivpu_bo_unmap_and_free_pages(bo);

	if (bo->base.import_attach)
		drm_prime_gem_destroy(&bo->base, bo->sgt);

	drm_gem_object_release(&bo->base);

	mutex_destroy(&bo->lock);
	kfree(bo);
}

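/*
 * GEM .mmap callback: mappings of imported buffers are delegated to the
 * exporter via dma_buf_mmap(), native BOs are demand-faulted through
 * ivpu_vm_fault().
 */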
static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
		 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;
		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
	vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));

	return 0;
}

static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->sgt)
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);

	mutex_unlock(&bo->lock);

	if (ret)
		return ERR_PTR(ret);

	return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}

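/* Fault handler that populates a userspace mapping one page at a time */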
static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	pgoff_t page_offset;
	struct page *page;
	vm_fault_t ret;
	int err;

	mutex_lock(&bo->lock);

	if (!bo->sgt) {
		err = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (err) {
			ret = vmf_error(err);
			goto unlock;
		}
	}

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	if (page_offset >= npages) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = bo->pages[page_offset];
		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static const struct vm_operations_struct ivpu_vm_ops = {
	.fault = ivpu_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs ivpu_gem_funcs = {
	.free = ivpu_bo_free,
	.mmap = ivpu_bo_mmap,
	.vm_ops = &ivpu_vm_ops,
	.get_sg_table = ivpu_bo_get_sg_table,
};

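/* DRM_IOCTL_IVPU_BO_CREATE: allocate a shmem BO and return its handle and VPU address */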
int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_bo_create *args = data;
	u64 size = PAGE_ALIGN(args->size);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
	if (!ret) {
		args->vpu_addr = bo->vpu_addr;
		args->handle = bo->handle;
	}

	ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
		 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);

	/* Drop the local reference; on success the handle keeps the BO alive */
	drm_gem_object_put(&bo->base);

	return ret;
}

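/*
 * Allocate a pinned, kernel-mapped BO in the global context. A non-zero
 * vpu_addr requests a fixed VPU address range for the BO.
 */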
struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
{
	const struct ivpu_addr_range *range;
	struct ivpu_addr_range fixed_range;
	struct ivpu_bo *bo;
	pgprot_t prot;
	int ret;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	if (vpu_addr) {
		fixed_range.start = vpu_addr;
		fixed_range.end = vpu_addr + size;
		range = &fixed_range;
	} else {
		range = &vdev->hw->ranges.global;
	}

	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
			 bo, vpu_addr, size, flags);
		return NULL;
	}

	ret = ivpu_bo_pin(bo);
	if (ret)
		goto err_put;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);

	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
	bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
	if (!bo->kvaddr) {
		ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
		goto err_put;
	}

	ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
		 bo->vpu_addr, bo->base.size, flags);

	return bo;

err_put:
	drm_gem_object_put(&bo->base);
	return NULL;
}

void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
	drm_gem_object_put(&bo->base);
}

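/* dma-buf import: attach to the exporter and wrap the buffer in a prime BO */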
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct dma_buf_attachment *attach;
	struct ivpu_bo *bo;

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(buf);

	bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
		goto err_detach;
	}

	lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);

	bo->base.import_attach = attach;

	return &bo->base;

err_detach:
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
	return ERR_CAST(bo);
}

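/*
 * DRM_IOCTL_IVPU_BO_INFO: return BO flags, size, mmap offset and VPU address.
 * A BO that has no VPU address yet (e.g. an imported one) gets one assigned
 * here on first use.
 */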
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct drm_ivpu_bo_info *args = data;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);

	if (!bo->ctx) {
		ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
			goto unlock;
		}
	}

	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;
unlock:
	mutex_unlock(&bo->lock);
	drm_gem_object_put(obj);
	return ret;
}

int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_wait *args = data;
	struct drm_gem_object *obj;
	unsigned long timeout;
	long ret;

	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}

	drm_gem_object_put(obj);

	return ret;
}

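/* Debug helpers (debugfs) that dump all BOs from the global and per-file contexts */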
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
	unsigned long dma_refcount = 0;

	if (bo->base.dma_buf && bo->base.dma_buf->file)
		dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);

	drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
		   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
		   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;
	struct ivpu_bo *bo;

	drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
		   "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");

	mutex_lock(&vdev->gctx.lock);
	list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->gctx.lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
		if (!file_priv)
			continue;

		mutex_lock(&file_priv->ctx.lock);
		list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
			ivpu_bo_print_info(bo, p);
		mutex_unlock(&file_priv->ctx.lock);

		ivpu_file_priv_put(&file_priv);
	}
}

void ivpu_bo_list_print(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	ivpu_bo_list(dev, &p);
}