// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions. And thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

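/*
 * Work callback run from the vhost worker when userspace (or the guest
 * via an ioeventfd) kicks a virtqueue: forward the kick to the backing
 * vDPA device.
 */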
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

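/*
 * Try to set up interrupt bypass for a virtqueue: if the parent device
 * exposes a per-vq interrupt, register it as an irq bypass producer with
 * the call eventfd as token, so a matching consumer (e.g. a KVM irqfd)
 * can receive the device interrupt directly.
 */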
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	v->in_batch = 0;

	return vdpa_reset(vdpa);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

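/*
 * VHOST_VDPA_SET_STATUS: write the virtio device status. A transition
 * into DRIVER_OK sets up the per-vq interrupt bypass, a transition out
 * of DRIVER_OK tears it down, and writing 0 resets the device.
 */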
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int ret, nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless resetting
	 * the status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	if (status == 0) {
		ret = ops->reset(vdpa);
		if (ret)
			return ret;
	} else
		ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	long size = vdpa->config->get_config_size(vdpa);

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

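/*
 * VHOST_VDPA_SET_CONFIG_CALL: bind an eventfd that is signalled when the
 * device config space changes, or unbind it with VHOST_FILE_UNBIND.
 */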
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	/* vhost_vdpa_config_cb() expects the vhost_vdpa itself as private */
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

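/*
 * Per-virtqueue ioctls: validate and sanitize the queue index, handle the
 * vDPA-specific commands, let the generic vhost_vring_ioctl() do the
 * bookkeeping, then propagate the result (addresses, base index, call
 * eventfd, queue size) to the vDPA device.
 */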
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.split.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.split.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

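/*
 * Main ioctl dispatcher for the vhost-vdpa character device. A minimal,
 * illustrative userspace sequence (the device node name and the exact
 * ordering are just an example, not a complete driver) might look like:
 *
 *	fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	... set up vrings, IOTLB mappings, call/kick eventfds ...
 *	status |= VIRTIO_CONFIG_S_DRIVER_OK;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 */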
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

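/*
 * Tear down IOTLB entries backed by pinned pages in [start, last]: mark
 * writable pages dirty, unpin them and drop the pinned_vm accounting
 * taken at map time.
 */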
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = PFN_DOWN(map->size);
		for (pfn = PFN_DOWN(map->addr);
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (vdpa->use_va)
		return vhost_vdpa_va_unmap(v, start, last);

	return vhost_vdpa_pa_unmap(v, start, last);
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

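/*
 * Insert a mapping into the vhost IOTLB and mirror it to the device using
 * whichever translation the parent provides: a per-range dma_map(), a
 * whole-table set_map() (deferred while an IOTLB batch is in flight), or
 * a platform IOMMU domain managed by this driver.
 */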
static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
			  u64 size, u64 pa, u32 perm, void *opaque)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
				      pa, perm, opaque);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}
	if (r) {
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
		return r;
	}

	if (!vdpa->use_va)
		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);

	return 0;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

static int vhost_vdpa_va_map(struct vhost_vdpa *v,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	u64 offset, map_size, map_iova = iova;
	struct vdpa_map_file *map_file;
	struct vm_area_struct *vma;
	int ret = 0;

	mmap_read_lock(dev->mm);

	while (size) {
		vma = find_vma(dev->mm, uaddr);
		if (!vma) {
			ret = -EINVAL;
			break;
		}
		map_size = min(size, vma->vm_end - uaddr);
		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
			goto next;

		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
		if (!map_file) {
			ret = -ENOMEM;
			break;
		}
		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
		map_file->offset = offset;
		map_file->file = get_file(vma->vm_file);
		ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
				     perm, map_file);
		if (ret) {
			fput(map_file->file);
			kfree(map_file);
			break;
		}
next:
		size -= map_size;
		uaddr += map_size;
		map_iova += map_size;
	}
	if (ret)
		vhost_vdpa_unmap(v, iova, map_iova - iova);

	mmap_read_unlock(dev->mm);

	return ret;
}

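/*
 * Map a userspace range by pinning its pages (FOLL_LONGTERM), one
 * page_list at a time, merging physically contiguous runs into single
 * vhost_vdpa_map() calls and rolling everything back on failure.
 */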
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 start = iova;
	long pinned;
	int ret = 0;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk of pinned memory */
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iova, csize,
						     PFN_PHYS(map_pfn),
						     perm, NULL);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
	}

	/* Map the remaining contiguous chunk */
	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     PFN_PHYS(map_pfn), perm, NULL);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, start, size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

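/*
 * Handle a VHOST_IOTLB_UPDATE message: reject ranges outside the device's
 * IOVA range or overlapping an existing mapping, then map either by VA
 * (for devices that use virtual addresses) or by pinned physical pages.
 */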
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_iotlb *iotlb = dev->iotlb;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (vdpa->use_va)
		return vhost_vdpa_va_map(v, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

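/*
 * Allocate and attach an IOMMU domain for devices that rely on the
 * platform IOMMU for DMA isolation, i.e. those that provide neither
 * dma_map() nor set_map(). The IOMMU must support cache-coherent
 * mappings, since perm_to_iommu_flags() always requests IOMMU_CACHE.
 */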
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain && v->domain->geometry.force_aperture) {
		range->first = v->domain->geometry.aperture_start;
		range->last = v->domain->geometry.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

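/*
 * Opening the character device is exclusive: only one userspace instance
 * can own the device at a time. Open resets the device and builds the
 * vhost_dev, its virtqueues, the IOTLB and (if needed) the IOMMU domain.
 */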
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	r = vhost_vdpa_reset(v);
	if (r)
		goto err;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	int i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    PFN_DOWN(notify.addr), PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support the doorbell which sits on the page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

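/*
 * Bus probe: called when a vDPA device is bound to this driver. Creates
 * the per-device vhost_vdpa instance and registers the corresponding
 * vhost-vdpa-$minor character device node.
 */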
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");