// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and to Cunming Liang and Zhihong Wang for all
 * their support.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"
enum {
	VHOST_VDPA_BACKEND_FEATURES =
		(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
		(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};
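/*
 * Usage sketch (not part of the driver): userspace is expected to
 * negotiate these backend features before sending v2 IOTLB messages.
 * A minimal, hypothetical flow against a /dev/vhost-vdpa-N fd:
 *
 *	uint64_t features;
 *	ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features);
 *	features &= VHOST_VDPA_BACKEND_FEATURES;   // keep only what we use
 *	ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features);
 */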
#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
};
static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}
static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
}
static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}
static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}
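/*
 * Illustrative sketch (not driver code): the check above mirrors the
 * virtio spec, where a driver may only add status bits; clearing any
 * bit requires a full reset by writing 0. From userspace the usual
 * sequence looks roughly like:
 *
 *	uint8_t s = 0;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);       // reset
 *	s = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);
 *	s |= VIRTIO_CONFIG_S_FEATURES_OK;           // after VHOST_SET_FEATURES
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);
 *	s |= VIRTIO_CONFIG_S_DRIVER_OK;             // vq irqs get set up here
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);
 */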
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}
static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}
static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}
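/*
 * Usage sketch (illustrative, not driver code): struct vhost_vdpa_config
 * carries (off, len) followed by a flexible buf[], so callers allocate
 * the header plus payload in one buffer, e.g. to read a virtio-net MAC:
 *
 *	struct vhost_vdpa_config *c = calloc(1, sizeof(*c) + 6);
 *	c->off = offsetof(struct virtio_net_config, mac);
 *	c->len = 6;
 *	ioctl(fd, VHOST_VDPA_GET_CONFIG, c);   // c->buf now holds the MAC
 */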
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}
static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}
static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx)
		eventfd_ctx_put(v->config_ctx);
}
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v->vdpa;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx))
		return PTR_ERR(v->config_ctx);

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}
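/*
 * Sketch of the expected userspace flow (illustrative only): pass an
 * eventfd so config-space change interrupts can be consumed with
 * read()/poll(), or VHOST_FILE_UNBIND to tear the callback down:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	ioctl(fd, VHOST_VDPA_SET_CONFIG_CALL, &efd);
 *	...
 *	uint64_t cnt;
 *	read(efd, &cnt, sizeof(cnt));   // signalled by vhost_vdpa_config_cb()
 */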
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}
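/*
 * For orientation, a hedged sketch of how userspace might drive these
 * per-vring ioctls in order (addr and file are assumed to be a populated
 * struct vhost_vring_addr and struct vhost_vring_file):
 *
 *	struct vhost_vring_state s = { .index = 0, .num = 256 };
 *	ioctl(fd, VHOST_SET_VRING_NUM, &s);          // -> set_vq_num()
 *	ioctl(fd, VHOST_SET_VRING_ADDR, &addr);      // -> set_vq_address()
 *	s.num = 0;
 *	ioctl(fd, VHOST_SET_VRING_BASE, &s);         // -> set_vq_state()
 *	ioctl(fd, VHOST_SET_VRING_CALL, &file);      // -> set_vq_cb() + irq bypass
 *	s.num = 1;
 *	ioctl(fd, VHOST_VDPA_SET_VRING_ENABLE, &s);  // -> set_vq_ready()
 */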
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		r = copy_to_user(featurep, &features, sizeof(features)) ?
		    -EFAULT : 0;
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}
static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}
static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);

	return r;
}
static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	struct vm_area_struct **vmas;
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long map_pfn, last_pfn = 0;
	unsigned long npages, lock_limit;
	unsigned long i, nmap = 0;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages)
		return -EINVAL;

	page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!page_list || !vmas) {
		ret = -ENOMEM;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
				page_list, vmas);
	if (npages != pinned) {
		if (pinned < 0) {
			ret = pinned;
		} else {
			unpin_user_pages(page_list, pinned);
			ret = -ENOMEM;
		}
		goto unlock;
	}

	iova &= PAGE_MASK;
	map_pfn = page_to_pfn(page_list[0]);

	/* One more iteration to avoid extra vdpa_map() call out of loop. */
	for (i = 0; i <= npages; i++) {
		unsigned long this_pfn;
		u64 csize;

		/* The last chunk may have no valid PFN next to it */
		this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;

		if (last_pfn && (this_pfn == -1UL ||
				 this_pfn != last_pfn + 1)) {
			/* Map a contiguous chunk of the pinned memory */
			csize = last_pfn - map_pfn + 1;
			ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
					     map_pfn << PAGE_SHIFT,
					     msg->perm);
			if (ret) {
				/*
				 * Unpin the pages that were pinned but
				 * have no corresponding vhost_vdpa_map()
				 * call yet; vhost_vdpa_unmap() on the
				 * failure path below accounts for the
				 * pages it unpins on its own.
				 * This asymmetrical accounting lets us
				 * pin all pages at once up front, since
				 * this is the only call site of
				 * vhost_vdpa_map().
				 */
				unpin_user_pages(&page_list[nmap],
						 npages - nmap);
				goto out;
			}
			atomic64_add(csize, &dev->mm->pinned_vm);
			nmap += csize;
			iova += csize << PAGE_SHIFT;
			map_pfn = this_pfn;
		}
		last_pfn = this_pfn;
	}

	WARN_ON(nmap != npages);
out:
	if (ret)
		vhost_vdpa_unmap(v, msg->iova, msg->size);
unlock:
	mmap_read_unlock(dev->mm);
free:
	kvfree(vmas);
	kvfree(page_list);
	return ret;
}
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
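/*
 * IOTLB messages arrive via write() on the char device (see
 * vhost_vdpa_chr_write_iter() below), not via ioctl(). A hedged sketch
 * of a batched mapping, assuming the v2 message layout from
 * <linux/vhost_types.h>:
 *
 *	struct vhost_msg_v2 m = { .type = VHOST_IOTLB_MSG_V2 };
 *	m.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
 *	write(fd, &m, sizeof(m));
 *	m.iotlb = (struct vhost_iotlb_msg) {
 *		.iova = iova, .size = size, .uaddr = (uint64_t)buf,
 *		.perm = VHOST_ACCESS_RW, .type = VHOST_IOTLB_UPDATE,
 *	};
 *	write(fd, &m, sizeof(m));
 *	m.iotlb.type = VHOST_IOTLB_BATCH_END;   // flushed via set_map()
 *	write(fd, &m, sizeof(m));
 */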
static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}
static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}
static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < v->nvqs; i++) {
		vq = &v->vqs[i];
		if (vq->call_ctx.producer.irq)
			irq_bypass_unregister_producer(&vq->call_ctx.producer);
	}
}
static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};
static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support the doorbell which sits on the page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */
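/*
 * Illustrative doorbell usage (an assumption, not driver code): with
 * the constraints above, queue N's notification area is mapped
 * write-only at page offset N and kicked with a plain store; writing
 * the queue index, as with virtio-pci notification, is a common
 * convention but ultimately device-specific:
 *
 *	void *db = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *			fd, N * page_size);
 *	*(volatile uint16_t *)db = N;   // notify virtqueue N
 */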
static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};
static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, we only accept the network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}
static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};
static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);
static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");