// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions. And thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

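/*
 * Per-device state for a vhost-vdpa character device. The embedded
 * vhost_dev provides the generic vhost machinery (worker, IOTLB
 * message parsing); vdpa points at the parent device whose config
 * ops the ioctls below are translated onto.
 */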
struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

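/*
 * A guest kick queued on a vring is serviced by the vhost worker and
 * forwarded to the parent device here; (vq - v->vqs) recovers the
 * queue index from the virtqueue's position in the array.
 */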
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

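/*
 * Try to wire the device's per-vq interrupt directly to the call
 * eventfd through the IRQ bypass framework, avoiding a host-side hop
 * where the architecture supports it. Failure is not fatal: signalling
 * falls back to the normal eventfd path, so we only dev_info() here.
 */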
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

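/*
 * Writes to the status byte drive the virtio device lifecycle.
 * DRIVER_OK transitions are intercepted here so that vq interrupt
 * bypass is set up when the device goes live and torn down when it
 * stops.
 */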
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

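/*
 * Bind (or unbind, with VHOST_FILE_UNBIND) the eventfd used to relay
 * config-space change interrupts to userspace. swap() publishes the
 * new context and hands back the old one to release, then the
 * parent's config callback is (re)armed.
 */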
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v->vdpa;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

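/*
 * Vring ioctls are handled in two steps: vhost_vring_ioctl() updates
 * the generic vhost_virtqueue state, then the result is propagated to
 * the parent device through the vdpa config ops. vhost-vdpa specific
 * commands are handled up front and return directly.
 */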
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;
		vq->last_avail_idx = vq_state.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;
	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;
	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;
	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

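/*
 * Top-level ioctl dispatcher. VHOST_SET_BACKEND_FEATURES is handled
 * before taking the device mutex, as vhost_set_backend_features()
 * acquires it internally.
 */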
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

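/*
 * Drop every IOTLB entry overlapping [start, last]: unpin the backing
 * pages (marking them dirty when the device may have written to them),
 * uncharge them from the owner's pinned_vm, and free the entries
 * themselves.
 */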
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	vhost_iotlb_free(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

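/*
 * A mapping reaches the device in one of three ways, in order of
 * preference: a per-range dma_map op, a whole-table set_map op
 * (deferred to BATCH_END while an IOTLB batch is in flight), or a
 * plain IOMMU domain mapping when the parent leaves DMA translation
 * to the platform.
 */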
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
	else
		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

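/*
 * VHOST_IOTLB_UPDATE handler: pin the userspace range one page_list
 * at a time with pin_user_pages(), coalesce physically contiguous
 * pages, and hand each contiguous chunk to vhost_vdpa_map(). On
 * failure, pages are unpinned either here (not yet mapped) or through
 * vhost_vdpa_unmap() for the part that was already mapped.
 */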
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (msg->iova < v->range.first ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the completed contiguous chunk */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret) {
					/*
					 * Unpin the pages that are left
					 * unmapped from this point on in the
					 * current page_list. The remaining
					 * outstanding ones, which may stride
					 * across several chunks, are covered
					 * by the common error path below.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += pinned << PAGE_SHIFT;
		npages -= pinned;
	}

	/* Map the last contiguous chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to
			 * be mapped but haven't been, due to
			 * vhost_vdpa_map() or pin_user_pages() failure.
			 * Mapped pages are accounted in vhost_vdpa_map(),
			 * so their unpinning is handled by
			 * vhost_vdpa_unmap().
			 */
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, msg->iova, msg->size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

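/*
 * Entry point for IOTLB messages written by userspace.
 * BATCH_BEGIN/BATCH_END bracket a series of updates so that
 * set_map-style parents see one consolidated table update at
 * BATCH_END instead of one per range.
 */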
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

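/*
 * Parents that translate DMA themselves (set_map/dma_map) need no
 * host IOMMU domain. Otherwise, allocate a domain on the DMA device's
 * bus and attach it so vhost_vdpa_map() can use iommu_map().
 */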
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

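/*
 * The usable IOVA window advertised through VHOST_VDPA_GET_IOVA_RANGE
 * comes from the parent driver when it reports one, else from the
 * IOMMU aperture, and otherwise defaults to the whole 64-bit space.
 */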
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct iommu_domain_geometry geo;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain &&
		   !iommu_domain_get_attr(v->domain,
					  DOMAIN_ATTR_GEOMETRY, &geo) &&
		   geo.force_aperture) {
		range->first = geo.aperture_start;
		range->last = geo.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

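/*
 * A vhost-vdpa device is exclusive: the atomic "opened" flag admits a
 * single opener at a time, and vhost_vdpa_release() lets the next one
 * in.
 */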
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	int i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

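/*
 * Doorbell mapping: each page offset of the character device
 * corresponds to the notification area of one virtqueue, letting
 * userspace kick the device with a plain MMIO write instead of a
 * kick eventfd.
 */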
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only support
	 * doorbells that sit on a page boundary and do not share the
	 * page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

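/*
 * Removal must not race with an open file: grab the "opened" slot so
 * no new open can succeed, and wait for the current user (if any) to
 * release first.
 */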
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");