// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
			       (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			       (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};

enum {
	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

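/* Per-device state for one open /dev/vhost-vsock file. Note that the two
 * virtqueues are named from the guest's point of view: vqs[VSOCK_VQ_TX] is
 * the queue the guest transmits on (guest->host) and vqs[VSOCK_VQ_RX] is the
 * queue the host fills with packets for the guest (host->guest).
 */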
struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
	bool seqpacket_allow;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (!other_cid)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

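/* Flush vsock->send_pkt_list into the guest's RX virtqueue. Called from the
 * send_pkt worker and from the RX kick handler; takes vq->mutex itself.
 * Packets larger than the available guest buffer are split across several
 * buffers, requeueing the remainder for the next iteration.
 */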
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned int out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;
		bool restore_flag = false;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr)) {
			payload_len = iov_len - sizeof(pkt->hdr);

			/* As we are copying pieces of a large packet's buffer
			 * into small rx buffers, headers of packets in the rx
			 * queue are created dynamically and are initialized
			 * from the header of the current packet (except the
			 * length). But in case of SOCK_SEQPACKET, we must also
			 * clear the record delimiter bit (VIRTIO_VSOCK_SEQ_EOR).
			 * Otherwise, instead of one packet with the delimiter
			 * (which marks the end of a record), there would be a
			 * sequence of packets with the delimiter bit set. After
			 * the initialized header has been copied to the rx
			 * buffer, this bit will be restored.
			 */
			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
				restore_flag = true;
			}
		}

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			if (restore_flag)
				pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);

			/* We are queueing the same virtio_vsock_pkt to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			pkt->tap_delivered = false;

			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

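/* Callback used by the virtio transport to send a packet to the guest: look
 * up the destination CID, queue the packet on send_pkt_list and kick the
 * send_pkt worker.
 */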
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

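/* Drop packets queued for @vsk that have not yet been pushed to the guest,
 * e.g. when the socket is being released.
 */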
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

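/* Build a virtio_vsock_pkt from a guest TX descriptor chain: copy the header,
 * validate the advertised length, then copy the payload.
 */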
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf_len = pkt->len;

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

static bool vhost_transport_seqpacket_allow(u32 remote_cid);

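/* Transport ops exported to the vsock core. Most callbacks come from the
 * common virtio transport; only packet delivery and cancellation are
 * vhost-specific.
 */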
static struct virtio_transport vhost_transport = {
	.transport = {
		.module = THIS_MODULE,

		.get_local_cid = vhost_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = vhost_transport_cancel_pkt,

		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow = vhost_transport_seqpacket_allow,
		.seqpacket_has_data = virtio_transport_seqpacket_has_data,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size = virtio_transport_notify_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
	struct vhost_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = vhost_vsock_get(remote_cid);

	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;

	rcu_read_unlock();

	return seqpacket_allow;
}

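/* Kick handler for the guest's TX virtqueue: pull packets sent by the guest,
 * hand correctly addressed ones to the vsock core and drop the rest, while
 * respecting the queued_replies limit.
 */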
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies. Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

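/* VHOST_VSOCK_SET_RUNNING(1): attach this device as the backend of both
 * virtqueues and kick the send worker in case packets were queued before the
 * device was started.
 */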
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

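/* open() for /dev/vhost-vsock: allocate the device state and initialize the
 * vhost device with the two virtqueue kick handlers.
 */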
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_dev_flush(&vsock->dev);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire. This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

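/* release() for /dev/vhost-vsock: unpublish the CID, reset orphaned sockets,
 * stop the virtqueues and free any packets still queued for the guest.
 */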
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient. Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

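/* VHOST_VSOCK_SET_GUEST_CID: validate the requested CID and publish this
 * instance in vhost_vsock_hash so packets can be routed to it.
 */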
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

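/* VHOST_SET_FEATURES: accept the negotiated feature bits, enable the device
 * IOTLB if VIRTIO_F_ACCESS_PLATFORM was requested, record SEQPACKET support
 * and propagate the bits to both virtqueues.
 */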
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		goto err;
	}

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&vsock->dev, true))
			goto err;
	}

	if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;

err:
	mutex_unlock(&vsock->dev.mutex);
	return -EFAULT;
}

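/* ioctl() entry point for /dev/vhost-vsock. A VMM typically issues (roughly)
 * VHOST_SET_OWNER, VHOST_GET_FEATURES/VHOST_SET_FEATURES, VHOST_SET_MEM_TABLE
 * and the VHOST_SET_VRING_* calls (handled by the generic vhost code in the
 * default case below), then VHOST_VSOCK_SET_GUEST_CID and finally
 * VHOST_VSOCK_SET_RUNNING.
 */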
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VSOCK_BACKEND_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&vsock->dev, features);
		return 0;
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

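/* The chardev read/write/poll operations just forward to the vhost core;
 * they carry IOTLB messages between the kernel and userspace when
 * VIRTIO_F_ACCESS_PLATFORM (device IOTLB) is in use.
 */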
static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
					  struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_vsock_fops = {
	.owner = THIS_MODULE,
	.open = vhost_vsock_dev_open,
	.release = vhost_vsock_dev_release,
	.llseek = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.read_iter = vhost_vsock_chr_read_iter,
	.write_iter = vhost_vsock_chr_write_iter,
	.poll = vhost_vsock_chr_poll,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

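/* Module init: register the transport with the vsock core as the
 * host->guest (H2G) transport before exposing /dev/vhost-vsock to userspace.
 */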
static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");