// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
			       (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			       (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};

enum {
	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
	bool seqpacket_allow;
};
static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}
/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}
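/* Move packets queued on vsock->send_pkt_list into the guest's RX virtqueue.
 * Runs from the send_pkt work item and from the RX kick handler; takes
 * vq->mutex for the duration of the transfer.
 */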
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;
		u32 flags_to_restore = 0;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr)) {
			payload_len = iov_len - sizeof(pkt->hdr);

			/* As we are copying pieces of a large packet's buffer
			 * into small rx buffers, headers of packets in the rx
			 * queue are created dynamically and are initialized
			 * with the header of the current packet (except
			 * length). But in the case of SOCK_SEQPACKET, we also
			 * must clear the message delimiter bit
			 * (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
			 * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise, there
			 * would be a sequence of packets with these bits set.
			 * After the initialized header has been copied to the
			 * rx buffer, these required bits are restored.
			 */
			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;

				if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
					pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
				}
			}
		}

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			pkt->hdr.flags |= cpu_to_le32(flags_to_restore);

			/* We are queueing the same virtio_vsock_pkt to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			pkt->tap_delivered = false;

			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}
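/* vhost work function: flush pending host->guest packets into the RX vq */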
static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}
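/* Queue a packet for delivery to the guest identified by hdr.dst_cid and kick
 * the send worker.  Returns the packet length, or -ENODEV if no vhost_vsock
 * instance owns that CID.
 */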
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id  */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}
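/* Drop any packets for @vsk that are still waiting on send_pkt_list and fix
 * up the queued_replies accounting.
 */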
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id  */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
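/* Read one guest->host packet (header plus optional payload) out of the TX
 * virtqueue descriptors.  Returns NULL on malformed input or allocation
 * failure.
 */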
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf_len = pkt->len;

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}
/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}
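/* Transport callbacks handed to the AF_VSOCK core; most operations are the
 * generic virtio implementations, only packet delivery and a few lookups are
 * vhost specific.
 */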
static bool vhost_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport vhost_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = vhost_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};
static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
	struct vhost_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = vhost_vsock_get(remote_cid);

	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;

	rcu_read_unlock();

	return seqpacket_allow;
}
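/* Guest kicked the TX virtqueue: pull packets out of it and feed them to the
 * core virtio transport receive path.
 */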
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}
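/* VHOST_VSOCK_SET_RUNNING(1): attach the backend to both virtqueues and kick
 * the send worker for packets queued before the device was started.
 */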
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}
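/* VHOST_VSOCK_SET_RUNNING(0): detach the backend from both virtqueues */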
static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}
static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_dev_flush(&vsock->dev);
}
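/* Called for every connected socket when a vhost_vsock instance goes away:
 * reset sockets whose peer CID no longer has a backing device.
 */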
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}
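/* VHOST_VSOCK_SET_GUEST_CID: assign the guest CID and publish this instance
 * in vhost_vsock_hash, refusing reserved or already used CIDs.
 */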
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EINVAL;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		goto err;
	}

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&vsock->dev, true))
			goto err;
	}

	if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;

err:
	mutex_unlock(&vsock->dev.mutex);
	return -EFAULT;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VSOCK_BACKEND_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&vsock->dev, features);
		return 0;
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}
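/* The read/write/poll file operations below carry the vhost IOTLB messages
 * used when VIRTIO_F_ACCESS_PLATFORM is negotiated.
 */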
static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
					  struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_poll(file, dev, wait);
}
static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek         = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.read_iter      = vhost_vsock_chr_read_iter,
	.write_iter     = vhost_vsock_chr_write_iter,
	.poll           = vhost_vsock_chr_poll,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};
static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");