af_vsock: rest of SEQPACKET support
net/vmw_vsock/af_vsock.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them. - Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
 * that list. The bound table is used solely for lookup of sockets when packets
 * are received and that's not necessary for SOCK_DGRAM sockets since we create
 * a datagram handle for each and need not perform a lookup.  Keeping SOCK_DGRAM
 * sockets out of the bound hash buckets will reduce the chance of collisions
 * when looking for SOCK_STREAM sockets and prevents us from having to check the
 * socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state.  When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket.  These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection.  If it does, we process the packet for the
 * pending socket.  When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue.  Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue.  If the socket cannot be accepted
 * for some reason then it is marked rejected.  Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request.  Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established.  This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is cleaned
 * up (sock_put() -> sk_free() -> our sk_destruct implementation).  Note this
 * function will also cleanup rejected sockets, those that reach the connected
 * state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked.  Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed.  Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) ensures a
 * reference.  When we defer work until process context and pass a socket as our
 * argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */
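
/*
 * Editor's illustration, not part of this file: a minimal userspace
 * sketch of the lifecycle described in the notes above, using the
 * SEQPACKET type this patch series enables (SOCK_STREAM works the same
 * way).  The port number is an arbitrary example and error handling is
 * elided; the block is guarded out so the file still compiles.
 */
#if 0
#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <unistd.h>

static void example_listener(void)
{
        struct sockaddr_vm addr = {
                .svm_family = AF_VSOCK,
                .svm_cid = VMADDR_CID_ANY,      /* any local CID */
                .svm_port = 1234,               /* arbitrary example port */
        };
        struct sockaddr_vm peer;
        socklen_t peer_len = sizeof(peer);
        int fd, conn;

        fd = socket(AF_VSOCK, SOCK_SEQPACKET, 0); /* "user action" socket */
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
        listen(fd, 8);                          /* sk_state -> TCP_LISTEN */

        /* Incoming requests become pending sockets; accept(2) dequeues
         * them from the listener's accept queue once connected.
         */
        conn = accept(fd, (struct sockaddr *)&peer, &peer_len);
        close(conn);
        close(fd);
}
#endif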

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
        .name = "AF_VSOCK",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define VSOCK_DEFAULT_BUFFER_SIZE     (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128

/* Transport used for host->guest communication */
static const struct vsock_transport *transport_h2g;
/* Transport used for guest->host communication */
static const struct vsock_transport *transport_g2h;
/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;
/* Transport used for local communication */
static const struct vsock_transport *transport_local;
static DEFINE_MUTEX(vsock_register_mutex);

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets).  Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets.  The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES        24

#define VSOCK_HASH(addr)        ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets     (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst)                               \
        (((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst)               \
        (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk)                                \
        vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);
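
/*
 * Editor's illustration, not part of this file: how the macros above
 * index the tables.  With VSOCK_HASH_SIZE buckets (251 in af_vsock.h at
 * the time of writing), a socket bound to port P lands in bucket
 * P % VSOCK_HASH_SIZE, and the one extra trailing bucket holds the
 * unbound sockets:
 *
 *   port 1234 -> vsock_bind_table[1234 % 251] == vsock_bind_table[230]
 *   unbound   -> vsock_bind_table[251]        == vsock_unbound_sockets
 */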

/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
        struct sock *sk = sk_vsock(vsk);
        struct sockaddr_vm local_addr;

        if (vsock_addr_bound(&vsk->local_addr))
                return 0;
        vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
        return __vsock_bind(sk, &local_addr);
}

static void vsock_init_tables(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
                INIT_LIST_HEAD(&vsock_bind_table[i]);

        for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
                INIT_LIST_HEAD(&vsock_connected_table[i]);
}

static void __vsock_insert_bound(struct list_head *list,
                                 struct vsock_sock *vsk)
{
        sock_hold(&vsk->sk);
        list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
                                     struct vsock_sock *vsk)
{
        sock_hold(&vsk->sk);
        list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
        list_del_init(&vsk->bound_table);
        sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
        list_del_init(&vsk->connected_table);
        sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
        struct vsock_sock *vsk;

        list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
                if (vsock_addr_equals_addr(addr, &vsk->local_addr))
                        return sk_vsock(vsk);

                if (addr->svm_port == vsk->local_addr.svm_port &&
                    (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
                     addr->svm_cid == VMADDR_CID_ANY))
                        return sk_vsock(vsk);
        }

        return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
                                                  struct sockaddr_vm *dst)
{
        struct vsock_sock *vsk;

        list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
                            connected_table) {
                if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
                    dst->svm_port == vsk->local_addr.svm_port) {
                        return sk_vsock(vsk);
                }
        }

        return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
        spin_lock_bh(&vsock_table_lock);
        __vsock_insert_bound(vsock_unbound_sockets, vsk);
        spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
        struct list_head *list = vsock_connected_sockets(
                &vsk->remote_addr, &vsk->local_addr);

        spin_lock_bh(&vsock_table_lock);
        __vsock_insert_connected(list, vsk);
        spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
        spin_lock_bh(&vsock_table_lock);
        if (__vsock_in_bound_table(vsk))
                __vsock_remove_bound(vsk);
        spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
        spin_lock_bh(&vsock_table_lock);
        if (__vsock_in_connected_table(vsk))
                __vsock_remove_connected(vsk);
        spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
        struct sock *sk;

        spin_lock_bh(&vsock_table_lock);
        sk = __vsock_find_bound_socket(addr);
        if (sk)
                sock_hold(sk);

        spin_unlock_bh(&vsock_table_lock);

        return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
                                         struct sockaddr_vm *dst)
{
        struct sock *sk;

        spin_lock_bh(&vsock_table_lock);
        sk = __vsock_find_connected_socket(src, dst);
        if (sk)
                sock_hold(sk);

        spin_unlock_bh(&vsock_table_lock);

        return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

void vsock_remove_sock(struct vsock_sock *vsk)
{
        vsock_remove_bound(vsk);
        vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
        int i;

        spin_lock_bh(&vsock_table_lock);

        for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
                struct vsock_sock *vsk;
                list_for_each_entry(vsk, &vsock_connected_table[i],
                                    connected_table)
                        fn(sk_vsock(vsk));
        }

        spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
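
/*
 * Editor's illustration, not part of this file: a hypothetical sketch of
 * how a transport might use vsock_for_each_connected_socket(), e.g. when
 * its underlying device disappears.  The callback runs under
 * vsock_table_lock with BHs disabled, so it must not sleep.
 */
#if 0
static void example_mark_peer_gone(struct sock *sk)
{
        sk->sk_err = ENODEV;            /* flag the error... */
        sk->sk_error_report(sk);        /* ...and wake up waiters */
}

/* on device removal: */
vsock_for_each_connected_socket(example_mark_peer_gone);
#endif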

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
        struct vsock_sock *vlistener;
        struct vsock_sock *vpending;

        vlistener = vsock_sk(listener);
        vpending = vsock_sk(pending);

        sock_hold(pending);
        sock_hold(listener);
        list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
        struct vsock_sock *vpending = vsock_sk(pending);

        list_del_init(&vpending->pending_links);
        sock_put(listener);
        sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
        struct vsock_sock *vlistener;
        struct vsock_sock *vconnected;

        vlistener = vsock_sk(listener);
        vconnected = vsock_sk(connected);

        sock_hold(connected);
        sock_hold(listener);
        list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static bool vsock_use_local_transport(unsigned int remote_cid)
{
        if (!transport_local)
                return false;

        if (remote_cid == VMADDR_CID_LOCAL)
                return true;

        if (transport_g2h) {
                return remote_cid == transport_g2h->get_local_cid();
        } else {
                return remote_cid == VMADDR_CID_HOST;
        }
}

static void vsock_deassign_transport(struct vsock_sock *vsk)
{
        if (!vsk->transport)
                return;

        vsk->transport->destruct(vsk);
        module_put(vsk->transport->module);
        vsk->transport = NULL;
}

/* Assign a transport to a socket and call the .init transport callback.
 *
 * Note: for connectible sockets (stream and seqpacket) this must be called
 * when vsk->remote_addr is set (e.g. during connect() or when a connection
 * request on a listener socket is received).
 * The vsk->remote_addr is used to decide which transport to use:
 *  - remote CID == VMADDR_CID_LOCAL, or == g2h->local_cid, or ==
 *    VMADDR_CID_HOST when g2h is not loaded: use the local transport;
 *  - remote CID <= VMADDR_CID_HOST, or h2g is not loaded, or the remote
 *    flags field includes VMADDR_FLAG_TO_HOST: use the guest->host
 *    transport;
 *  - remote CID > VMADDR_CID_HOST: use the host->guest transport;
 */
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
{
        const struct vsock_transport *new_transport;
        struct sock *sk = sk_vsock(vsk);
        unsigned int remote_cid = vsk->remote_addr.svm_cid;
        __u8 remote_flags;
        int ret;

        /* If the packet is coming with the source and destination CIDs higher
         * than VMADDR_CID_HOST, then a vsock channel where all the packets are
         * forwarded to the host should be established. Then the host will
         * need to forward the packets to the guest.
         *
         * The flag is set on the (listen) receive path (psk is not NULL). On
         * the connect path the flag can be set by the user space application.
         */
        if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST &&
            vsk->remote_addr.svm_cid > VMADDR_CID_HOST)
                vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST;

        remote_flags = vsk->remote_addr.svm_flags;

        switch (sk->sk_type) {
        case SOCK_DGRAM:
                new_transport = transport_dgram;
                break;
        case SOCK_STREAM:
        case SOCK_SEQPACKET:
                if (vsock_use_local_transport(remote_cid))
                        new_transport = transport_local;
                else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
                         (remote_flags & VMADDR_FLAG_TO_HOST))
                        new_transport = transport_g2h;
                else
                        new_transport = transport_h2g;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        if (vsk->transport) {
                if (vsk->transport == new_transport)
                        return 0;

                /* transport->release() must be called with sock lock acquired.
                 * This path can only be taken during vsock_stream_connect(),
                 * where we have already held the sock lock.
                 * In the other cases, this function is called on a new socket
                 * which is not assigned to any transport.
                 */
                vsk->transport->release(vsk);
                vsock_deassign_transport(vsk);
        }

        /* We increase the module refcnt to prevent the transport unloading
         * while there are open sockets assigned to it.
         */
        if (!new_transport || !try_module_get(new_transport->module))
                return -ENODEV;

        if (sk->sk_type == SOCK_SEQPACKET) {
                if (!new_transport->seqpacket_allow ||
                    !new_transport->seqpacket_allow(remote_cid)) {
                        module_put(new_transport->module);
                        return -ESOCKTNOSUPPORT;
                }
        }

        ret = new_transport->init(vsk, psk);
        if (ret) {
                module_put(new_transport->module);
                return ret;
        }

        vsk->transport = new_transport;

        return 0;
}
EXPORT_SYMBOL_GPL(vsock_assign_transport);
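
/*
 * Editor's illustration, not part of this file: example outcomes of the
 * selection logic above for a connectible socket, assuming both g2h and
 * h2g transports are registered and the guest's own CID is 3:
 *
 *   remote CID 1 (VMADDR_CID_LOCAL)          -> transport_local
 *   remote CID 3 (g2h->get_local_cid())      -> transport_local
 *   remote CID 2 (VMADDR_CID_HOST)           -> transport_g2h
 *   remote CID 42                            -> transport_h2g
 *   remote CID 42 with VMADDR_FLAG_TO_HOST   -> transport_g2h (forwarded)
 */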

bool vsock_find_cid(unsigned int cid)
{
        if (transport_g2h && cid == transport_g2h->get_local_cid())
                return true;

        if (transport_h2g && cid == VMADDR_CID_HOST)
                return true;

        if (transport_local && cid == VMADDR_CID_LOCAL)
                return true;

        return false;
}
EXPORT_SYMBOL_GPL(vsock_find_cid);

static struct sock *vsock_dequeue_accept(struct sock *listener)
{
        struct vsock_sock *vlistener;
        struct vsock_sock *vconnected;

        vlistener = vsock_sk(listener);

        if (list_empty(&vlistener->accept_queue))
                return NULL;

        vconnected = list_entry(vlistener->accept_queue.next,
                                struct vsock_sock, accept_queue);

        list_del_init(&vconnected->accept_queue);
        sock_put(listener);
        /* The caller will need a reference on the connected socket so we let
         * it call sock_put().
         */

        return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        if (!vsk->transport)
                return -ENODEV;

        return vsk->transport->shutdown(vsk, mode);
}

static void vsock_pending_work(struct work_struct *work)
{
        struct sock *sk;
        struct sock *listener;
        struct vsock_sock *vsk;
        bool cleanup;

        vsk = container_of(work, struct vsock_sock, pending_work.work);
        sk = sk_vsock(vsk);
        listener = vsk->listener;
        cleanup = true;

        lock_sock(listener);
        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (vsock_is_pending(sk)) {
                vsock_remove_pending(listener, sk);

                sk_acceptq_removed(listener);
        } else if (!vsk->rejected) {
                /* We are not on the pending list and accept() did not reject
                 * us, so we must have been accepted by our user process.  We
                 * just need to drop our references to the sockets and be on
                 * our way.
                 */
                cleanup = false;
                goto out;
        }

        /* We need to remove ourself from the global connected sockets list so
         * incoming packets can't find this socket, and to reduce the reference
         * count.
         */
        vsock_remove_connected(vsk);

        sk->sk_state = TCP_CLOSE;

out:
        release_sock(sk);
        release_sock(listener);
        if (cleanup)
                sock_put(sk);

        sock_put(sk);
        sock_put(listener);
}

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_connectible(struct vsock_sock *vsk,
                                    struct sockaddr_vm *addr)
{
        static u32 port;
        struct sockaddr_vm new_addr;

        if (!port)
                port = LAST_RESERVED_PORT + 1 +
                        prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);

        vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

        if (addr->svm_port == VMADDR_PORT_ANY) {
                bool found = false;
                unsigned int i;

                for (i = 0; i < MAX_PORT_RETRIES; i++) {
                        if (port <= LAST_RESERVED_PORT)
                                port = LAST_RESERVED_PORT + 1;

                        new_addr.svm_port = port++;

                        if (!__vsock_find_bound_socket(&new_addr)) {
                                found = true;
                                break;
                        }
                }

                if (!found)
                        return -EADDRNOTAVAIL;
        } else {
                /* If port is in reserved range, ensure caller
                 * has necessary privileges.
                 */
                if (addr->svm_port <= LAST_RESERVED_PORT &&
                    !capable(CAP_NET_BIND_SERVICE)) {
                        return -EACCES;
                }

                if (__vsock_find_bound_socket(&new_addr))
                        return -EADDRINUSE;
        }

        vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

        /* Remove connectible sockets from the unbound list and add them to
         * the hash table for easy lookup by their address.  The unbound list
         * is simply an extra entry at the end of the hash table, a trick
         * used by AF_UNIX.
         */
        __vsock_remove_bound(vsk);
        __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

        return 0;
}
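
/*
 * Editor's illustration, not part of this file: binding to
 * VMADDR_PORT_ANY from userspace exercises the port-autoselect loop
 * above; the kernel picks a port above LAST_RESERVED_PORT, which can be
 * read back with getsockname(2).  Hedged sketch, error handling elided.
 */
#if 0
#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <stdio.h>

static void example_autobind(void)
{
        struct sockaddr_vm addr = {
                .svm_family = AF_VSOCK,
                .svm_cid = VMADDR_CID_ANY,
                .svm_port = VMADDR_PORT_ANY,    /* let the kernel pick */
        };
        socklen_t len = sizeof(addr);
        int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
        getsockname(fd, (struct sockaddr *)&addr, &len);
        printf("bound to port %u\n", addr.svm_port);
}
#endif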

static int __vsock_bind_dgram(struct vsock_sock *vsk,
                              struct sockaddr_vm *addr)
{
        return vsk->transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        int retval;

        /* First ensure this socket isn't already bound. */
        if (vsock_addr_bound(&vsk->local_addr))
                return -EINVAL;

        /* Now bind to the provided address or select appropriate values if
         * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that
         * like AF_INET prevents binding to a non-local IP address (in most
         * cases), we only allow binding to a local CID.
         */
        if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
                return -EADDRNOTAVAIL;

        switch (sk->sk_socket->type) {
        case SOCK_STREAM:
        case SOCK_SEQPACKET:
                spin_lock_bh(&vsock_table_lock);
                retval = __vsock_bind_connectible(vsk, addr);
                spin_unlock_bh(&vsock_table_lock);
                break;

        case SOCK_DGRAM:
                retval = __vsock_bind_dgram(vsk, addr);
                break;

        default:
                retval = -EINVAL;
                break;
        }

        return retval;
}

static void vsock_connect_timeout(struct work_struct *work);

static struct sock *__vsock_create(struct net *net,
                                   struct socket *sock,
                                   struct sock *parent,
                                   gfp_t priority,
                                   unsigned short type,
                                   int kern)
{
        struct sock *sk;
        struct vsock_sock *psk;
        struct vsock_sock *vsk;

        sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
        if (!sk)
                return NULL;

        sock_init_data(sock, sk);

        /* sk->sk_type is normally set in sock_init_data, but only if sock is
         * non-NULL. We make sure that our sockets always have a type by
         * setting it here if needed.
         */
        if (!sock)
                sk->sk_type = type;

        vsk = vsock_sk(sk);
        vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
        vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

        sk->sk_destruct = vsock_sk_destruct;
        sk->sk_backlog_rcv = vsock_queue_rcv_skb;
        sock_reset_flag(sk, SOCK_DONE);

        INIT_LIST_HEAD(&vsk->bound_table);
        INIT_LIST_HEAD(&vsk->connected_table);
        vsk->listener = NULL;
        INIT_LIST_HEAD(&vsk->pending_links);
        INIT_LIST_HEAD(&vsk->accept_queue);
        vsk->rejected = false;
        vsk->sent_request = false;
        vsk->ignore_connecting_rst = false;
        vsk->peer_shutdown = 0;
        INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
        INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);

        psk = parent ? vsock_sk(parent) : NULL;
        if (parent) {
                vsk->trusted = psk->trusted;
                vsk->owner = get_cred(psk->owner);
                vsk->connect_timeout = psk->connect_timeout;
                vsk->buffer_size = psk->buffer_size;
                vsk->buffer_min_size = psk->buffer_min_size;
                vsk->buffer_max_size = psk->buffer_max_size;
                security_sk_clone(parent, sk);
        } else {
                vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
                vsk->owner = get_current_cred();
                vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
                vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
                vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
                vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
        }

        return sk;
}

static bool sock_type_connectible(u16 type)
{
        return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
}

static void __vsock_release(struct sock *sk, int level)
{
        if (sk) {
                struct sock *pending;
                struct vsock_sock *vsk;

                vsk = vsock_sk(sk);
                pending = NULL; /* Compiler warning. */

                /* When "level" is SINGLE_DEPTH_NESTING, use the nested
                 * version to avoid the warning "possible recursive locking
                 * detected". When "level" is 0, lock_sock_nested(sk, level)
                 * is the same as lock_sock(sk).
                 */
                lock_sock_nested(sk, level);

                if (vsk->transport)
                        vsk->transport->release(vsk);
                else if (sock_type_connectible(sk->sk_type))
                        vsock_remove_sock(vsk);

                sock_orphan(sk);
                sk->sk_shutdown = SHUTDOWN_MASK;

                skb_queue_purge(&sk->sk_receive_queue);

                /* Clean up any sockets that never were accepted. */
                while ((pending = vsock_dequeue_accept(sk)) != NULL) {
                        __vsock_release(pending, SINGLE_DEPTH_NESTING);
                        sock_put(pending);
                }

                release_sock(sk);
                sock_put(sk);
        }
}

static void vsock_sk_destruct(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        vsock_deassign_transport(vsk);

        /* When clearing these addresses, there's no need to set the family and
         * possibly register the address family with the kernel.
         */
        vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
        vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

        put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int err;

        err = sock_queue_rcv_skb(sk, skb);
        if (err)
                kfree_skb(skb);

        return err;
}

struct sock *vsock_create_connected(struct sock *parent)
{
        return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
                              parent->sk_type, 0);
}
EXPORT_SYMBOL_GPL(vsock_create_connected);

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
        return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

static s64 vsock_has_data(struct vsock_sock *vsk)
{
        struct sock *sk = sk_vsock(vsk);

        if (sk->sk_type == SOCK_SEQPACKET)
                return vsk->transport->seqpacket_has_data(vsk);
        else
                return vsock_stream_has_data(vsk);
}

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
        return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

static int vsock_release(struct socket *sock)
{
        __vsock_release(sock->sk, 0);
        sock->sk = NULL;
        sock->state = SS_FREE;

        return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        int err;
        struct sock *sk;
        struct sockaddr_vm *vm_addr;

        sk = sock->sk;

        if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
                return -EINVAL;

        lock_sock(sk);
        err = __vsock_bind(sk, vm_addr);
        release_sock(sk);

        return err;
}

static int vsock_getname(struct socket *sock,
                         struct sockaddr *addr, int peer)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;
        struct sockaddr_vm *vm_addr;

        sk = sock->sk;
        vsk = vsock_sk(sk);
        err = 0;

        lock_sock(sk);

        if (peer) {
                if (sock->state != SS_CONNECTED) {
                        err = -ENOTCONN;
                        goto out;
                }
                vm_addr = &vsk->remote_addr;
        } else {
                vm_addr = &vsk->local_addr;
        }

        if (!vm_addr) {
                err = -EINVAL;
                goto out;
        }

        /* sys_getsockname() and sys_getpeername() pass us a
         * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
         * that macro is defined in socket.c instead of .h, so we hardcode its
         * value here.
         */
        BUILD_BUG_ON(sizeof(*vm_addr) > 128);
        memcpy(addr, vm_addr, sizeof(*vm_addr));
        err = sizeof(*vm_addr);

out:
        release_sock(sk);
        return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
        int err;
        struct sock *sk;

        /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
         * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
         * here like the other address families do.  Note also that the
         * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
         * which is what we want.
         */
        mode++;

        if ((mode & ~SHUTDOWN_MASK) || !mode)
                return -EINVAL;

        /* If this is a STREAM socket and it is not connected then bail out
         * immediately.  If it is a DGRAM socket then we must first kick the
         * socket so that it wakes up from any sleeping calls, for example
         * recv(), and then afterwards return the error.
         */

        sk = sock->sk;

        lock_sock(sk);
        if (sock->state == SS_UNCONNECTED) {
                err = -ENOTCONN;
                if (sock_type_connectible(sk->sk_type))
                        goto out;
        } else {
                sock->state = SS_DISCONNECTING;
                err = 0;
        }

        /* Receive and send shutdowns are treated alike. */
        mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
        if (mode) {
                sk->sk_shutdown |= mode;
                sk->sk_state_change(sk);

                if (sock_type_connectible(sk->sk_type)) {
                        sock_reset_flag(sk, SOCK_DONE);
                        vsock_send_shutdown(sk, mode);
                }
        }

out:
        release_sock(sk);
        return err;
}
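
/*
 * Editor's illustration, not part of this file: the mode++ mapping in
 * vsock_shutdown() above, from userspace constants to kernel bits:
 *
 *   SHUT_RD   (0) + 1 == 1 == RCV_SHUTDOWN
 *   SHUT_WR   (1) + 1 == 2 == SEND_SHUTDOWN
 *   SHUT_RDWR (2) + 1 == 3 == RCV_SHUTDOWN | SEND_SHUTDOWN
 */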

static __poll_t vsock_poll(struct file *file, struct socket *sock,
                               poll_table *wait)
{
        struct sock *sk;
        __poll_t mask;
        struct vsock_sock *vsk;

        sk = sock->sk;
        vsk = vsock_sk(sk);

        poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        if (sk->sk_err)
                /* Signify that there has been an error on this socket. */
                mask |= EPOLLERR;

        /* INET sockets treat local write shutdown and peer write shutdown as
         * a case where EPOLLHUP is set.
         */
        if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
            ((sk->sk_shutdown & SEND_SHUTDOWN) &&
             (vsk->peer_shutdown & SEND_SHUTDOWN))) {
                mask |= EPOLLHUP;
        }

        if (sk->sk_shutdown & RCV_SHUTDOWN ||
            vsk->peer_shutdown & SEND_SHUTDOWN) {
                mask |= EPOLLRDHUP;
        }

        if (sock->type == SOCK_DGRAM) {
                /* For datagram sockets we can read if there is something in
                 * the queue and write as long as the socket isn't shutdown for
                 * sending.
                 */
                if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
                    (sk->sk_shutdown & RCV_SHUTDOWN)) {
                        mask |= EPOLLIN | EPOLLRDNORM;
                }

                if (!(sk->sk_shutdown & SEND_SHUTDOWN))
                        mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

        } else if (sock_type_connectible(sk->sk_type)) {
                const struct vsock_transport *transport;

                lock_sock(sk);

                transport = vsk->transport;

                /* Listening sockets that have connections in their accept
                 * queue can be read.
                 */
                if (sk->sk_state == TCP_LISTEN
                    && !vsock_is_accept_queue_empty(sk))
                        mask |= EPOLLIN | EPOLLRDNORM;

                /* If there is something in the queue then we can read. */
                if (transport && transport->stream_is_active(vsk) &&
                    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
                        bool data_ready_now = false;
                        int ret = transport->notify_poll_in(
                                        vsk, 1, &data_ready_now);
                        if (ret < 0) {
                                mask |= EPOLLERR;
                        } else {
                                if (data_ready_now)
                                        mask |= EPOLLIN | EPOLLRDNORM;

                        }
                }

                /* Sockets whose connections have been closed, reset, or
                 * terminated should also be considered read, and we check the
                 * shutdown flag for that.
                 */
                if (sk->sk_shutdown & RCV_SHUTDOWN ||
                    vsk->peer_shutdown & SEND_SHUTDOWN) {
                        mask |= EPOLLIN | EPOLLRDNORM;
                }

                /* Connected sockets that can produce data can be written. */
                if (transport && sk->sk_state == TCP_ESTABLISHED) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                                bool space_avail_now = false;
                                int ret = transport->notify_poll_out(
                                                vsk, 1, &space_avail_now);
                                if (ret < 0) {
                                        mask |= EPOLLERR;
                                } else {
                                        if (space_avail_now)
                                                /* Remove EPOLLWRBAND since INET
                                                 * sockets are not setting it.
                                                 */
                                                mask |= EPOLLOUT | EPOLLWRNORM;

                                }
                        }
                }

                /* Simulate INET socket poll behaviors, which sets
                 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
                 * but local send is not shutdown.
                 */
                if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN))
                                mask |= EPOLLOUT | EPOLLWRNORM;

                }

                release_sock(sk);
        }

        return mask;
}
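
/*
 * Editor's illustration, not part of this file: a userspace sketch of
 * the poll semantics implemented above.  POLLIN on a listener means a
 * connection is ready for accept(2); POLLRDHUP reports the peer's send
 * shutdown.  Hedged example (POLLRDHUP needs _GNU_SOURCE with glibc).
 */
#if 0
#define _GNU_SOURCE
#include <poll.h>

static int example_wait_readable(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };

        poll(&pfd, 1, 5000 /* ms */);
        if (pfd.revents & POLLRDHUP)
                return 0;       /* peer shut down its send side */
        return pfd.revents & POLLIN;
}
#endif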

static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
                               size_t len)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;
        struct sockaddr_vm *remote_addr;
        const struct vsock_transport *transport;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        /* For now, MSG_DONTWAIT is always assumed... */
        err = 0;
        sk = sock->sk;
        vsk = vsock_sk(sk);

        lock_sock(sk);

        transport = vsk->transport;

        err = vsock_auto_bind(vsk);
        if (err)
                goto out;

        /* If the provided message contains an address, use that.  Otherwise
         * fall back on the socket's remote handle (if it has been connected).
         */
        if (msg->msg_name &&
            vsock_addr_cast(msg->msg_name, msg->msg_namelen,
                            &remote_addr) == 0) {
                /* Ensure this address is of the right type and is a valid
                 * destination.
                 */

                if (remote_addr->svm_cid == VMADDR_CID_ANY)
                        remote_addr->svm_cid = transport->get_local_cid();

                if (!vsock_addr_bound(remote_addr)) {
                        err = -EINVAL;
                        goto out;
                }
        } else if (sock->state == SS_CONNECTED) {
                remote_addr = &vsk->remote_addr;

                if (remote_addr->svm_cid == VMADDR_CID_ANY)
                        remote_addr->svm_cid = transport->get_local_cid();

                /* XXX Should connect() or this function ensure remote_addr is
                 * bound?
                 */
                if (!vsock_addr_bound(&vsk->remote_addr)) {
                        err = -EINVAL;
                        goto out;
                }
        } else {
                err = -EINVAL;
                goto out;
        }

        if (!transport->dgram_allow(remote_addr->svm_cid,
                                    remote_addr->svm_port)) {
                err = -EINVAL;
                goto out;
        }

        err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
        release_sock(sk);
        return err;
}

static int vsock_dgram_connect(struct socket *sock,
                               struct sockaddr *addr, int addr_len, int flags)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;
        struct sockaddr_vm *remote_addr;

        sk = sock->sk;
        vsk = vsock_sk(sk);

        err = vsock_addr_cast(addr, addr_len, &remote_addr);
        if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
                lock_sock(sk);
                vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
                                VMADDR_PORT_ANY);
                sock->state = SS_UNCONNECTED;
                release_sock(sk);
                return 0;
        } else if (err != 0)
                return -EINVAL;

        lock_sock(sk);

        err = vsock_auto_bind(vsk);
        if (err)
                goto out;

        if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
                                         remote_addr->svm_port)) {
                err = -EINVAL;
                goto out;
        }

        memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
        sock->state = SS_CONNECTED;

out:
        release_sock(sk);
        return err;
}

static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
                               size_t len, int flags)
{
        struct vsock_sock *vsk = vsock_sk(sock->sk);

        return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
}

static const struct proto_ops vsock_dgram_ops = {
        .family = PF_VSOCK,
        .owner = THIS_MODULE,
        .release = vsock_release,
        .bind = vsock_bind,
        .connect = vsock_dgram_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = vsock_getname,
        .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = vsock_shutdown,
        .sendmsg = vsock_dgram_sendmsg,
        .recvmsg = vsock_dgram_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};
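
/*
 * Editor's illustration, not part of this file: datagram usage against
 * the ops table above.  listen/accept/ioctl are sock_no_*() stubs, so
 * only bind/connect/send/recv-style calls apply; a dgram transport
 * (VMCI, at the time of this code) must be loaded.  Hedged sketch.
 */
#if 0
#include <sys/socket.h>
#include <linux/vm_sockets.h>

static void example_dgram_send(unsigned int peer_cid)
{
        struct sockaddr_vm to = {
                .svm_family = AF_VSOCK,
                .svm_cid = peer_cid,
                .svm_port = 9999,       /* arbitrary example port */
        };
        int fd = socket(AF_VSOCK, SOCK_DGRAM, 0);
        const char msg[] = "ping";

        /* No explicit bind: vsock_dgram_sendmsg() autobinds first. */
        sendto(fd, msg, sizeof(msg), 0,
               (struct sockaddr *)&to, sizeof(to));
}
#endif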

static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
        const struct vsock_transport *transport = vsk->transport;

        if (!transport || !transport->cancel_pkt)
                return -EOPNOTSUPP;

        return transport->cancel_pkt(vsk);
}

static void vsock_connect_timeout(struct work_struct *work)
{
        struct sock *sk;
        struct vsock_sock *vsk;

        vsk = container_of(work, struct vsock_sock, connect_work.work);
        sk = sk_vsock(vsk);

        lock_sock(sk);
        if (sk->sk_state == TCP_SYN_SENT &&
            (sk->sk_shutdown != SHUTDOWN_MASK)) {
                sk->sk_state = TCP_CLOSE;
                sk->sk_err = ETIMEDOUT;
                sk->sk_error_report(sk);
                vsock_transport_cancel_pkt(vsk);
        }
        release_sock(sk);

        sock_put(sk);
}

static int vsock_connect(struct socket *sock, struct sockaddr *addr,
                         int addr_len, int flags)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;
        const struct vsock_transport *transport;
        struct sockaddr_vm *remote_addr;
        long timeout;
        DEFINE_WAIT(wait);

        err = 0;
        sk = sock->sk;
        vsk = vsock_sk(sk);

        lock_sock(sk);

        /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
        switch (sock->state) {
        case SS_CONNECTED:
                err = -EISCONN;
                goto out;
        case SS_DISCONNECTING:
                err = -EINVAL;
                goto out;
        case SS_CONNECTING:
                /* This continues on so we can move sock into the SS_CONNECTED
                 * state once the connection has completed (at which point err
                 * will be set to zero also).  Otherwise, we will either wait
                 * for the connection or return -EALREADY should this be a
                 * non-blocking call.
                 */
                err = -EALREADY;
                break;
        default:
                if ((sk->sk_state == TCP_LISTEN) ||
                    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
                        err = -EINVAL;
                        goto out;
                }

                /* Set the remote address that we are connecting to. */
                memcpy(&vsk->remote_addr, remote_addr,
                       sizeof(vsk->remote_addr));

                err = vsock_assign_transport(vsk, NULL);
                if (err)
                        goto out;

                transport = vsk->transport;

                /* The hypervisor and well-known contexts do not have socket
                 * endpoints.
                 */
                if (!transport ||
                    !transport->stream_allow(remote_addr->svm_cid,
                                             remote_addr->svm_port)) {
                        err = -ENETUNREACH;
                        goto out;
                }

                err = vsock_auto_bind(vsk);
                if (err)
                        goto out;

                sk->sk_state = TCP_SYN_SENT;

                err = transport->connect(vsk);
                if (err < 0)
                        goto out;

                /* Mark sock as connecting and set the error code to in
                 * progress in case this is a non-blocking connect.
                 */
                sock->state = SS_CONNECTING;
                err = -EINPROGRESS;
        }

        /* The receive path will handle all communication until we are able to
         * enter the connected state.  Here we wait for the connection to be
         * completed or a notification of an error.
         */
        timeout = vsk->connect_timeout;
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

        while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
                if (flags & O_NONBLOCK) {
                        /* If we're not going to block, we schedule a timeout
                         * function to generate a timeout on the connection
                         * attempt, in case the peer doesn't respond in a
                         * timely manner. We hold on to the socket until the
                         * timeout fires.
                         */
                        sock_hold(sk);
                        schedule_delayed_work(&vsk->connect_work, timeout);

                        /* Skip ahead to preserve error code set above. */
                        goto out_wait;
                }

                release_sock(sk);
                timeout = schedule_timeout(timeout);
                lock_sock(sk);

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeout);
                        sk->sk_state = TCP_CLOSE;
                        sock->state = SS_UNCONNECTED;
                        vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
                } else if (timeout == 0) {
                        err = -ETIMEDOUT;
                        sk->sk_state = TCP_CLOSE;
                        sock->state = SS_UNCONNECTED;
                        vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
                }

                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }

        if (sk->sk_err) {
                err = -sk->sk_err;
                sk->sk_state = TCP_CLOSE;
                sock->state = SS_UNCONNECTED;
        } else {
                err = 0;
        }

out_wait:
        finish_wait(sk_sleep(sk), &wait);
out:
        release_sock(sk);
        return err;
}
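
/*
 * Editor's illustration, not part of this file: a non-blocking connect
 * as handled above.  connect(2) fails with EINPROGRESS, the kernel arms
 * connect_work with the socket's timeout, and userspace polls for
 * writability.  Hedged sketch, error handling elided.
 */
#if 0
#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <poll.h>
#include <errno.h>

static int example_nonblocking_connect(unsigned int cid, unsigned int port)
{
        struct sockaddr_vm addr = {
                .svm_family = AF_VSOCK,
                .svm_cid = cid,
                .svm_port = port,
        };
        int fd = socket(AF_VSOCK, SOCK_STREAM | SOCK_NONBLOCK, 0);

        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
            errno == EINPROGRESS) {
                struct pollfd pfd = { .fd = fd, .events = POLLOUT };

                poll(&pfd, 1, -1);      /* writable once established */
        }
        return fd;
}
#endif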

static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
                        bool kern)
{
        struct sock *listener;
        int err;
        struct sock *connected;
        struct vsock_sock *vconnected;
        long timeout;
        DEFINE_WAIT(wait);

        err = 0;
        listener = sock->sk;

        lock_sock(listener);

        if (!sock_type_connectible(sock->type)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        if (listener->sk_state != TCP_LISTEN) {
                err = -EINVAL;
                goto out;
        }

        /* Wait for child sockets to appear; these are the new sockets
         * created upon connection establishment.
         */
1455         timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
1456         prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1457
1458         while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1459                listener->sk_err == 0) {
1460                 release_sock(listener);
1461                 timeout = schedule_timeout(timeout);
1462                 finish_wait(sk_sleep(listener), &wait);
1463                 lock_sock(listener);
1464
1465                 if (signal_pending(current)) {
1466                         err = sock_intr_errno(timeout);
1467                         goto out;
1468                 } else if (timeout == 0) {
1469                         err = -EAGAIN;
1470                         goto out;
1471                 }
1472
1473                 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1474         }
1475         finish_wait(sk_sleep(listener), &wait);
1476
1477         if (listener->sk_err)
1478                 err = -listener->sk_err;
1479
1480         if (connected) {
1481                 sk_acceptq_removed(listener);
1482
1483                 lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
1484                 vconnected = vsock_sk(connected);
1485
1486                 /* If the listener socket has received an error, then we should
1487                  * reject this socket and return.  Note that we simply mark the
1488                  * socket rejected, drop our reference, and let the cleanup
1489                  * function handle the cleanup; the fact that we found it in
1490                  * the listener's accept queue guarantees that the cleanup
1491                  * function hasn't run yet.
1492                  */
1493                 if (err) {
1494                         vconnected->rejected = true;
1495                 } else {
1496                         newsock->state = SS_CONNECTED;
1497                         sock_graft(connected, newsock);
1498                 }
1499
1500                 release_sock(connected);
1501                 sock_put(connected);
1502         }
1503
1504 out:
1505         release_sock(listener);
1506         return err;
1507 }
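
/* Illustrative userspace sketch, not part of this file: since the wait
 * above uses sock_rcvtimeo(), a listener's SO_RCVTIMEO bounds how long
 * accept(2) may block, failing with EAGAIN on expiry.  The 5 second
 * timeout is an arbitrary example and handle_accept_timeout() is a
 * hypothetical callback.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	setsockopt(lfd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *	if (accept(lfd, NULL, NULL) < 0 && errno == EAGAIN)
 *		handle_accept_timeout();
 */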
1508
1509 static int vsock_listen(struct socket *sock, int backlog)
1510 {
1511         int err;
1512         struct sock *sk;
1513         struct vsock_sock *vsk;
1514
1515         sk = sock->sk;
1516
1517         lock_sock(sk);
1518
1519         if (!sock_type_connectible(sk->sk_type)) {
1520                 err = -EOPNOTSUPP;
1521                 goto out;
1522         }
1523
1524         if (sock->state != SS_UNCONNECTED) {
1525                 err = -EINVAL;
1526                 goto out;
1527         }
1528
1529         vsk = vsock_sk(sk);
1530
1531         if (!vsock_addr_bound(&vsk->local_addr)) {
1532                 err = -EINVAL;
1533                 goto out;
1534         }
1535
1536         sk->sk_max_ack_backlog = backlog;
1537         sk->sk_state = TCP_LISTEN;
1538
1539         err = 0;
1540
1541 out:
1542         release_sock(sk);
1543         return err;
1544 }
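
/* Illustrative userspace sketch, not part of this file: a minimal
 * connectible listener.  Note that vsock_listen() above rejects a
 * socket that was not bound first with -EINVAL.  The port number is an
 * arbitrary example.
 *
 *	#include <sys/socket.h>
 *	#include <linux/vm_sockets.h>
 *
 *	int lfd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid    = VMADDR_CID_ANY,
 *		.svm_port   = 1234,
 *	};
 *
 *	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(lfd, 16);
 *	int cfd = accept(lfd, NULL, NULL);
 */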
1545
1546 static void vsock_update_buffer_size(struct vsock_sock *vsk,
1547                                      const struct vsock_transport *transport,
1548                                      u64 val)
1549 {
1550         if (val > vsk->buffer_max_size)
1551                 val = vsk->buffer_max_size;
1552
1553         if (val < vsk->buffer_min_size)
1554                 val = vsk->buffer_min_size;
1555
1556         if (val != vsk->buffer_size &&
1557             transport && transport->notify_buffer_size)
1558                 transport->notify_buffer_size(vsk, &val);
1559
1560         vsk->buffer_size = val;
1561 }
1562
1563 static int vsock_connectible_setsockopt(struct socket *sock,
1564                                         int level,
1565                                         int optname,
1566                                         sockptr_t optval,
1567                                         unsigned int optlen)
1568 {
1569         int err;
1570         struct sock *sk;
1571         struct vsock_sock *vsk;
1572         const struct vsock_transport *transport;
1573         u64 val;
1574
1575         if (level != AF_VSOCK)
1576                 return -ENOPROTOOPT;
1577
1578 #define COPY_IN(_v)                                       \
1579         do {                                              \
1580                 if (optlen < sizeof(_v)) {                \
1581                         err = -EINVAL;                    \
1582                         goto exit;                        \
1583                 }                                         \
1584                 if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) {  \
1585                         err = -EFAULT;                                  \
1586                         goto exit;                                      \
1587                 }                                                       \
1588         } while (0)
1589
1590         err = 0;
1591         sk = sock->sk;
1592         vsk = vsock_sk(sk);
1593
1594         lock_sock(sk);
1595
1596         transport = vsk->transport;
1597
1598         switch (optname) {
1599         case SO_VM_SOCKETS_BUFFER_SIZE:
1600                 COPY_IN(val);
1601                 vsock_update_buffer_size(vsk, transport, val);
1602                 break;
1603
1604         case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1605                 COPY_IN(val);
1606                 vsk->buffer_max_size = val;
1607                 vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
1608                 break;
1609
1610         case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1611                 COPY_IN(val);
1612                 vsk->buffer_min_size = val;
1613                 vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
1614                 break;
1615
1616         case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
1617                 struct __kernel_old_timeval tv;
1618                 COPY_IN(tv);
1619                 if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
1620                     tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
1621                         vsk->connect_timeout = tv.tv_sec * HZ +
1622                             DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
1623                         if (vsk->connect_timeout == 0)
1624                                 vsk->connect_timeout =
1625                                     VSOCK_DEFAULT_CONNECT_TIMEOUT;
1626
1627                 } else {
1628                         err = -ERANGE;
1629                 }
1630                 break;
1631         }
1632
1633         default:
1634                 err = -ENOPROTOOPT;
1635                 break;
1636         }
1637
1638 #undef COPY_IN
1639
1640 exit:
1641         release_sock(sk);
1642         return err;
1643 }
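
/* Illustrative userspace sketch, not part of this file: these options
 * are set at level AF_VSOCK rather than SOL_SOCKET, the buffer sizes
 * are 64-bit values, and the connect timeout takes a struct timeval
 * matching the __kernel_old_timeval layout.  The values are arbitrary
 * examples.
 *
 *	uint64_t buf_size = 256 * 1024;
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
 *		   &buf_size, sizeof(buf_size));
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
 *		   &tv, sizeof(tv));
 */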
1644
1645 static int vsock_connectible_getsockopt(struct socket *sock,
1646                                         int level, int optname,
1647                                         char __user *optval,
1648                                         int __user *optlen)
1649 {
1650         int err;
1651         int len;
1652         struct sock *sk;
1653         struct vsock_sock *vsk;
1654         u64 val;
1655
1656         if (level != AF_VSOCK)
1657                 return -ENOPROTOOPT;
1658
1659         err = get_user(len, optlen);
1660         if (err != 0)
1661                 return err;
1662
1663 #define COPY_OUT(_v)                            \
1664         do {                                    \
1665                 if (len < sizeof(_v))           \
1666                         return -EINVAL;         \
1667                                                 \
1668                 len = sizeof(_v);               \
1669                 if (copy_to_user(optval, &_v, len) != 0)        \
1670                         return -EFAULT;                         \
1671                                                                 \
1672         } while (0)
1673
1674         err = 0;
1675         sk = sock->sk;
1676         vsk = vsock_sk(sk);
1677
1678         switch (optname) {
1679         case SO_VM_SOCKETS_BUFFER_SIZE:
1680                 val = vsk->buffer_size;
1681                 COPY_OUT(val);
1682                 break;
1683
1684         case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1685                 val = vsk->buffer_max_size;
1686                 COPY_OUT(val);
1687                 break;
1688
1689         case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1690                 val = vsk->buffer_min_size;
1691                 COPY_OUT(val);
1692                 break;
1693
1694         case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
1695                 struct __kernel_old_timeval tv;
1696                 tv.tv_sec = vsk->connect_timeout / HZ;
1697                 tv.tv_usec =
1698                     (vsk->connect_timeout -
1699                      tv.tv_sec * HZ) * (1000000 / HZ);
1700                 COPY_OUT(tv);
1701                 break;
1702         }
1703         default:
1704                 return -ENOPROTOOPT;
1705         }
1706
1707         err = put_user(len, optlen);
1708         if (err != 0)
1709                 return -EFAULT;
1710
1711 #undef COPY_OUT
1712
1713         return 0;
1714 }
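
/* Illustrative userspace sketch, not part of this file: reading an
 * option back.  len is updated to the number of bytes copied out.
 *
 *	uint64_t val;
 *	socklen_t len = sizeof(val);
 *
 *	getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, &val, &len);
 */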
1715
1716 static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg,
1717                                      size_t len)
1718 {
1719         struct sock *sk;
1720         struct vsock_sock *vsk;
1721         const struct vsock_transport *transport;
1722         ssize_t total_written;
1723         long timeout;
1724         int err;
1725         struct vsock_transport_send_notify_data send_data;
1726         DEFINE_WAIT_FUNC(wait, woken_wake_function);
1727
1728         sk = sock->sk;
1729         vsk = vsock_sk(sk);
1730         total_written = 0;
1731         err = 0;
1732
1733         if (msg->msg_flags & MSG_OOB)
1734                 return -EOPNOTSUPP;
1735
1736         lock_sock(sk);
1737
1738         transport = vsk->transport;
1739
1740         /* Callers should not provide a destination with connectible sockets. */
1741         if (msg->msg_namelen) {
1742                 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1743                 goto out;
1744         }
1745
1746         /* Send data only if neither side has shut down in this direction. */
1747         if (sk->sk_shutdown & SEND_SHUTDOWN ||
1748             vsk->peer_shutdown & RCV_SHUTDOWN) {
1749                 err = -EPIPE;
1750                 goto out;
1751         }
1752
1753         if (!transport || sk->sk_state != TCP_ESTABLISHED ||
1754             !vsock_addr_bound(&vsk->local_addr)) {
1755                 err = -ENOTCONN;
1756                 goto out;
1757         }
1758
1759         if (!vsock_addr_bound(&vsk->remote_addr)) {
1760                 err = -EDESTADDRREQ;
1761                 goto out;
1762         }
1763
1764         /* Wait for room in the produce queue to enqueue our user's data. */
1765         timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1766
1767         err = transport->notify_send_init(vsk, &send_data);
1768         if (err < 0)
1769                 goto out;
1770
1771         while (total_written < len) {
1772                 ssize_t written;
1773
1774                 add_wait_queue(sk_sleep(sk), &wait);
1775                 while (vsock_stream_has_space(vsk) == 0 &&
1776                        sk->sk_err == 0 &&
1777                        !(sk->sk_shutdown & SEND_SHUTDOWN) &&
1778                        !(vsk->peer_shutdown & RCV_SHUTDOWN)) {
1779
1780                         /* Don't wait for non-blocking sockets. */
1781                         if (timeout == 0) {
1782                                 err = -EAGAIN;
1783                                 remove_wait_queue(sk_sleep(sk), &wait);
1784                                 goto out_err;
1785                         }
1786
1787                         err = transport->notify_send_pre_block(vsk, &send_data);
1788                         if (err < 0) {
1789                                 remove_wait_queue(sk_sleep(sk), &wait);
1790                                 goto out_err;
1791                         }
1792
1793                         release_sock(sk);
1794                         timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
1795                         lock_sock(sk);
1796                         if (signal_pending(current)) {
1797                                 err = sock_intr_errno(timeout);
1798                                 remove_wait_queue(sk_sleep(sk), &wait);
1799                                 goto out_err;
1800                         } else if (timeout == 0) {
1801                                 err = -EAGAIN;
1802                                 remove_wait_queue(sk_sleep(sk), &wait);
1803                                 goto out_err;
1804                         }
1805                 }
1806                 remove_wait_queue(sk_sleep(sk), &wait);
1807
1808                 /* These checks occur both as part of and after the loop
1809                  * conditional since we need to check before and after
1810                  * sleeping.
1811                  */
1812                 if (sk->sk_err) {
1813                         err = -sk->sk_err;
1814                         goto out_err;
1815                 } else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
1816                            (vsk->peer_shutdown & RCV_SHUTDOWN)) {
1817                         err = -EPIPE;
1818                         goto out_err;
1819                 }
1820
1821                 err = transport->notify_send_pre_enqueue(vsk, &send_data);
1822                 if (err < 0)
1823                         goto out_err;
1824
1825                 /* Note that enqueue will only write as many bytes as are free
1826                  * in the produce queue, so we don't need to ensure len is
1827                  * smaller than the queue size.  It is the caller's
1828                  * responsibility to check how many bytes we were able to send.
1829                  */
1830
1831                 if (sk->sk_type == SOCK_SEQPACKET) {
1832                         written = transport->seqpacket_enqueue(vsk,
1833                                                 msg, len - total_written);
1834                 } else {
1835                         written = transport->stream_enqueue(vsk,
1836                                         msg, len - total_written);
1837                 }
1838                 if (written < 0) {
1839                         err = -ENOMEM;
1840                         goto out_err;
1841                 }
1842
1843                 total_written += written;
1844
1845                 err = transport->notify_send_post_enqueue(
1846                                 vsk, written, &send_data);
1847                 if (err < 0)
1848                         goto out_err;
1849
1850         }
1851
1852 out_err:
1853         if (total_written > 0) {
1854                 /* Return the number of written bytes only if:
1855                  * 1) this is a SOCK_STREAM socket, or
1856                  * 2) this is a SOCK_SEQPACKET socket and the whole buffer was sent.
1857                  */
1858                 if (sk->sk_type == SOCK_STREAM || total_written == len)
1859                         err = total_written;
1860         }
1861 out:
1862         release_sock(sk);
1863         return err;
1864 }
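
/* Illustrative userspace sketch, not part of this file: on SOCK_STREAM
 * a short write is normal and the caller resubmits the remainder; on
 * SOCK_SEQPACKET one send(2) carries one record and success is only
 * reported once the whole record is queued.
 *
 *	size_t off = 0;
 *
 *	while (off < total) {
 *		ssize_t n = send(fd, buf + off, total - off, 0);
 *
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		off += n;
 *	}
 */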
1865
1866 static int vsock_wait_data(struct sock *sk, struct wait_queue_entry *wait,
1867                            long timeout,
1868                            struct vsock_transport_recv_notify_data *recv_data,
1869                            size_t target)
1870 {
1871         const struct vsock_transport *transport;
1872         struct vsock_sock *vsk;
1873         s64 data;
1874         int err;
1875
1876         vsk = vsock_sk(sk);
1877         err = 0;
1878         transport = vsk->transport;
1879
1880         while ((data = vsock_has_data(vsk)) == 0) {
1881                 prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE);
1882
1883                 if (sk->sk_err != 0 ||
1884                     (sk->sk_shutdown & RCV_SHUTDOWN) ||
1885                     (vsk->peer_shutdown & SEND_SHUTDOWN)) {
1886                         break;
1887                 }
1888
1889                 /* Don't wait for non-blocking sockets. */
1890                 if (timeout == 0) {
1891                         err = -EAGAIN;
1892                         break;
1893                 }
1894
1895                 if (recv_data) {
1896                         err = transport->notify_recv_pre_block(vsk, target, recv_data);
1897                         if (err < 0)
1898                                 break;
1899                 }
1900
1901                 release_sock(sk);
1902                 timeout = schedule_timeout(timeout);
1903                 lock_sock(sk);
1904
1905                 if (signal_pending(current)) {
1906                         err = sock_intr_errno(timeout);
1907                         break;
1908                 } else if (timeout == 0) {
1909                         err = -EAGAIN;
1910                         break;
1911                 }
1912         }
1913
1914         finish_wait(sk_sleep(sk), wait);
1915
1916         if (err)
1917                 return err;
1918
1919         /* Internal transport error when checking for available
1920          * data. XXX This should be changed to a connection
1921          * reset in a later change.
1922          */
1923         if (data < 0)
1924                 return -ENOMEM;
1925
1926         return data;
1927 }
1928
1929 static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg,
1930                                   size_t len, int flags)
1931 {
1932         struct vsock_transport_recv_notify_data recv_data;
1933         const struct vsock_transport *transport;
1934         struct vsock_sock *vsk;
1935         ssize_t copied;
1936         size_t target;
1937         long timeout;
1938         int err;
1939
1940         DEFINE_WAIT(wait);
1941
1942         vsk = vsock_sk(sk);
1943         transport = vsk->transport;
1944
1945         /* We must not copy less than target bytes into the user's buffer
1946          * before returning successfully, so we wait for the consume queue to
1947          * have that much data to consume before dequeueing.  Note that this
1948          * makes it impossible to handle cases where target is greater than the
1949          * queue size.
1950          */
1951         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1952         if (target >= transport->stream_rcvhiwat(vsk)) {
1953                 err = -ENOMEM;
1954                 goto out;
1955         }
1956         timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1957         copied = 0;
1958
1959         err = transport->notify_recv_init(vsk, target, &recv_data);
1960         if (err < 0)
1961                 goto out;
1962
1963
1964         while (1) {
1965                 ssize_t read;
1966
1967                 err = vsock_wait_data(sk, &wait, timeout, &recv_data, target);
1968                 if (err <= 0)
1969                         break;
1970
1971                 err = transport->notify_recv_pre_dequeue(vsk, target,
1972                                                          &recv_data);
1973                 if (err < 0)
1974                         break;
1975
1976                 read = transport->stream_dequeue(vsk, msg, len - copied, flags);
1977                 if (read < 0) {
1978                         err = -ENOMEM;
1979                         break;
1980                 }
1981
1982                 copied += read;
1983
1984                 err = transport->notify_recv_post_dequeue(vsk, target, read,
1985                                                 !(flags & MSG_PEEK), &recv_data);
1986                 if (err < 0)
1987                         goto out;
1988
1989                 if (read >= target || flags & MSG_PEEK)
1990                         break;
1991
1992                 target -= read;
1993         }
1994
1995         if (sk->sk_err)
1996                 err = -sk->sk_err;
1997         else if (sk->sk_shutdown & RCV_SHUTDOWN)
1998                 err = 0;
1999
2000         if (copied > 0)
2001                 err = copied;
2002
2003 out:
2004         return err;
2005 }
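
/* Illustrative userspace sketch, not part of this file: SO_RCVLOWAT
 * sets the target waited for above, so this recv(2) does not return
 * until at least 4096 bytes were copied, the peer shut down, or an
 * error occurred.  The target must stay below the stream's receive
 * high-water mark or recvmsg fails with -ENOMEM.
 *
 *	int lowat = 4096;
 *	char buf[8192];
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 */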
2006
2007 static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
2008                                      size_t len, int flags)
2009 {
2010         const struct vsock_transport *transport;
2011         struct vsock_sock *vsk;
2012         ssize_t record_len;
2013         long timeout;
2014         int err = 0;
2015         DEFINE_WAIT(wait);
2016
2017         vsk = vsock_sk(sk);
2018         transport = vsk->transport;
2019
2020         timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2021
2022         err = vsock_wait_data(sk, &wait, timeout, NULL, 0);
2023         if (err <= 0)
2024                 goto out;
2025
2026         record_len = transport->seqpacket_dequeue(vsk, msg, flags);
2027
2028         if (record_len < 0) {
2029                 err = -ENOMEM;
2030                 goto out;
2031         }
2032
2033         if (sk->sk_err) {
2034                 err = -sk->sk_err;
2035         } else if (sk->sk_shutdown & RCV_SHUTDOWN) {
2036                 err = 0;
2037         } else {
2038                 /* The user passed MSG_TRUNC, so return the real
2039                  * length of the packet.
2040                  */
2041                 if (flags & MSG_TRUNC)
2042                         err = record_len;
2043                 else
2044                         err = len - msg_data_left(msg);
2045
2046                 /* Always set MSG_TRUNC if the real length of the
2047                  * packet is bigger than the user's buffer.
2048                  */
2049                 if (record_len > len)
2050                         msg->msg_flags |= MSG_TRUNC;
2051         }
2052
2053 out:
2054         return err;
2055 }
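
/* Illustrative userspace sketch, not part of this file: a record longer
 * than the buffer is truncated and MSG_TRUNC is raised in msg_flags;
 * passing MSG_TRUNC in flags instead makes recvmsg(2) return the full
 * record length.  handle_truncated_record() is a hypothetical callback.
 *
 *	char buf[64];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *	if (n >= 0 && (msg.msg_flags & MSG_TRUNC))
 *		handle_truncated_record();
 */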
2056
2057 static int
2058 vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2059                           int flags)
2060 {
2061         struct sock *sk;
2062         struct vsock_sock *vsk;
2063         const struct vsock_transport *transport;
2064         int err;
2065
2066         DEFINE_WAIT(wait);
2067
2068         sk = sock->sk;
2069         vsk = vsock_sk(sk);
2070         err = 0;
2071
2072         lock_sock(sk);
2073
2074         transport = vsk->transport;
2075
2076         if (!transport || sk->sk_state != TCP_ESTABLISHED) {
2077                 /* Recvmsg is supposed to return 0 if a peer performs an
2078                  * orderly shutdown.  Use the SOCK_DONE flag to differentiate
2079                  * that case from one where the peer never connected or a
2080                  * local shutdown occurred.
2081                  */
2082                 if (sock_flag(sk, SOCK_DONE))
2083                         err = 0;
2084                 else
2085                         err = -ENOTCONN;
2086
2087                 goto out;
2088         }
2089
2090         if (flags & MSG_OOB) {
2091                 err = -EOPNOTSUPP;
2092                 goto out;
2093         }
2094
2095         /* We don't check the peer_shutdown flag here: the peer may have
2096          * shut down, but there can still be data in the queue that the
2097          * local socket can receive.
2098          */
2099         if (sk->sk_shutdown & RCV_SHUTDOWN) {
2100                 err = 0;
2101                 goto out;
2102         }
2103
2104         /* It is valid on Linux to pass in a zero-length receive buffer.  This
2105          * is not an error.  We may as well bail out now.
2106          */
2107         if (!len) {
2108                 err = 0;
2109                 goto out;
2110         }
2111
2112         if (sk->sk_type == SOCK_STREAM)
2113                 err = __vsock_stream_recvmsg(sk, msg, len, flags);
2114         else
2115                 err = __vsock_seqpacket_recvmsg(sk, msg, len, flags);
2116
2117 out:
2118         release_sock(sk);
2119         return err;
2120 }
2121
2122 static const struct proto_ops vsock_stream_ops = {
2123         .family = PF_VSOCK,
2124         .owner = THIS_MODULE,
2125         .release = vsock_release,
2126         .bind = vsock_bind,
2127         .connect = vsock_connect,
2128         .socketpair = sock_no_socketpair,
2129         .accept = vsock_accept,
2130         .getname = vsock_getname,
2131         .poll = vsock_poll,
2132         .ioctl = sock_no_ioctl,
2133         .listen = vsock_listen,
2134         .shutdown = vsock_shutdown,
2135         .setsockopt = vsock_connectible_setsockopt,
2136         .getsockopt = vsock_connectible_getsockopt,
2137         .sendmsg = vsock_connectible_sendmsg,
2138         .recvmsg = vsock_connectible_recvmsg,
2139         .mmap = sock_no_mmap,
2140         .sendpage = sock_no_sendpage,
2141 };
2142
2143 static const struct proto_ops vsock_seqpacket_ops = {
2144         .family = PF_VSOCK,
2145         .owner = THIS_MODULE,
2146         .release = vsock_release,
2147         .bind = vsock_bind,
2148         .connect = vsock_connect,
2149         .socketpair = sock_no_socketpair,
2150         .accept = vsock_accept,
2151         .getname = vsock_getname,
2152         .poll = vsock_poll,
2153         .ioctl = sock_no_ioctl,
2154         .listen = vsock_listen,
2155         .shutdown = vsock_shutdown,
2156         .setsockopt = vsock_connectible_setsockopt,
2157         .getsockopt = vsock_connectible_getsockopt,
2158         .sendmsg = vsock_connectible_sendmsg,
2159         .recvmsg = vsock_connectible_recvmsg,
2160         .mmap = sock_no_mmap,
2161         .sendpage = sock_no_sendpage,
2162 };
2163
2164 static int vsock_create(struct net *net, struct socket *sock,
2165                         int protocol, int kern)
2166 {
2167         struct vsock_sock *vsk;
2168         struct sock *sk;
2169         int ret;
2170
2171         if (!sock)
2172                 return -EINVAL;
2173
2174         if (protocol && protocol != PF_VSOCK)
2175                 return -EPROTONOSUPPORT;
2176
2177         switch (sock->type) {
2178         case SOCK_DGRAM:
2179                 sock->ops = &vsock_dgram_ops;
2180                 break;
2181         case SOCK_STREAM:
2182                 sock->ops = &vsock_stream_ops;
2183                 break;
2184         case SOCK_SEQPACKET:
2185                 sock->ops = &vsock_seqpacket_ops;
2186                 break;
2187         default:
2188                 return -ESOCKTNOSUPPORT;
2189         }
2190
2191         sock->state = SS_UNCONNECTED;
2192
2193         sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
2194         if (!sk)
2195                 return -ENOMEM;
2196
2197         vsk = vsock_sk(sk);
2198
2199         if (sock->type == SOCK_DGRAM) {
2200                 ret = vsock_assign_transport(vsk, NULL);
2201                 if (ret < 0) {
2202                         sock_put(sk);
2203                         return ret;
2204                 }
2205         }
2206
2207         vsock_insert_unbound(vsk);
2208
2209         return 0;
2210 }
2211
2212 static const struct net_proto_family vsock_family_ops = {
2213         .family = AF_VSOCK,
2214         .create = vsock_create,
2215         .owner = THIS_MODULE,
2216 };
2217
2218 static long vsock_dev_do_ioctl(struct file *filp,
2219                                unsigned int cmd, void __user *ptr)
2220 {
2221         u32 __user *p = ptr;
2222         u32 cid = VMADDR_CID_ANY;
2223         int retval = 0;
2224
2225         switch (cmd) {
2226         case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
2227                 /* To be compatible with the VMCI behavior, we prioritize the
2228                  * guest CID instead of the well-known host CID (VMADDR_CID_HOST).
2229                  */
2230                 if (transport_g2h)
2231                         cid = transport_g2h->get_local_cid();
2232                 else if (transport_h2g)
2233                         cid = transport_h2g->get_local_cid();
2234
2235                 if (put_user(cid, p) != 0)
2236                         retval = -EFAULT;
2237                 break;
2238
2239         default:
2240                 retval = -ENOIOCTLCMD;
2241         }
2242
2243         return retval;
2244 }
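
/* Illustrative userspace sketch, not part of this file: querying the
 * local CID through the misc device.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vm_sockets.h>
 *
 *	unsigned int cid;
 *	int fd = open("/dev/vsock", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) == 0)
 *		printf("local CID: %u\n", cid);
 */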
2245
2246 static long vsock_dev_ioctl(struct file *filp,
2247                             unsigned int cmd, unsigned long arg)
2248 {
2249         return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
2250 }
2251
2252 #ifdef CONFIG_COMPAT
2253 static long vsock_dev_compat_ioctl(struct file *filp,
2254                                    unsigned int cmd, unsigned long arg)
2255 {
2256         return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
2257 }
2258 #endif
2259
2260 static const struct file_operations vsock_device_ops = {
2261         .owner          = THIS_MODULE,
2262         .unlocked_ioctl = vsock_dev_ioctl,
2263 #ifdef CONFIG_COMPAT
2264         .compat_ioctl   = vsock_dev_compat_ioctl,
2265 #endif
2266         .open           = nonseekable_open,
2267 };
2268
2269 static struct miscdevice vsock_device = {
2270         .name           = "vsock",
2271         .fops           = &vsock_device_ops,
2272 };
2273
2274 static int __init vsock_init(void)
2275 {
2276         int err = 0;
2277
2278         vsock_init_tables();
2279
2280         vsock_proto.owner = THIS_MODULE;
2281         vsock_device.minor = MISC_DYNAMIC_MINOR;
2282         err = misc_register(&vsock_device);
2283         if (err) {
2284                 pr_err("Failed to register misc device\n");
2285                 goto err_reset_transport;
2286         }
2287
2288         err = proto_register(&vsock_proto, 1);  /* we want our slab */
2289         if (err) {
2290                 pr_err("Cannot register vsock protocol\n");
2291                 goto err_deregister_misc;
2292         }
2293
2294         err = sock_register(&vsock_family_ops);
2295         if (err) {
2296                 pr_err("Could not register af_vsock (%d) address family: %d\n",
2297                        AF_VSOCK, err);
2298                 goto err_unregister_proto;
2299         }
2300
2301         return 0;
2302
2303 err_unregister_proto:
2304         proto_unregister(&vsock_proto);
2305 err_deregister_misc:
2306         misc_deregister(&vsock_device);
2307 err_reset_transport:
2308         return err;
2309 }
2310
2311 static void __exit vsock_exit(void)
2312 {
2313         misc_deregister(&vsock_device);
2314         sock_unregister(AF_VSOCK);
2315         proto_unregister(&vsock_proto);
2316 }
2317
2318 const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
2319 {
2320         return vsk->transport;
2321 }
2322 EXPORT_SYMBOL_GPL(vsock_core_get_transport);
2323
2324 int vsock_core_register(const struct vsock_transport *t, int features)
2325 {
2326         const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local;
2327         int err = mutex_lock_interruptible(&vsock_register_mutex);
2328
2329         if (err)
2330                 return err;
2331
2332         t_h2g = transport_h2g;
2333         t_g2h = transport_g2h;
2334         t_dgram = transport_dgram;
2335         t_local = transport_local;
2336
2337         if (features & VSOCK_TRANSPORT_F_H2G) {
2338                 if (t_h2g) {
2339                         err = -EBUSY;
2340                         goto err_busy;
2341                 }
2342                 t_h2g = t;
2343         }
2344
2345         if (features & VSOCK_TRANSPORT_F_G2H) {
2346                 if (t_g2h) {
2347                         err = -EBUSY;
2348                         goto err_busy;
2349                 }
2350                 t_g2h = t;
2351         }
2352
2353         if (features & VSOCK_TRANSPORT_F_DGRAM) {
2354                 if (t_dgram) {
2355                         err = -EBUSY;
2356                         goto err_busy;
2357                 }
2358                 t_dgram = t;
2359         }
2360
2361         if (features & VSOCK_TRANSPORT_F_LOCAL) {
2362                 if (t_local) {
2363                         err = -EBUSY;
2364                         goto err_busy;
2365                 }
2366                 t_local = t;
2367         }
2368
2369         transport_h2g = t_h2g;
2370         transport_g2h = t_g2h;
2371         transport_dgram = t_dgram;
2372         transport_local = t_local;
2373
2374 err_busy:
2375         mutex_unlock(&vsock_register_mutex);
2376         return err;
2377 }
2378 EXPORT_SYMBOL_GPL(vsock_core_register);
2379
2380 void vsock_core_unregister(const struct vsock_transport *t)
2381 {
2382         mutex_lock(&vsock_register_mutex);
2383
2384         if (transport_h2g == t)
2385                 transport_h2g = NULL;
2386
2387         if (transport_g2h == t)
2388                 transport_g2h = NULL;
2389
2390         if (transport_dgram == t)
2391                 transport_dgram = NULL;
2392
2393         if (transport_local == t)
2394                 transport_local = NULL;
2395
2396         mutex_unlock(&vsock_register_mutex);
2397 }
2398 EXPORT_SYMBOL_GPL(vsock_core_unregister);
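
/* Illustrative sketch, not part of this file: a transport module
 * registers in its init path and unregisters on exit.  my_transport is
 * a hypothetical instance with its ops mostly elided.
 *
 *	static struct vsock_transport my_transport = {
 *		.module = THIS_MODULE,
 *		(transport ops elided)
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return vsock_core_register(&my_transport,
 *					   VSOCK_TRANSPORT_F_H2G);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		vsock_core_unregister(&my_transport);
 *	}
 *
 * vsock_core_register() fails with -EBUSY when the requested feature
 * slot (here host-to-guest) is already taken by another transport.
 */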
2399
2400 module_init(vsock_init);
2401 module_exit(vsock_exit);
2402
2403 MODULE_AUTHOR("VMware, Inc.");
2404 MODULE_DESCRIPTION("VMware Virtual Socket Family");
2405 MODULE_VERSION("1.0.2.0-k");
2406 MODULE_LICENSE("GPL v2");