2 * net/tipc/socket.c: TIPC socket API
4 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/rhashtable.h>
39 #include "name_table.h"
42 #include "name_distr.h"
47 #define SS_LISTENING -1 /* socket is listening */
48 #define SS_READY -2 /* socket is connectionless */
50 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
51 #define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
52 #define TIPC_FWD_MSG 1
53 #define TIPC_CONN_OK 0
54 #define TIPC_CONN_PROBING 1
55 #define TIPC_MAX_PORT 0xffffffff
56 #define TIPC_MIN_PORT 1
59 * struct tipc_sock - TIPC socket structure
60 * @sk: socket - interacts with 'port' and with user via the socket API
61 * @connected: non-zero if port is currently connected to a peer port
62 * @conn_type: TIPC type used when connection was established
63 * @conn_instance: TIPC instance used when connection was established
64 * @published: non-zero if port has one or more associated names
65 * @max_pkt: maximum packet size "hint" used when building messages sent by port
66 * @portid: unique port identity in TIPC socket hash table
67 * @phdr: preformatted message header used when sending messages
68 * @port_list: adjacent ports in TIPC's global list of ports
69 * @publications: list of publications for port
70 * @pub_count: total # of publications port has made during its lifetime
73 * @conn_timeout: the time we can wait for an unresponded setup request
74 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
75 * @link_cong: non-zero if owner must sleep because of link congestion
76 * @snt_unacked: # messages sent by socket, and not yet acked by peer
77 * @rcv_unacked: # messages read by user, but not yet acked back to peer
78 * @remote: 'connected' peer for dgram/rdm
79 * @node: hash table node
80 * @rcu: rcu struct for tipc_sock
91 struct list_head sock_list;
92 struct list_head publications;
95 unsigned long probing_intv;
104 struct sockaddr_tipc remote;
105 struct rhash_head node;
109 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
110 static void tipc_data_ready(struct sock *sk);
111 static void tipc_write_space(struct sock *sk);
112 static void tipc_sock_destruct(struct sock *sk);
113 static int tipc_release(struct socket *sock);
114 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
115 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
116 static void tipc_sk_timeout(unsigned long data);
117 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
118 struct tipc_name_seq const *seq);
119 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
120 struct tipc_name_seq const *seq);
121 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
122 static int tipc_sk_insert(struct tipc_sock *tsk);
123 static void tipc_sk_remove(struct tipc_sock *tsk);
124 static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
126 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
128 static const struct proto_ops packet_ops;
129 static const struct proto_ops stream_ops;
130 static const struct proto_ops msg_ops;
131 static struct proto tipc_proto;
133 static const struct rhashtable_params tsk_rht_params;
136 * Revised TIPC socket locking policy:
138 * Most socket operations take the standard socket lock when they start
139 * and hold it until they finish (or until they need to sleep). Acquiring
140 * this lock grants the owner exclusive access to the fields of the socket
141 * data structures, with the exception of the backlog queue. A few socket
142 * operations can be done without taking the socket lock because they only
143 * read socket information that never changes during the life of the socket.
145 * Socket operations may acquire the lock for the associated TIPC port if they
146 * need to perform an operation on the port. If any routine needs to acquire
147 * both the socket lock and the port lock it must take the socket lock first
148 * to avoid the risk of deadlock.
150 * The dispatcher handling incoming messages cannot grab the socket lock in
151 * the standard fashion, since it runs at the BH level and cannot block.
152 * Instead, it checks to see if the socket lock is currently owned by someone,
153 * and either handles the message itself or adds it to the socket's backlog
154 * queue; in the latter case the queued message is processed once the process
155 * owning the socket lock releases it.
157 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
158 * the problem of a blocked socket operation preventing any other operations
159 * from occurring. However, applications must be careful if they have
160 * multiple threads trying to send (or receive) on the same socket, as these
161 * operations might interfere with each other. For example, doing a connect
162 * and a receive at the same time might allow the receive to consume the
163 * ACK message meant for the connect. While additional work could be done
164 * to try to overcome this, it doesn't seem worthwhile at present.
166 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
167 * that another operation that must be performed in a non-blocking manner is
168 * not delayed for very long because the lock has already been taken.
170 * NOTE: This code assumes that certain fields of a port/socket pair are
171 * constant over its lifetime; such fields can be examined without taking
172 * the socket lock and/or port lock, and do not need to be re-read even
173 * after resuming processing after waiting. These fields include:
175 * - pointer to socket sk structure (aka tipc_sock structure)
176 * - pointer to port structure
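/*
 * A condensed sketch of the policy described above (simplified from
 * tipc_sk_rcv() and tipc_sk_enqueue() further down; response queue handling
 * and backlog limits are left out): the message dispatcher never blocks on
 * the socket lock, it either delivers the buffer directly or defers it to
 * the backlog queue, which the lock owner drains when calling release_sock().
 *
 *	if (spin_trylock_bh(&sk->sk_lock.slock)) {
 *		if (!sock_owned_by_user(sk))
 *			filter_rcv(sk, skb, &xmitq);	// deliver now
 *		else
 *			sk_add_backlog(sk, skb, lim);	// deliver later
 *		spin_unlock_bh(&sk->sk_lock.slock);
 *	}
 */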
180 static u32 tsk_own_node(struct tipc_sock *tsk)
182 return msg_prevnode(&tsk->phdr);
185 static u32 tsk_peer_node(struct tipc_sock *tsk)
187 return msg_destnode(&tsk->phdr);
190 static u32 tsk_peer_port(struct tipc_sock *tsk)
192 return msg_destport(&tsk->phdr);
195 static bool tsk_unreliable(struct tipc_sock *tsk)
197 return msg_src_droppable(&tsk->phdr) != 0;
200 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
202 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
205 static bool tsk_unreturnable(struct tipc_sock *tsk)
207 return msg_dest_droppable(&tsk->phdr) != 0;
210 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
212 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
215 static int tsk_importance(struct tipc_sock *tsk)
217 return msg_importance(&tsk->phdr);
220 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
222 if (imp > TIPC_CRITICAL_IMPORTANCE)
224 msg_set_importance(&tsk->phdr, (u32)imp);
228 static struct tipc_sock *tipc_sk(const struct sock *sk)
230 return container_of(sk, struct tipc_sock, sk);
233 static bool tsk_conn_cong(struct tipc_sock *tsk)
235 return tsk->snt_unacked >= tsk->snd_win;
238 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
239 * advertisable blocks, taking into account the ratio truesize(len)/len
240 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
242 static u16 tsk_adv_blocks(int len)
244 return len / FLOWCTL_BLK_SZ / 4;
247 /* tsk_inc(): increment counter for sent or received data
248 * - If block-based flow control is not supported by the peer we
249 * fall back to message-based flow control, incrementing the counter
251 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
253 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
254 return ((msglen / FLOWCTL_BLK_SZ) + 1);
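/*
 * Worked example of the block-based flow control above (a sketch assuming
 * FLOWCTL_BLK_SZ is 1024 bytes, as in the mainline definition):
 *
 *	tsk_adv_blocks(2 * 1024 * 1024) = 2097152 / 1024 / 4 = 512 blocks
 *
 * i.e. a 2 MB receive buffer is advertised as 512 blocks, keeping a 4x
 * margin for the skb truesize overhead. A sender accounts for a 5000 byte
 * connection message with a 24 byte (SHORT_H_SIZE) header as
 *
 *	tsk_inc(tsk, 5000 + 24) = (5024 / 1024) + 1 = 5 blocks
 *
 * which covers the worst-case truesize of such a message (less than
 * 4 * 5024 bytes, i.e. within the 5 * 4 * 1024 = 20480 bytes of budget those
 * blocks represent). If the peer lacks TIPC_BLOCK_FLOWCTL, every message
 * simply counts as one unit.
 */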
259 * tsk_advance_rx_queue - discard first buffer in socket receive queue
261 * Caller must hold socket lock
263 static void tsk_advance_rx_queue(struct sock *sk)
265 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
268 /* tipc_sk_respond() : send response message back to sender
270 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
274 u32 onode = tipc_own_addr(sock_net(sk));
276 if (!tipc_msg_reverse(onode, &skb, err))
279 dnode = msg_destnode(buf_msg(skb));
280 selector = msg_origport(buf_msg(skb));
281 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
285 * tsk_rej_rx_queue - reject all buffers in socket receive queue
287 * Caller must hold socket lock
289 static void tsk_rej_rx_queue(struct sock *sk)
293 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
294 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
297 /* tsk_peer_msg - verify if message was sent by connected port's peer
299 * Handles cases where the node's network address has changed from
300 * the default of <0.0.0> to its configured setting.
302 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
304 struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
305 u32 peer_port = tsk_peer_port(tsk);
309 if (unlikely(!tsk->connected))
312 if (unlikely(msg_origport(msg) != peer_port))
315 orig_node = msg_orignode(msg);
316 peer_node = tsk_peer_node(tsk);
318 if (likely(orig_node == peer_node))
321 if (!orig_node && (peer_node == tn->own_addr))
324 if (!peer_node && (orig_node == tn->own_addr))
331 * tipc_sk_create - create a TIPC socket
332 * @net: network namespace (must be default network)
333 * @sock: pre-allocated socket structure
334 * @protocol: protocol indicator (must be 0)
335 * @kern: caused by kernel or by userspace?
337 * This routine creates additional data structures used by the TIPC socket,
338 * initializes them, and links them together.
340 * Returns 0 on success, errno otherwise
342 static int tipc_sk_create(struct net *net, struct socket *sock,
343 int protocol, int kern)
346 const struct proto_ops *ops;
349 struct tipc_sock *tsk;
350 struct tipc_msg *msg;
352 /* Validate arguments */
353 if (unlikely(protocol != 0))
354 return -EPROTONOSUPPORT;
356 switch (sock->type) {
359 state = SS_UNCONNECTED;
363 state = SS_UNCONNECTED;
374 /* Allocate socket's protocol area */
375 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
380 tsk->max_pkt = MAX_PKT_DEFAULT;
381 INIT_LIST_HEAD(&tsk->publications);
383 tn = net_generic(sock_net(sk), tipc_net_id);
384 tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
387 /* Finish initializing socket data structures */
390 sock_init_data(sock, sk);
391 if (tipc_sk_insert(tsk)) {
392 pr_warn("Socket create failed; port number exhausted\n");
395 msg_set_origport(msg, tsk->portid);
396 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
397 sk->sk_backlog_rcv = tipc_backlog_rcv;
398 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
399 sk->sk_data_ready = tipc_data_ready;
400 sk->sk_write_space = tipc_write_space;
401 sk->sk_destruct = tipc_sock_destruct;
402 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
403 atomic_set(&tsk->dupl_rcvcnt, 0);
405 /* Start out with safe limits until we receive an advertised window */
406 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
407 tsk->rcv_win = tsk->snd_win;
409 if (sock->state == SS_READY) {
410 tsk_set_unreturnable(tsk, true);
411 if (sock->type == SOCK_DGRAM)
412 tsk_set_unreliable(tsk, true);
417 static void tipc_sk_callback(struct rcu_head *head)
419 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
425 * tipc_release - destroy a TIPC socket
426 * @sock: socket to destroy
428 * This routine cleans up any messages that are still queued on the socket.
429 * For DGRAM and RDM socket types, all queued messages are rejected.
430 * For SEQPACKET and STREAM socket types, the first message is rejected
431 * and any others are discarded. (If the first message on a STREAM socket
432 * is partially-read, it is discarded and the next one is rejected instead.)
434 * NOTE: Rejected messages are not necessarily returned to the sender! They
435 * are returned or discarded according to the "destination droppable" setting
436 * specified for the message by the sender.
438 * Returns 0 on success, errno otherwise
440 static int tipc_release(struct socket *sock)
442 struct sock *sk = sock->sk;
444 struct tipc_sock *tsk;
449 * Exit if socket isn't fully initialized (occurs when a failed accept()
450 * releases a pre-allocated child socket that was never used)
460 * Reject all unreceived messages, except on an active connection
461 * (which disconnects locally & sends a 'FIN+' to peer)
463 dnode = tsk_peer_node(tsk);
464 while (sock->state != SS_DISCONNECTING) {
465 skb = __skb_dequeue(&sk->sk_receive_queue);
468 if (TIPC_SKB_CB(skb)->handle != NULL)
471 if ((sock->state == SS_CONNECTING) ||
472 (sock->state == SS_CONNECTED)) {
473 sock->state = SS_DISCONNECTING;
475 tipc_node_remove_conn(net, dnode, tsk->portid);
477 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
481 tipc_sk_withdraw(tsk, 0, NULL);
482 sk_stop_timer(sk, &sk->sk_timer);
484 if (tsk->connected) {
485 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
486 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
487 tsk_own_node(tsk), tsk_peer_port(tsk),
488 tsk->portid, TIPC_ERR_NO_PORT);
490 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
491 tipc_node_remove_conn(net, dnode, tsk->portid);
494 /* Reject any messages that accumulated in backlog queue */
495 sock->state = SS_DISCONNECTING;
498 call_rcu(&tsk->rcu, tipc_sk_callback);
505 * tipc_bind - associate or disassociate TIPC name(s) with a socket
506 * @sock: socket structure
507 * @uaddr: socket address describing name(s) and desired operation
508 * @uaddr_len: size of socket address data structure
510 * Name and name sequence binding is indicated using a positive scope value;
511 * a negative scope value unbinds the specified name. Specifying no name
512 * (i.e. a socket address length of 0) unbinds all names from the socket.
514 * Returns 0 on success, errno otherwise
516 * NOTE: This routine doesn't need to take the socket lock since it doesn't
517 * access any non-constant socket information.
519 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
522 struct sock *sk = sock->sk;
523 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
524 struct tipc_sock *tsk = tipc_sk(sk);
528 if (unlikely(!uaddr_len)) {
529 res = tipc_sk_withdraw(tsk, 0, NULL);
533 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
537 if (addr->family != AF_TIPC) {
542 if (addr->addrtype == TIPC_ADDR_NAME)
543 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
544 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
549 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
550 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
551 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
556 res = (addr->scope > 0) ?
557 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
558 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
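/*
 * Usage sketch from the application side (hedged; the service type and
 * instance range are illustrative, not taken from this file): a positive
 * scope publishes the name sequence, the negated scope withdraws it again,
 * and a zero-length address unbinds everything, as described in the comment
 * on tipc_bind() above.
 *
 *	struct sockaddr_tipc name = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 100, .upper = 199 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));	// publish
 *	name.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));	// withdraw
 *	bind(sd, (struct sockaddr *)&name, 0);			// unbind all
 */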
565 * tipc_getname - get port ID of socket or peer socket
566 * @sock: socket structure
567 * @uaddr: area for returned socket address
568 * @uaddr_len: area for returned length of socket address
569 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
571 * Returns 0 on success, errno otherwise
573 * NOTE: This routine doesn't need to take the socket lock since it only
574 * accesses socket information that is unchanging (or which changes in
575 * a completely predictable manner).
577 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
578 int *uaddr_len, int peer)
580 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
581 struct tipc_sock *tsk = tipc_sk(sock->sk);
582 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
584 memset(addr, 0, sizeof(*addr));
586 if ((sock->state != SS_CONNECTED) &&
587 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
589 addr->addr.id.ref = tsk_peer_port(tsk);
590 addr->addr.id.node = tsk_peer_node(tsk);
592 addr->addr.id.ref = tsk->portid;
593 addr->addr.id.node = tn->own_addr;
596 *uaddr_len = sizeof(*addr);
597 addr->addrtype = TIPC_ADDR_ID;
598 addr->family = AF_TIPC;
600 addr->addr.name.domain = 0;
606 * tipc_poll - read and possibly block on pollmask
607 * @file: file structure associated with the socket
608 * @sock: socket for which to calculate the poll bits
611 * Returns pollmask value
614 * It appears that the usual socket locking mechanisms are not useful here
615 * since the pollmask info is potentially out-of-date the moment this routine
616 * exits. TCP and other protocols seem to rely on higher level poll routines
617 * to handle any preventable race conditions, so TIPC will do the same ...
619 * TIPC sets the returned events as follows:
621 * socket state		flags set
622 * ------------		---------
623 * unconnected		no read flags
624 *				POLLOUT if port is not congested
626 * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
629 * connected			POLLIN/POLLRDNORM if data in rx queue
630 *				POLLOUT if port is not congested
632 * disconnecting		POLLIN/POLLRDNORM/POLLHUP
635 * listening			POLLIN if SYN in rx queue
638 * ready			POLLIN/POLLRDNORM if data in rx queue
639 * [connectionless]		POLLOUT (since port cannot be congested)
641 * IMPORTANT: The fact that a read or write operation is indicated does NOT
642 * imply that the operation will succeed, merely that it should be performed
643 * and will not block.
645 static unsigned int tipc_poll(struct file *file, struct socket *sock,
648 struct sock *sk = sock->sk;
649 struct tipc_sock *tsk = tipc_sk(sk);
652 sock_poll_wait(file, sk_sleep(sk), wait);
654 switch ((int)sock->state) {
661 if (!tsk->link_cong && !tsk_conn_cong(tsk))
666 if (!skb_queue_empty(&sk->sk_receive_queue))
667 mask |= (POLLIN | POLLRDNORM);
669 case SS_DISCONNECTING:
670 mask = (POLLIN | POLLRDNORM | POLLHUP);
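/*
 * A minimal application-side sketch of the event mapping above (hedged;
 * nothing here is specific to this file beyond the standard poll() API,
 * and 'sd' is assumed to be a TIPC socket):
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, 1000) > 0) {
 *		if (pfd.revents & POLLHUP)
 *			;	// peer gone: socket is disconnecting
 *		else if (pfd.revents & POLLIN)
 *			;	// data, or a SYN on a listening socket
 *		if (pfd.revents & POLLOUT)
 *			;	// not congested, a send should not block
 *	}
 */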
678 * tipc_sendmcast - send multicast message
679 * @sock: socket structure
680 * @seq: destination address
681 * @msg: message to send
682 * @dsz: total length of message data
683 * @timeo: timeout to wait for wakeup
685 * Called from function tipc_sendmsg(), which has done all sanity checks
686 * Returns the number of bytes sent on success, or errno
688 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
689 struct msghdr *msg, size_t dsz, long timeo)
691 struct sock *sk = sock->sk;
692 struct tipc_sock *tsk = tipc_sk(sk);
693 struct net *net = sock_net(sk);
694 struct tipc_msg *mhdr = &tsk->phdr;
695 struct sk_buff_head pktchain;
696 struct iov_iter save = msg->msg_iter;
700 msg_set_type(mhdr, TIPC_MCAST_MSG);
701 msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
702 msg_set_destport(mhdr, 0);
703 msg_set_destnode(mhdr, 0);
704 msg_set_nametype(mhdr, seq->type);
705 msg_set_namelower(mhdr, seq->lower);
706 msg_set_nameupper(mhdr, seq->upper);
707 msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
709 skb_queue_head_init(&pktchain);
712 mtu = tipc_bcast_get_mtu(net);
713 rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
714 if (unlikely(rc < 0))
718 rc = tipc_bcast_xmit(net, &pktchain);
722 if (rc == -ELINKCONG) {
724 rc = tipc_wait_for_sndmsg(sock, &timeo);
728 __skb_queue_purge(&pktchain);
729 if (rc == -EMSGSIZE) {
730 msg->msg_iter = save;
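/*
 * Application-side sketch of a multicast send (hedged; the service type and
 * instance range are illustrative): a TIPC_ADDR_MCAST destination carries a
 * name sequence, and tipc_sendmcast() above replicates the message to every
 * socket bound anywhere within that range.
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 1000, .lower = 100, .upper = 199 },
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */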
739 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
740 * @arrvq: queue with arriving messages, to be cloned after destination lookup
741 * @inputq: queue with cloned messages, delivered to socket after dest lookup
743 * Multi-threaded: parallel calls with reference to same queues may occur
745 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
746 struct sk_buff_head *inputq)
748 struct tipc_msg *msg;
749 struct tipc_plist dports;
751 u32 scope = TIPC_CLUSTER_SCOPE;
752 struct sk_buff_head tmpq;
754 struct sk_buff *skb, *_skb;
756 __skb_queue_head_init(&tmpq);
757 tipc_plist_init(&dports);
759 skb = tipc_skb_peek(arrvq, &inputq->lock);
760 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
762 hsz = skb_headroom(skb) + msg_hdr_sz(msg);
764 if (in_own_node(net, msg_orignode(msg)))
765 scope = TIPC_NODE_SCOPE;
767 /* Create destination port list and message clones: */
768 tipc_nametbl_mc_translate(net,
769 msg_nametype(msg), msg_namelower(msg),
770 msg_nameupper(msg), scope, &dports);
771 portid = tipc_plist_pop(&dports);
772 for (; portid; portid = tipc_plist_pop(&dports)) {
773 _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
775 msg_set_destport(buf_msg(_skb), portid);
776 __skb_queue_tail(&tmpq, _skb);
779 pr_warn("Failed to clone mcast rcv buffer\n");
781 /* Append to inputq if not already done by other thread */
782 spin_lock_bh(&inputq->lock);
783 if (skb_peek(arrvq) == skb) {
784 skb_queue_splice_tail_init(&tmpq, inputq);
785 kfree_skb(__skb_dequeue(arrvq));
787 spin_unlock_bh(&inputq->lock);
788 __skb_queue_purge(&tmpq);
791 tipc_sk_rcv(net, inputq);
795 * tipc_sk_proto_rcv - receive a connection management protocol message
796 * @tsk: receiving socket
797 * @skb: pointer to message buffer.
799 static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
800 struct sk_buff_head *xmitq)
802 struct sock *sk = &tsk->sk;
803 u32 onode = tsk_own_node(tsk);
804 struct tipc_msg *hdr = buf_msg(skb);
805 int mtyp = msg_type(hdr);
808 /* Ignore if connection cannot be validated: */
809 if (!tsk_peer_msg(tsk, hdr))
812 tsk->probing_state = TIPC_CONN_OK;
814 if (mtyp == CONN_PROBE) {
815 msg_set_type(hdr, CONN_PROBE_REPLY);
816 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
817 __skb_queue_tail(xmitq, skb);
819 } else if (mtyp == CONN_ACK) {
820 conn_cong = tsk_conn_cong(tsk);
821 tsk->snt_unacked -= msg_conn_ack(hdr);
822 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
823 tsk->snd_win = msg_adv_win(hdr);
825 sk->sk_write_space(sk);
826 } else if (mtyp != CONN_PROBE_REPLY) {
827 pr_warn("Received unknown CONN_PROTO msg\n");
833 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
835 struct sock *sk = sock->sk;
836 struct tipc_sock *tsk = tipc_sk(sk);
841 int err = sock_error(sk);
844 if (sock->state == SS_DISCONNECTING)
848 if (signal_pending(current))
849 return sock_intr_errno(*timeo_p);
851 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
852 done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
853 finish_wait(sk_sleep(sk), &wait);
859 * tipc_sendmsg - send message in connectionless manner
860 * @sock: socket structure
861 * @m: message to send
862 * @dsz: amount of user data to be sent
864 * Message must have a destination specified explicitly.
865 * Used for SOCK_RDM and SOCK_DGRAM messages,
866 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
867 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
869 * Returns the number of bytes sent on success, or errno otherwise
871 static int tipc_sendmsg(struct socket *sock,
872 struct msghdr *m, size_t dsz)
874 struct sock *sk = sock->sk;
878 ret = __tipc_sendmsg(sock, m, dsz);
884 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
886 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
887 struct sock *sk = sock->sk;
888 struct tipc_sock *tsk = tipc_sk(sk);
889 struct net *net = sock_net(sk);
890 struct tipc_msg *mhdr = &tsk->phdr;
892 struct sk_buff_head pktchain;
894 struct tipc_name_seq *seq;
895 struct iov_iter save;
900 if (dsz > TIPC_MAX_USER_MSG_SIZE)
902 if (unlikely(!dest)) {
903 if (tsk->connected && sock->state == SS_READY)
906 return -EDESTADDRREQ;
907 } else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
908 dest->family != AF_TIPC) {
911 if (unlikely(sock->state != SS_READY)) {
912 if (sock->state == SS_LISTENING)
914 if (sock->state != SS_UNCONNECTED)
918 if (dest->addrtype == TIPC_ADDR_NAME) {
919 tsk->conn_type = dest->addr.name.name.type;
920 tsk->conn_instance = dest->addr.name.name.instance;
923 seq = &dest->addr.nameseq;
924 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
926 if (dest->addrtype == TIPC_ADDR_MCAST) {
927 return tipc_sendmcast(sock, seq, m, dsz, timeo);
928 } else if (dest->addrtype == TIPC_ADDR_NAME) {
929 u32 type = dest->addr.name.name.type;
930 u32 inst = dest->addr.name.name.instance;
931 u32 domain = dest->addr.name.domain;
934 msg_set_type(mhdr, TIPC_NAMED_MSG);
935 msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
936 msg_set_nametype(mhdr, type);
937 msg_set_nameinst(mhdr, inst);
938 msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
939 dport = tipc_nametbl_translate(net, type, inst, &dnode);
940 msg_set_destnode(mhdr, dnode);
941 msg_set_destport(mhdr, dport);
942 if (unlikely(!dport && !dnode))
943 return -EHOSTUNREACH;
944 } else if (dest->addrtype == TIPC_ADDR_ID) {
945 dnode = dest->addr.id.node;
946 msg_set_type(mhdr, TIPC_DIRECT_MSG);
947 msg_set_lookup_scope(mhdr, 0);
948 msg_set_destnode(mhdr, dnode);
949 msg_set_destport(mhdr, dest->addr.id.ref);
950 msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
953 skb_queue_head_init(&pktchain);
956 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
957 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
962 skb = skb_peek(&pktchain);
963 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
964 rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
966 if (sock->state != SS_READY)
967 sock->state = SS_CONNECTING;
970 if (rc == -ELINKCONG) {
972 rc = tipc_wait_for_sndmsg(sock, &timeo);
976 __skb_queue_purge(&pktchain);
977 if (rc == -EMSGSIZE) {
987 static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
989 struct sock *sk = sock->sk;
990 struct tipc_sock *tsk = tipc_sk(sk);
995 int err = sock_error(sk);
998 if (sock->state == SS_DISCONNECTING)
1000 else if (sock->state != SS_CONNECTED)
1004 if (signal_pending(current))
1005 return sock_intr_errno(*timeo_p);
1007 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1008 done = sk_wait_event(sk, timeo_p,
1010 !tsk_conn_cong(tsk)) ||
1012 finish_wait(sk_sleep(sk), &wait);
1018 * tipc_send_stream - send stream-oriented data
1019 * @sock: socket structure
1021 * @dsz: total length of data to be transmitted
1023 * Used for SOCK_STREAM data.
1025 * Returns the number of bytes sent on success (or partial success),
1026 * or errno if no data sent
1028 static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
1030 struct sock *sk = sock->sk;
1034 ret = __tipc_send_stream(sock, m, dsz);
1040 static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
1042 struct sock *sk = sock->sk;
1043 struct net *net = sock_net(sk);
1044 struct tipc_sock *tsk = tipc_sk(sk);
1045 struct tipc_msg *mhdr = &tsk->phdr;
1046 struct sk_buff_head pktchain;
1047 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1048 u32 portid = tsk->portid;
1052 uint mtu, send, sent = 0;
1053 struct iov_iter save;
1054 int hlen = MIN_H_SIZE;
1056 /* Handle implied connection establishment */
1057 if (unlikely(dest)) {
1058 rc = __tipc_sendmsg(sock, m, dsz);
1059 hlen = msg_hdr_sz(mhdr);
1060 if (dsz && (dsz == rc))
1061 tsk->snt_unacked = tsk_inc(tsk, dsz + hlen);
1064 if (dsz > (uint)INT_MAX)
1067 if (unlikely(sock->state != SS_CONNECTED)) {
1068 if (sock->state == SS_DISCONNECTING)
1074 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1075 dnode = tsk_peer_node(tsk);
1076 skb_queue_head_init(&pktchain);
1081 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
1082 rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
1083 if (unlikely(rc < 0))
1087 if (likely(!tsk_conn_cong(tsk))) {
1088 rc = tipc_node_xmit(net, &pktchain, dnode, portid);
1090 tsk->snt_unacked += tsk_inc(tsk, send + hlen);
1096 if (rc == -EMSGSIZE) {
1097 __skb_queue_purge(&pktchain);
1098 tsk->max_pkt = tipc_node_get_mtu(net, dnode,
1103 if (rc != -ELINKCONG)
1108 rc = tipc_wait_for_sndpkt(sock, &timeo);
1111 __skb_queue_purge(&pktchain);
1112 return sent ? sent : rc;
1116 * tipc_send_packet - send a connection-oriented message
1117 * @sock: socket structure
1118 * @m: message to send
1119 * @dsz: length of data to be transmitted
1121 * Used for SOCK_SEQPACKET messages.
1123 * Returns the number of bytes sent on success, or errno otherwise
1125 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1127 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1130 return tipc_send_stream(sock, m, dsz);
1133 /* tipc_sk_finish_conn - complete the setup of a connection
1135 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1138 struct sock *sk = &tsk->sk;
1139 struct net *net = sock_net(sk);
1140 struct tipc_msg *msg = &tsk->phdr;
1142 msg_set_destnode(msg, peer_node);
1143 msg_set_destport(msg, peer_port);
1144 msg_set_type(msg, TIPC_CONN_MSG);
1145 msg_set_lookup_scope(msg, 0);
1146 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1148 tsk->probing_intv = CONN_PROBING_INTERVAL;
1149 tsk->probing_state = TIPC_CONN_OK;
1151 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
1152 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1153 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1154 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1155 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1158 /* Fall back to message based flow control */
1159 tsk->rcv_win = FLOWCTL_MSG_WIN;
1160 tsk->snd_win = FLOWCTL_MSG_WIN;
1164 * set_orig_addr - capture sender's address for received message
1165 * @m: descriptor for message info
1166 * @msg: received message header
1168 * Note: Address is not captured if not requested by receiver.
1170 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
1172 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
1175 addr->family = AF_TIPC;
1176 addr->addrtype = TIPC_ADDR_ID;
1177 memset(&addr->addr, 0, sizeof(addr->addr));
1178 addr->addr.id.ref = msg_origport(msg);
1179 addr->addr.id.node = msg_orignode(msg);
1180 addr->addr.name.domain = 0; /* could leave uninitialized */
1181 addr->scope = 0; /* could leave uninitialized */
1182 m->msg_namelen = sizeof(struct sockaddr_tipc);
1187 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1188 * @m: descriptor for message info
1189 * @msg: received message header
1190 * @tsk: TIPC port associated with message
1192 * Note: Ancillary data is not captured if not requested by receiver.
1194 * Returns 0 if successful, otherwise errno
1196 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1197 struct tipc_sock *tsk)
1205 if (likely(m->msg_controllen == 0))
1208 /* Optionally capture errored message object(s) */
1209 err = msg ? msg_errcode(msg) : 0;
1210 if (unlikely(err)) {
1212 anc_data[1] = msg_data_sz(msg);
1213 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1217 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1224 /* Optionally capture message destination object */
1225 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1226 switch (dest_type) {
1227 case TIPC_NAMED_MSG:
1229 anc_data[0] = msg_nametype(msg);
1230 anc_data[1] = msg_namelower(msg);
1231 anc_data[2] = msg_namelower(msg);
1233 case TIPC_MCAST_MSG:
1235 anc_data[0] = msg_nametype(msg);
1236 anc_data[1] = msg_namelower(msg);
1237 anc_data[2] = msg_nameupper(msg);
1240 has_name = (tsk->conn_type != 0);
1241 anc_data[0] = tsk->conn_type;
1242 anc_data[1] = tsk->conn_instance;
1243 anc_data[2] = tsk->conn_instance;
1249 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
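/*
 * Application-side sketch of reading the ancillary data produced above
 * (hedged; buffer sizes are illustrative). TIPC_DESTNAME carries the three
 * u32 values set up in anc_data[], while TIPC_ERRINFO/TIPC_RETDATA are
 * present only for returned (errored) messages.
 *
 *	char data[1024], cbuf[CMSG_SPACE(3 * sizeof(__u32))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr m = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm;
 *
 *	recvmsg(sd, &m, 0);
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *		if (cm->cmsg_level == SOL_TIPC &&
 *		    cm->cmsg_type == TIPC_DESTNAME) {
 *			__u32 *dest = (__u32 *)CMSG_DATA(cm);
 *			// dest[0] = type, dest[1] = lower, dest[2] = upper
 *		}
 *	}
 */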
1257 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1259 struct net *net = sock_net(&tsk->sk);
1260 struct sk_buff *skb = NULL;
1261 struct tipc_msg *msg;
1262 u32 peer_port = tsk_peer_port(tsk);
1263 u32 dnode = tsk_peer_node(tsk);
1265 if (!tsk->connected)
1267 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1268 dnode, tsk_own_node(tsk), peer_port,
1269 tsk->portid, TIPC_OK);
1273 msg_set_conn_ack(msg, tsk->rcv_unacked);
1274 tsk->rcv_unacked = 0;
1276 /* Adjust to and advertise the correct window limit */
1277 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1278 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1279 msg_set_adv_win(msg, tsk->rcv_win);
1281 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1284 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1286 struct sock *sk = sock->sk;
1288 long timeo = *timeop;
1292 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1293 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1294 if (sock->state == SS_DISCONNECTING) {
1299 timeo = schedule_timeout(timeo);
1303 if (!skb_queue_empty(&sk->sk_receive_queue))
1308 err = sock_intr_errno(timeo);
1309 if (signal_pending(current))
1312 finish_wait(sk_sleep(sk), &wait);
1318 * tipc_recvmsg - receive packet-oriented message
1319 * @m: descriptor for message info
1320 * @buf_len: total size of user buffer area
1321 * @flags: receive flags
1323 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1324 * If the complete message doesn't fit in user area, truncate it.
1326 * Returns size of returned message data, errno otherwise
1328 static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
1331 struct sock *sk = sock->sk;
1332 struct tipc_sock *tsk = tipc_sk(sk);
1333 struct sk_buff *buf;
1334 struct tipc_msg *msg;
1340 /* Catch invalid receive requests */
1341 if (unlikely(!buf_len))
1346 if (unlikely(sock->state == SS_UNCONNECTED)) {
1351 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1354 /* Look for a message in receive queue; wait if necessary */
1355 res = tipc_wait_for_rcvmsg(sock, &timeo);
1359 /* Look at first message in receive queue */
1360 buf = skb_peek(&sk->sk_receive_queue);
1362 sz = msg_data_sz(msg);
1363 hlen = msg_hdr_sz(msg);
1364 err = msg_errcode(msg);
1366 /* Discard an empty non-errored message & try again */
1367 if ((!sz) && (!err)) {
1368 tsk_advance_rx_queue(sk);
1372 /* Capture sender's address (optional) */
1373 set_orig_addr(m, msg);
1375 /* Capture ancillary data (optional) */
1376 res = tipc_sk_anc_data_recv(m, msg, tsk);
1380 /* Capture message data (if valid) & compute return value (always) */
1382 if (unlikely(buf_len < sz)) {
1384 m->msg_flags |= MSG_TRUNC;
1386 res = skb_copy_datagram_msg(buf, hlen, m, sz);
1391 if ((sock->state == SS_READY) ||
1392 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
1398 if (unlikely(flags & MSG_PEEK))
1401 if (likely(sock->state != SS_READY)) {
1402 tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
1403 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
1404 tipc_sk_send_ack(tsk);
1406 tsk_advance_rx_queue(sk);
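/*
 * Application-side sketch of the connectionless path handled above (hedged;
 * service type 1000 and instance 123 are illustrative): the sender addresses
 * a published name, and the receiver gets the datagram with the sender's
 * port ID filled in by set_orig_addr().
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 123 },
 *		.addr.name.domain = 0,		// unrestricted lookup
 *	};
 *	struct sockaddr_tipc from;
 *	socklen_t fromlen = sizeof(from);
 *	char buf[100];
 *
 *	sendto(sd, "hello", 5, 0, (struct sockaddr *)&srv, sizeof(srv));
 *	recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&from, &fromlen);
 */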
1413 * tipc_recv_stream - receive stream-oriented data
1414 * @m: descriptor for message info
1415 * @buf_len: total size of user buffer area
1416 * @flags: receive flags
1418 * Used for SOCK_STREAM messages only. If not enough data is available,
1419 * this routine will optionally wait for more; it never truncates data.
1421 * Returns size of returned message data, errno otherwise
1423 static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
1424 size_t buf_len, int flags)
1426 struct sock *sk = sock->sk;
1427 struct tipc_sock *tsk = tipc_sk(sk);
1428 struct sk_buff *buf;
1429 struct tipc_msg *msg;
1432 int sz_to_copy, target, needed;
1437 /* Catch invalid receive attempts */
1438 if (unlikely(!buf_len))
1443 if (unlikely(sock->state == SS_UNCONNECTED)) {
1448 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1449 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1452 /* Look for a message in receive queue; wait if necessary */
1453 res = tipc_wait_for_rcvmsg(sock, &timeo);
1457 /* Look at first message in receive queue */
1458 buf = skb_peek(&sk->sk_receive_queue);
1460 sz = msg_data_sz(msg);
1461 hlen = msg_hdr_sz(msg);
1462 err = msg_errcode(msg);
1464 /* Discard an empty non-errored message & try again */
1465 if ((!sz) && (!err)) {
1466 tsk_advance_rx_queue(sk);
1470 /* Optionally capture sender's address & ancillary data of first msg */
1471 if (sz_copied == 0) {
1472 set_orig_addr(m, msg);
1473 res = tipc_sk_anc_data_recv(m, msg, tsk);
1478 /* Capture message data (if valid) & compute return value (always) */
1480 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
1483 needed = (buf_len - sz_copied);
1484 sz_to_copy = (sz <= needed) ? sz : needed;
1486 res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
1490 sz_copied += sz_to_copy;
1492 if (sz_to_copy < sz) {
1493 if (!(flags & MSG_PEEK))
1494 TIPC_SKB_CB(buf)->handle =
1495 (void *)(unsigned long)(offset + sz_to_copy);
1500 goto exit; /* can't add error msg to valid data */
1502 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1508 if (unlikely(flags & MSG_PEEK))
1511 tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
1512 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
1513 tipc_sk_send_ack(tsk);
1514 tsk_advance_rx_queue(sk);
1516 /* Loop around if more data is required */
1517 if ((sz_copied < buf_len) && /* didn't get all requested data */
1518 (!skb_queue_empty(&sk->sk_receive_queue) ||
1519 (sz_copied < target)) && /* and more is ready or required */
1520 (!err)) /* and haven't reached a FIN */
1525 return sz_copied ? sz_copied : res;
1529 * tipc_write_space - wake up thread if port congestion is released
1532 static void tipc_write_space(struct sock *sk)
1534 struct socket_wq *wq;
1537 wq = rcu_dereference(sk->sk_wq);
1538 if (skwq_has_sleeper(wq))
1539 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1540 POLLWRNORM | POLLWRBAND);
1545 * tipc_data_ready - wake up threads to indicate messages have been received
1547 * @sk: socket
1549 static void tipc_data_ready(struct sock *sk)
1551 struct socket_wq *wq;
1554 wq = rcu_dereference(sk->sk_wq);
1555 if (skwq_has_sleeper(wq))
1556 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1557 POLLRDNORM | POLLRDBAND);
1561 static void tipc_sock_destruct(struct sock *sk)
1563 __skb_queue_purge(&sk->sk_receive_queue);
1567 * filter_connect - Handle all incoming messages for a connection-based socket
1569 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
1571 * Returns true if everything ok, false otherwise
1573 static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1575 struct sock *sk = &tsk->sk;
1576 struct net *net = sock_net(sk);
1577 struct socket *sock = sk->sk_socket;
1578 struct tipc_msg *hdr = buf_msg(skb);
1580 if (unlikely(msg_mcast(hdr)))
1583 switch ((int)sock->state) {
1586 /* Accept only connection-based messages sent by peer */
1587 if (unlikely(!tsk_peer_msg(tsk, hdr)))
1590 if (unlikely(msg_errcode(hdr))) {
1591 sock->state = SS_DISCONNECTING;
1593 /* Let timer expire on its own */
1594 tipc_node_remove_conn(net, tsk_peer_node(tsk),
1601 /* Accept only ACK or NACK message */
1602 if (unlikely(!msg_connected(hdr)))
1605 if (unlikely(msg_errcode(hdr))) {
1606 sock->state = SS_DISCONNECTING;
1607 sk->sk_err = ECONNREFUSED;
1611 if (unlikely(!msg_isdata(hdr))) {
1612 sock->state = SS_DISCONNECTING;
1613 sk->sk_err = EINVAL;
1617 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
1618 msg_set_importance(&tsk->phdr, msg_importance(hdr));
1619 sock->state = SS_CONNECTED;
1621 /* If 'ACK+' message, add to socket receive queue */
1622 if (msg_data_sz(hdr))
1625 /* If empty 'ACK-' message, wake up sleeping connect() */
1626 if (waitqueue_active(sk_sleep(sk)))
1627 wake_up_interruptible(sk_sleep(sk));
1629 /* 'ACK-' message is neither accepted nor rejected: */
1630 msg_set_dest_droppable(hdr, 1);
1634 case SS_UNCONNECTED:
1636 /* Accept only SYN message */
1637 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
1640 case SS_DISCONNECTING:
1643 pr_err("Unknown socket state %u\n", sock->state);
1649 * rcvbuf_limit - get proper overload limit of socket receive queue
1653 * For connection-oriented messages, irrespective of importance,
1654 * the default queue limit is 2 MB.
1656 * For connectionless messages, queue limits are based on message
1657 * importance as follows:
1659 * TIPC_LOW_IMPORTANCE (2 MB)
1660 * TIPC_MEDIUM_IMPORTANCE (4 MB)
1661 * TIPC_HIGH_IMPORTANCE (8 MB)
1662 * TIPC_CRITICAL_IMPORTANCE (16 MB)
1664 * Returns overload limit according to corresponding message importance
1666 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
1668 struct tipc_sock *tsk = tipc_sk(sk);
1669 struct tipc_msg *hdr = buf_msg(skb);
1671 if (unlikely(!msg_connected(hdr)))
1672 return sk->sk_rcvbuf << msg_importance(hdr);
1674 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
1675 return sk->sk_rcvbuf;
1677 return FLOWCTL_MSG_LIM;
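/*
 * Worked example of the limits above (a sketch assuming the default
 * sk_rcvbuf of 2 MB mentioned in the function comment): for connectionless
 * messages the limit is sk_rcvbuf << importance, i.e.
 *
 *	TIPC_LOW_IMPORTANCE (0):	2 MB << 0 =  2 MB
 *	TIPC_MEDIUM_IMPORTANCE (1):	2 MB << 1 =  4 MB
 *	TIPC_HIGH_IMPORTANCE (2):	2 MB << 2 =  8 MB
 *	TIPC_CRITICAL_IMPORTANCE (3):	2 MB << 3 = 16 MB
 *
 * matching the table above. Connection-oriented traffic from a peer capable
 * of block-based flow control is bounded by sk_rcvbuf alone; otherwise the
 * fixed compatibility limit FLOWCTL_MSG_LIM applies.
 */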
1681 * filter_rcv - validate incoming message
1683 * @skb: pointer to message.
1685 * Enqueues message on receive queue if acceptable; optionally handles
1686 * disconnect indication for a connected socket.
1688 * Called with socket lock already taken
1690 * Returns true if message was added to socket receive queue, otherwise false
1692 static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
1693 struct sk_buff_head *xmitq)
1695 struct socket *sock = sk->sk_socket;
1696 struct tipc_sock *tsk = tipc_sk(sk);
1697 struct tipc_msg *hdr = buf_msg(skb);
1698 unsigned int limit = rcvbuf_limit(sk, skb);
1700 int usr = msg_user(hdr);
1702 if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
1703 tipc_sk_proto_rcv(tsk, skb, xmitq);
1707 if (unlikely(usr == SOCK_WAKEUP)) {
1710 sk->sk_write_space(sk);
1714 /* Drop if illegal message type */
1715 if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
1720 /* Reject if wrong message type for current socket state */
1721 if (unlikely(sock->state == SS_READY)) {
1722 if (msg_connected(hdr)) {
1723 err = TIPC_ERR_NO_PORT;
1726 } else if (unlikely(!filter_connect(tsk, skb))) {
1727 err = TIPC_ERR_NO_PORT;
1731 /* Reject message if there isn't room to queue it */
1732 if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
1733 err = TIPC_ERR_OVERLOAD;
1737 /* Enqueue message */
1738 TIPC_SKB_CB(skb)->handle = NULL;
1739 __skb_queue_tail(&sk->sk_receive_queue, skb);
1740 skb_set_owner_r(skb, sk);
1742 sk->sk_data_ready(sk);
1746 if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
1747 __skb_queue_tail(xmitq, skb);
1752 * tipc_backlog_rcv - handle incoming message from backlog queue
1756 * Caller must hold socket lock
1760 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1762 unsigned int truesize = skb->truesize;
1763 struct sk_buff_head xmitq;
1764 u32 dnode, selector;
1766 __skb_queue_head_init(&xmitq);
1768 if (likely(filter_rcv(sk, skb, &xmitq))) {
1769 atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
1773 if (skb_queue_empty(&xmitq))
1776 /* Send response/rejected message */
1777 skb = __skb_dequeue(&xmitq);
1778 dnode = msg_destnode(buf_msg(skb));
1779 selector = msg_origport(buf_msg(skb));
1780 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
1785 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
1786 * inputq and try adding them to socket or backlog queue
1787 * @inputq: list of incoming buffers with potentially different destinations
1788 * @sk: socket where the buffers should be enqueued
1789 * @dport: port number for the socket
1791 * Caller must hold socket lock
1793 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1794 u32 dport, struct sk_buff_head *xmitq)
1796 unsigned long time_limit = jiffies + 2;
1797 struct sk_buff *skb;
1802 while (skb_queue_len(inputq)) {
1803 if (unlikely(time_after_eq(jiffies, time_limit)))
1806 skb = tipc_skb_dequeue(inputq, dport);
1810 /* Add message directly to receive queue if possible */
1811 if (!sock_owned_by_user(sk)) {
1812 filter_rcv(sk, skb, xmitq);
1816 /* Try backlog, compensating for double-counted bytes */
1817 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
1818 if (!sk->sk_backlog.len)
1819 atomic_set(dcnt, 0);
1820 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
1821 if (likely(!sk_add_backlog(sk, skb, lim)))
1824 /* Overload => reject message back to sender */
1825 onode = tipc_own_addr(sock_net(sk));
1826 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
1827 __skb_queue_tail(xmitq, skb);
1833 * tipc_sk_rcv - handle a chain of incoming buffers
1834 * @inputq: buffer list containing the buffers
1835 * Consumes all buffers in list until inputq is empty
1836 * Note: may be called in multiple threads referring to the same queue
1838 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
1840 struct sk_buff_head xmitq;
1841 u32 dnode, dport = 0;
1843 struct tipc_sock *tsk;
1845 struct sk_buff *skb;
1847 __skb_queue_head_init(&xmitq);
1848 while (skb_queue_len(inputq)) {
1849 dport = tipc_skb_peek_port(inputq, dport);
1850 tsk = tipc_sk_lookup(net, dport);
1854 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
1855 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
1856 spin_unlock_bh(&sk->sk_lock.slock);
1858 /* Send pending response/rejected messages, if any */
1859 while ((skb = __skb_dequeue(&xmitq))) {
1860 dnode = msg_destnode(buf_msg(skb));
1861 tipc_node_xmit_skb(net, skb, dnode, dport);
1867 /* No destination socket => dequeue skb if still there */
1868 skb = tipc_skb_dequeue(inputq, dport);
1872 /* Try secondary lookup if unresolved named message */
1873 err = TIPC_ERR_NO_PORT;
1874 if (tipc_msg_lookup_dest(net, skb, &err))
1877 /* Prepare for message rejection */
1878 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
1881 dnode = msg_destnode(buf_msg(skb));
1882 tipc_node_xmit_skb(net, skb, dnode, dport);
1886 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
1888 struct sock *sk = sock->sk;
1893 int err = sock_error(sk);
1898 if (signal_pending(current))
1899 return sock_intr_errno(*timeo_p);
1901 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1902 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
1903 finish_wait(sk_sleep(sk), &wait);
1909 * tipc_connect - establish a connection to another TIPC port
1910 * @sock: socket structure
1911 * @dest: socket address for destination port
1912 * @destlen: size of socket address data structure
1913 * @flags: file-related flags associated with socket
1915 * Returns 0 on success, errno otherwise
1917 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
1918 int destlen, int flags)
1920 struct sock *sk = sock->sk;
1921 struct tipc_sock *tsk = tipc_sk(sk);
1922 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1923 struct msghdr m = {NULL,};
1924 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
1925 socket_state previous;
1930 /* DGRAM/RDM connect(), just save the destaddr */
1931 if (sock->state == SS_READY) {
1932 if (dst->family == AF_UNSPEC) {
1933 memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc));
1935 } else if (destlen != sizeof(struct sockaddr_tipc)) {
1938 memcpy(&tsk->remote, dest, destlen);
1945 * Reject connection attempt using multicast address
1947 * Note: __tipc_sendmsg() validates the rest of the address fields,
1948 * so there's no need to do it here
1950 if (dst->addrtype == TIPC_ADDR_MCAST) {
1955 previous = sock->state;
1956 switch (sock->state) {
1957 case SS_UNCONNECTED:
1958 /* Send a 'SYN-' to destination */
1960 m.msg_namelen = destlen;
1962 /* If connect is non-blocking, set MSG_DONTWAIT so that
1963 * __tipc_sendmsg() never blocks.
1966 m.msg_flags = MSG_DONTWAIT;
1968 res = __tipc_sendmsg(sock, &m, 0);
1969 if ((res < 0) && (res != -EWOULDBLOCK))
1972 /* Just entered SS_CONNECTING state; the only
1973 * difference is that return value in non-blocking
1974 * case is EINPROGRESS, rather than EALREADY.
1978 if (previous == SS_CONNECTING)
1982 timeout = msecs_to_jiffies(timeout);
1983 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1984 res = tipc_wait_for_connect(sock, &timeout);
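/*
 * Application-side sketch of connection setup (hedged; the service address
 * is illustrative): connect() on a SOCK_SEQPACKET or SOCK_STREAM socket
 * sends the 'SYN-' built by __tipc_sendmsg() above and then waits in
 * tipc_wait_for_connect(); on a SOCK_RDM/SOCK_DGRAM socket it only records
 * the default destination in tsk->remote.
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 123 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *
 *	if (connect(sd, (struct sockaddr *)&srv, sizeof(srv)) == 0)
 *		send(sd, "hello", 5, 0);
 */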
1999 * tipc_listen - allow socket to listen for incoming connections
2000 * @sock: socket structure
2003 * Returns 0 on success, errno otherwise
2005 static int tipc_listen(struct socket *sock, int len)
2007 struct sock *sk = sock->sk;
2012 if (sock->state != SS_UNCONNECTED)
2015 sock->state = SS_LISTENING;
2023 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2025 struct sock *sk = sock->sk;
2029 /* True wake-one mechanism for incoming connections: only
2030 * one process gets woken up, not the 'whole herd'.
2031 * Since we do not 'race & poll' for established sockets
2032 * anymore, the common case will execute the loop only once.
2035 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2036 TASK_INTERRUPTIBLE);
2037 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2039 timeo = schedule_timeout(timeo);
2043 if (!skb_queue_empty(&sk->sk_receive_queue))
2046 if (sock->state != SS_LISTENING)
2051 err = sock_intr_errno(timeo);
2052 if (signal_pending(current))
2055 finish_wait(sk_sleep(sk), &wait);
2060 * tipc_accept - wait for connection request
2061 * @sock: listening socket
2062 * @new_sock: new socket that is to be connected
2063 * @flags: file-related flags associated with socket
2065 * Returns 0 on success, errno otherwise
2067 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
2069 struct sock *new_sk, *sk = sock->sk;
2070 struct sk_buff *buf;
2071 struct tipc_sock *new_tsock;
2072 struct tipc_msg *msg;
2078 if (sock->state != SS_LISTENING) {
2082 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2083 res = tipc_wait_for_accept(sock, timeo);
2087 buf = skb_peek(&sk->sk_receive_queue);
2089 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
2092 security_sk_clone(sock->sk, new_sock->sk);
2094 new_sk = new_sock->sk;
2095 new_tsock = tipc_sk(new_sk);
2098 /* we lock on new_sk; but lockdep sees the lock on sk */
2099 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2102 * Reject any stray messages received by new socket
2103 * before the socket lock was taken (very, very unlikely)
2105 tsk_rej_rx_queue(new_sk);
2107 /* Connect new socket to its peer */
2108 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2109 new_sock->state = SS_CONNECTED;
2111 tsk_set_importance(new_tsock, msg_importance(msg));
2112 if (msg_named(msg)) {
2113 new_tsock->conn_type = msg_nametype(msg);
2114 new_tsock->conn_instance = msg_nameinst(msg);
2118 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2119 * Respond to 'SYN+' by queuing it on new socket.
2121 if (!msg_data_sz(msg)) {
2122 struct msghdr m = {NULL,};
2124 tsk_advance_rx_queue(sk);
2125 __tipc_send_stream(new_sock, &m, 0);
2127 __skb_dequeue(&sk->sk_receive_queue);
2128 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2129 skb_set_owner_r(buf, new_sk);
2131 release_sock(new_sk);
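/*
 * Server-side sketch matching tipc_listen()/tipc_accept() above (hedged;
 * the bound service is illustrative): the listening socket collects 'SYN'
 * messages in its receive queue, and accept() hands each one over to a
 * freshly created, already connected socket.
 *
 *	int lsd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *	struct sockaddr_tipc name = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_ZONE_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 123, .upper = 123 },
 *	};
 *
 *	bind(lsd, (struct sockaddr *)&name, sizeof(name));
 *	listen(lsd, 0);
 *	for (;;) {
 *		int csd = accept(lsd, NULL, NULL);
 *		// serve csd, then close(csd)
 *	}
 */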
2138 * tipc_shutdown - shutdown socket connection
2139 * @sock: socket structure
2140 * @how: direction to close (must be SHUT_RDWR)
2142 * Terminates connection (if necessary), then purges socket's receive queue.
2144 * Returns 0 on success, errno otherwise
2146 static int tipc_shutdown(struct socket *sock, int how)
2148 struct sock *sk = sock->sk;
2149 struct net *net = sock_net(sk);
2150 struct tipc_sock *tsk = tipc_sk(sk);
2151 struct sk_buff *skb;
2152 u32 dnode = tsk_peer_node(tsk);
2153 u32 dport = tsk_peer_port(tsk);
2154 u32 onode = tipc_own_addr(net);
2155 u32 oport = tsk->portid;
2158 if (how != SHUT_RDWR)
2163 switch (sock->state) {
2168 dnode = tsk_peer_node(tsk);
2170 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
2171 skb = __skb_dequeue(&sk->sk_receive_queue);
2173 if (TIPC_SKB_CB(skb)->handle != NULL) {
2177 tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
2179 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
2180 TIPC_CONN_MSG, SHORT_H_SIZE,
2181 0, dnode, onode, dport, oport,
2182 TIPC_CONN_SHUTDOWN);
2184 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
2187 sock->state = SS_DISCONNECTING;
2188 tipc_node_remove_conn(net, dnode, tsk->portid);
2191 case SS_DISCONNECTING:
2193 /* Discard any unreceived messages */
2194 __skb_queue_purge(&sk->sk_receive_queue);
2196 /* Wake up anyone sleeping in poll */
2197 sk->sk_state_change(sk);
2209 static void tipc_sk_timeout(unsigned long data)
2211 struct tipc_sock *tsk = (struct tipc_sock *)data;
2212 struct sock *sk = &tsk->sk;
2213 struct sk_buff *skb = NULL;
2214 u32 peer_port, peer_node;
2215 u32 own_node = tsk_own_node(tsk);
2218 if (!tsk->connected) {
2222 peer_port = tsk_peer_port(tsk);
2223 peer_node = tsk_peer_node(tsk);
2225 if (tsk->probing_state == TIPC_CONN_PROBING) {
2226 if (!sock_owned_by_user(sk)) {
2227 sk->sk_socket->state = SS_DISCONNECTING;
2229 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
2230 tsk_peer_port(tsk));
2231 sk->sk_state_change(sk);
2233 /* Try again later */
2234 sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
2238 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
2239 INT_H_SIZE, 0, peer_node, own_node,
2240 peer_port, tsk->portid, TIPC_OK);
2241 tsk->probing_state = TIPC_CONN_PROBING;
2242 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
2246 tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
2251 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2252 struct tipc_name_seq const *seq)
2254 struct net *net = sock_net(&tsk->sk);
2255 struct publication *publ;
2260 key = tsk->portid + tsk->pub_count + 1;
2261 if (key == tsk->portid)
2264 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2265 scope, tsk->portid, key);
2266 if (unlikely(!publ))
2269 list_add(&publ->pport_list, &tsk->publications);
2275 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2276 struct tipc_name_seq const *seq)
2278 struct net *net = sock_net(&tsk->sk);
2279 struct publication *publ;
2280 struct publication *safe;
2283 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
2285 if (publ->scope != scope)
2287 if (publ->type != seq->type)
2289 if (publ->lower != seq->lower)
2291 if (publ->upper != seq->upper)
2293 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2294 publ->ref, publ->key);
2298 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2299 publ->ref, publ->key);
2302 if (list_empty(&tsk->publications))
2307 /* tipc_sk_reinit: set non-zero address in all existing sockets
2308 * when we go from standalone to network mode.
2310 void tipc_sk_reinit(struct net *net)
2312 struct tipc_net *tn = net_generic(net, tipc_net_id);
2313 const struct bucket_table *tbl;
2314 struct rhash_head *pos;
2315 struct tipc_sock *tsk;
2316 struct tipc_msg *msg;
2320 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2321 for (i = 0; i < tbl->size; i++) {
2322 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2323 spin_lock_bh(&tsk->sk.sk_lock.slock);
2325 msg_set_prevnode(msg, tn->own_addr);
2326 msg_set_orignode(msg, tn->own_addr);
2327 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2333 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2335 struct tipc_net *tn = net_generic(net, tipc_net_id);
2336 struct tipc_sock *tsk;
2339 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2341 sock_hold(&tsk->sk);
2347 static int tipc_sk_insert(struct tipc_sock *tsk)
2349 struct sock *sk = &tsk->sk;
2350 struct net *net = sock_net(sk);
2351 struct tipc_net *tn = net_generic(net, tipc_net_id);
2352 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2353 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2355 while (remaining--) {
2357 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2358 portid = TIPC_MIN_PORT;
2359 tsk->portid = portid;
2360 sock_hold(&tsk->sk);
2361 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,

static void tipc_sk_remove(struct tipc_sock *tsk)
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);

static const struct rhashtable_params tsk_rht_params = {
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.automatic_shrinking = true,
};

int tipc_sk_rht_init(struct net *net)
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);

void tipc_sk_rht_destroy(struct net *net)
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */

	rhashtable_destroy(&tn->sk_rht);

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	if (ol < sizeof(value))
	res = get_user(value, (u32 __user *)ov);

	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		/* no need to set "res", since already 0 at this point */
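
/*
 * Illustrative user space sketch (not part of this file; option values are
 * arbitrary examples): the options handled above are set at level SOL_TIPC:
 *
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *	unsigned int imp = TIPC_HIGH_IMPORTANCE;
 *	unsigned int tout = 10000;			// connect timeout, ms
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tout, sizeof(tout));
 */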

/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);

	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);

		return res;	/* "get" failed */
	if (len < sizeof(value))
	if (copy_to_user(ov, &value, sizeof(value)))
	return put_user(sizeof(value), ol);
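
/*
 * Illustrative user space sketch (not part of this file): reading an option
 * back, e.g. the current receive queue depth handled above:
 *
 *	unsigned int depth;
 *	socklen_t len = sizeof(depth);
 *
 *	getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len);
 */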

static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
		return -EADDRNOTAVAIL;
		return -ENOIOCTLCMD;
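
/*
 * Illustrative user space sketch (not part of this file; the peer address
 * below is a placeholder): resolving a link name via the ioctl handled above:
 *
 *	struct tipc_sioc_ln_req req = {
 *		.peer = peer_node_addr,		// e.g. taken from a received message
 *		.bearer_id = 0,
 *	};
 *
 *	if (!ioctl(sd, SIOCGETLINKNAME, &req))
 *		printf("link: %s\n", req.linkname);
 */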

/* Protocol switches for the various types of TIPC sockets */

static const struct proto_ops msg_ops = {
	.owner = THIS_MODULE,
	.release = tipc_release,
	.connect = tipc_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = tipc_getname,
	.ioctl = tipc_ioctl,
	.listen = sock_no_listen,
	.shutdown = tipc_shutdown,
	.setsockopt = tipc_setsockopt,
	.getsockopt = tipc_getsockopt,
	.sendmsg = tipc_sendmsg,
	.recvmsg = tipc_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage
};

static const struct proto_ops packet_ops = {
	.owner = THIS_MODULE,
	.release = tipc_release,
	.connect = tipc_connect,
	.socketpair = sock_no_socketpair,
	.accept = tipc_accept,
	.getname = tipc_getname,
	.ioctl = tipc_ioctl,
	.listen = tipc_listen,
	.shutdown = tipc_shutdown,
	.setsockopt = tipc_setsockopt,
	.getsockopt = tipc_getsockopt,
	.sendmsg = tipc_send_packet,
	.recvmsg = tipc_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage
};

static const struct proto_ops stream_ops = {
	.owner = THIS_MODULE,
	.release = tipc_release,
	.connect = tipc_connect,
	.socketpair = sock_no_socketpair,
	.accept = tipc_accept,
	.getname = tipc_getname,
	.ioctl = tipc_ioctl,
	.listen = tipc_listen,
	.shutdown = tipc_shutdown,
	.setsockopt = tipc_setsockopt,
	.getsockopt = tipc_getsockopt,
	.sendmsg = tipc_send_stream,
	.recvmsg = tipc_recv_stream,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage
};

static const struct net_proto_family tipc_family_ops = {
	.owner = THIS_MODULE,
	.create = tipc_sk_create
};

static struct proto tipc_proto = {
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tipc_sock),
	.sysctl_rmem = sysctl_tipc_rmem
};
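
/*
 * Illustrative user space sketch (assumed mapping, selected by
 * tipc_sk_create()): the three ops tables above back the TIPC socket types
 * an application can create:
 *
 *	int rdm    = socket(AF_TIPC, SOCK_RDM, 0);	  // msg_ops (also SOCK_DGRAM)
 *	int packet = socket(AF_TIPC, SOCK_SEQPACKET, 0);  // packet_ops
 *	int stream = socket(AF_TIPC, SOCK_STREAM, 0);	  // stream_ops
 */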

/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
	res = proto_register(&tipc_proto, 1);
		pr_err("Failed to register TIPC protocol type\n");

	res = sock_register(&tipc_family_ops);
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);

/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))

	nla_nest_end(skb, nest);

	nla_nest_cancel(skb, nest);

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tsk->connected) {
		err = __tipc_nl_add_sk_con(skb, tsk);
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	nla_nest_cancel(skb, attrs);
	genlmsg_cancel(skb, hdr);
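
/*
 * Resulting per-socket attribute layout (sketch, derived from the nla_put
 * calls above):
 *
 *	TIPC_NLA_SOCK
 *	    TIPC_NLA_SOCK_REF		(u32, socket port id)
 *	    TIPC_NLA_SOCK_ADDR		(u32, own node address)
 *	    TIPC_NLA_SOCK_CON		(nest, only while connected)
 *		TIPC_NLA_CON_NODE	(u32, peer node)
 *		TIPC_NLA_CON_SOCK	(u32, peer port)
 *		TIPC_NLA_CON_FLAG/_TYPE/_INST	(only if conn_type != 0)
 *	    TIPC_NLA_SOCK_HAS_PUBL	(flag, only if unconnected and the
 *					 socket has publications)
 */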

int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);

			err = __tipc_nl_add_sk(skb, cb, tsk);
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);

			spin_unlock_bh(&tsk->sk.sk_lock.slock);

	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	nla_nest_cancel(skb, attrs);
	genlmsg_cancel(skb, hdr);
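
/*
 * Each publication is dumped as a TIPC_NLA_PUBL nest holding the u32
 * attributes TIPC_NLA_PUBL_KEY, _TYPE, _LOWER and _UPPER, as put above.
 */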

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
	struct publication *p;

	list_for_each_entry(p, &tsk->publications, pport_list) {
		if (p->key == *last_publ)
	if (p->key != *last_publ) {
		/* We never set seq or call nl_dump_check_consistent(),
		 * so setting prev_seq here would make the consistency
		 * check fail in the netlink callback handler, resulting
		 * in the last NLMSG_DONE message having the
		 * NLM_F_DUMP_INTR flag set.
		 */
	p = list_first_entry(&tsk->publications, struct publication,
			     pport_list);

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		*last_publ = p->key;

int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	struct nlattr **attrs;
	struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

	err = tipc_nlmsg_parse(cb->nlh, &attrs);

	if (!attrs[TIPC_NLA_SOCK])

	err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
			       attrs[TIPC_NLA_SOCK],
			       tipc_nl_sock_policy);

	if (!sock[TIPC_NLA_SOCK_REF])

	tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);

	tsk = tipc_sk_lookup(net, tsk_portid);

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	release_sock(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;