/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/inet6_hashtables.h>
#include <net/addrconf.h>

#include "rds.h"
#include "loop.h"
#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
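
/* 12 bits give 1 << 12 = 4096 hash buckets; the mask folds a 32-bit
 * hash value into that range.
 */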
/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;
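
/* Pick the hash bucket for a (local, peer) address pair.  The hash
 * secrets are initialized once at first use, so bucket placement is
 * not predictable across boots.
 */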
static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr,
					  const struct in6_addr *faddr)
{
	static u32 rds6_hash_secret __read_mostly;
	static u32 rds_hash_secret __read_mostly;

	u32 lhash, fhash, hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
	net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
#if IS_ENABLED(CONFIG_IPV6)
	fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret);
#else
	fhash = (__force u32)faddr->s6_addr32[3];
#endif
	hash = __inet_ehashfn(lhash, 0, fhash, 0, rds_hash_secret);

	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}
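
/* Set RDS_INFO_CONNECTION_FLAG_<suffix> in @var when @test is true. */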
#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)
/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      const struct in6_addr *laddr,
					      const struct in6_addr *faddr,
					      struct rds_transport *trans,
					      u8 tos, int dev_if)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
		    ipv6_addr_equal(&conn->c_laddr, laddr) &&
		    conn->c_trans == trans &&
		    conn->c_tos == tos &&
		    net == rds_conn_net(conn) &&
		    conn->c_dev_if == dev_if) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret,
		 laddr, faddr);
	return ret;
}
/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("connection %pI6c to %pI6c reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_path_reset(cp);
	cp->cp_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}
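
/* Initialize one rds_conn_path: its lock, queues, work items and the
 * RDS_CONN_DOWN starting state.  Called once per path when a
 * connection is created.
 */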
static void __rds_conn_path_init(struct rds_connection *conn,
				 struct rds_conn_path *cp, bool is_outgoing)
{
	spin_lock_init(&cp->cp_lock);
	cp->cp_next_tx_seq = 1;
	init_waitqueue_head(&cp->cp_waitq);
	INIT_LIST_HEAD(&cp->cp_send_queue);
	INIT_LIST_HEAD(&cp->cp_retrans);

	cp->cp_conn = conn;
	atomic_set(&cp->cp_state, RDS_CONN_DOWN);
	cp->cp_send_gen = 0;
	cp->cp_reconnect_jiffies = 0;
	cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION;
	INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
	INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
	mutex_init(&cp->cp_cm_lock);
	cp->cp_flags = 0;
}
/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp, u8 tos,
						int is_outgoing,
						int dev_if)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret, i;
	int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans, tos, dev_if);
	if (conn && conn->c_loopback &&
	    conn->c_trans != &rds_loop_transport &&
	    ipv6_addr_equal(laddr, faddr) && !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;
	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}
	conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
	if (!conn->c_path) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}
	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = *laddr;
	conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
	conn->c_faddr = *faddr;
	conn->c_dev_if = dev_if;
	conn->c_tos = tos;
#if IS_ENABLED(CONFIG_IPV6)
	/* If the local address is link local, set c_bound_if to be the
	 * index used for this connection.  Otherwise, set it to 0 as
	 * the socket is not bound to an interface.  c_bound_if is used
	 * to look up a socket when a packet is received.
	 */
	if (ipv6_addr_type(laddr) & IPV6_ADDR_LINKLOCAL)
		conn->c_bound_if = dev_if;
	else
#endif
		conn->c_bound_if = 0;
	rds_conn_net_set(conn, net);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}
	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (trans->t_prefer_loopback) {
			if (likely(is_outgoing)) {
				/* "outgoing" connection to local address.
				 * Protocol says it wants the connection
				 * handled by the loopback transport.
				 * This is what TCP does.
				 */
				trans = &rds_loop_transport;
			} else {
				/* No transport currently in use
				 * should end up here, but if it
				 * does, reset/destroy the connection.
				 */
				kfree(conn->c_path);
				kmem_cache_free(rds_conn_slab, conn);
				conn = ERR_PTR(-EOPNOTSUPP);
				goto out;
			}
		}
	}
	conn->c_trans = trans;

	init_waitqueue_head(&conn->c_hs_waitq);
	for (i = 0; i < npaths; i++) {
		__rds_conn_path_init(conn, &conn->c_path[i],
				     is_outgoing);
		conn->c_path[i].cp_index = i;
	}
	rcu_read_lock();
	if (rds_destroy_pending(conn))
		ret = -ENETDOWN;
	else
		ret = trans->conn_alloc(conn, GFP_ATOMIC);
	if (ret) {
		rcu_read_unlock();
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n",
		 conn, laddr, faddr,
		 strnlen(trans->t_name, sizeof(trans->t_name)) ?
		 trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : "");
	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim.  We check while holding the lock.  If we won, we complete
	 * init and return our conn.  If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_path[0].cp_transport_data);
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans,
					tos, dev_if);
		if (found) {
			struct rds_conn_path *cp;

			for (i = 0; i < npaths; i++) {
				cp = &conn->c_path[i];
				/* The ->conn_alloc invocation may have
				 * allocated resource for all paths, so all
				 * of them may have to be freed here.
				 */
				if (cp->cp_transport_data)
					trans->conn_free(cp->cp_transport_data);
			}
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			conn->c_my_gen_num = rds_gen_num;
			conn->c_peer_gen_num = 0;
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);
	rcu_read_unlock();

out:
	return conn;
}
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans, u8 tos,
				       gfp_t gfp, int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 0, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						u8 tos, gfp_t gfp, int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 1, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
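
/* Illustrative sketch only ("my_transport" and the addresses are
 * placeholders, not part of this file): a transport's active connect
 * path might obtain its conn with
 *
 *	conn = rds_conn_create_outgoing(net, &laddr, &faddr,
 *					&my_transport, tos, GFP_KERNEL, 0);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */

/* Take one path from UP (or ERROR) through DISCONNECTING to DOWN,
 * quiescing the CM handlers and in-flight send/recv first, then queue
 * a reconnect if the conn is still hashed.  Runs from
 * rds_shutdown_worker().
 */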
void rds_conn_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	/* shut it down unless it's down already */
	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&cp->cp_cm_lock);
		if (!rds_conn_path_transition(cp, RDS_CONN_UP,
					      RDS_CONN_DISCONNECTING) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DISCONNECTING)) {
			rds_conn_path_error(cp,
					    "shutdown called in state %d\n",
					    atomic_read(&cp->cp_state));
			mutex_unlock(&cp->cp_cm_lock);
			return;
		}
		mutex_unlock(&cp->cp_cm_lock);
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_IN_XMIT, &cp->cp_flags));
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

		conn->c_trans->conn_path_shutdown(cp);
		rds_conn_path_reset(cp);
		if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
					      RDS_CONN_DOWN) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 *
			 * Note that this also happens with rds-tcp because
			 * we could have triggered rds_conn_path_drop in irq
			 * mode from rds_tcp_state change on the receipt of
			 * a FIN, thus we need to recheck for RDS_CONN_ERROR
			 * state.
			 */
			rds_conn_path_error(cp, "%s: failed to transition "
					    "to state DOWN, current state "
					    "is %d\n", __func__,
					    atomic_read(&cp->cp_state));
			return;
		}
	}
	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&cp->cp_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(cp);
	} else {
		rcu_read_unlock();
	}
}
/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
 * all paths using rds_conn_path_destroy()
 */
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
	struct rds_message *rm, *rtmp;

	if (!cp->cp_transport_data)
		return;

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);

	rds_conn_path_drop(cp, true);
	flush_work(&cp->cp_down_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &cp->cp_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (cp->cp_xmit_rm)
		rds_message_put(cp->cp_xmit_rm);

	WARN_ON(delayed_work_pending(&cp->cp_send_w));
	WARN_ON(delayed_work_pending(&cp->cp_recv_w));
	WARN_ON(delayed_work_pending(&cp->cp_conn_w));
	WARN_ON(work_pending(&cp->cp_down_w));

	cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}
/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	unsigned long flags;
	int i;
	struct rds_conn_path *cp;
	int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

	rdsdebug("freeing conn %p for %pI6c -> "
		 "%pI6c\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	for (i = 0; i < npaths; i++) {
		cp = &conn->c_path[i];
		rds_conn_path_destroy(cp);
		BUG_ON(!list_empty(&cp->cp_retrans));
	}

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	kfree(conn->c_path);
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);
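
/* Copy one incoming-message record out through the info iterator, using
 * the IPv6 layout when @isv6 is set and the legacy IPv4 layout
 * otherwise.
 */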
static void __rds_inc_msg_cp(struct rds_incoming *inc,
			     struct rds_info_iterator *iter,
			     void *saddr, void *daddr, int flip, bool isv6)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (isv6)
		rds6_inc_info_copy(inc, iter, saddr, daddr, flip);
	else
#endif
		rds_inc_info_copy(inc, iter, *(__be32 *)saddr,
				  *(__be32 *)daddr, flip);
}
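
/* Walk every connection and path, dumping either the send queue or the
 * retransmit queue (@want_send) through the info iterator.  @len caps
 * how many entries fit in the caller's buffer; @lens reports the true
 * totals so the caller can resize and retry.
 */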
static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len,
				      struct rds_info_iterator *iter,
				      struct rds_info_lengths *lens,
				      int want_send, bool isv6)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;
	int j;

	if (isv6)
		len /= sizeof(struct rds6_info_message);
	else
		len /= sizeof(struct rds_info_message);

	rcu_read_lock();
	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;
			int npaths;

			if (!isv6 && conn->c_isv6)
				continue;

			npaths = (conn->c_trans->t_mp_capable ?
				 RDS_MPATH_WORKERS : 1);

			for (j = 0; j < npaths; j++) {
				cp = &conn->c_path[j];
				if (want_send)
					list = &cp->cp_send_queue;
				else
					list = &cp->cp_retrans;

				spin_lock_irqsave(&cp->cp_lock, flags);

				/* XXX too lazy to maintain counts.. */
				list_for_each_entry(rm, list, m_conn_item) {
					total++;
					if (total <= len)
						__rds_inc_msg_cp(&rm->m_inc,
								 iter,
								 &conn->c_laddr,
								 &conn->c_faddr,
								 0, isv6);
				}

				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
	}
	rcu_read_unlock();
	lens->nr = total;
	if (isv6)
		lens->each = sizeof(struct rds6_info_message);
	else
		lens->each = sizeof(struct rds_info_message);
}
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_message_info(struct socket *sock, unsigned int len,
				   struct rds_info_iterator *iter,
				   struct rds_info_lengths *lens,
				   int want_send)
{
	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true);
}
#endif
static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_message_info_send(struct socket *sock, unsigned int len,
					struct rds_info_iterator *iter,
					struct rds_info_lengths *lens)
{
	rds6_conn_message_info(sock, len, iter, lens, 1);
}
#endif
static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_message_info_retrans(struct socket *sock,
					   unsigned int len,
					   struct rds_info_iterator *iter,
					   struct rds_info_lengths *lens)
{
	rds6_conn_message_info(sock, len, iter, lens, 0);
}
#endif
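
/* Generic info walker: call @visitor on every connection and copy each
 * @item_len record it fills in @buffer out through the iterator.
 */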
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer,
			    size_t item_len)
{
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;
	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
				    struct rds_info_iterator *iter,
				    struct rds_info_lengths *lens,
				    int (*visitor)(struct rds_conn_path *, void *),
				    u64 *buffer,
				    size_t item_len)
{
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;
	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			/* XXX We only copy the information from the first
			 * path for now.  The problem is that if there are
			 * more than one underlying paths, we cannot report
			 * information of all of them using the existing
			 * API.  For example, there is only one next_tx_seq,
			 * which path's next_tx_seq should we report?  It is
			 * a bug in the design of MPRDS.
			 */
			cp = conn->c_path;

			/* XXX no cp_lock usage.. */
			if (!visitor(cp, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer.
			 */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds_info_connection *cinfo = buffer;
	struct rds_connection *conn = cp->cp_conn;

	if (conn->c_isv6)
		return 0;

	cinfo->next_tx_seq = cp->cp_next_tx_seq;
	cinfo->next_rx_seq = cp->cp_next_rx_seq;
	cinfo->laddr = conn->c_laddr.s6_addr32[3];
	cinfo->faddr = conn->c_faddr.s6_addr32[3];
	cinfo->tos = conn->c_tos;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}
#if IS_ENABLED(CONFIG_IPV6)
static int rds6_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds6_info_connection *cinfo6 = buffer;
	struct rds_connection *conn = cp->cp_conn;

	cinfo6->next_tx_seq = cp->cp_next_tx_seq;
	cinfo6->next_rx_seq = cp->cp_next_rx_seq;
	cinfo6->laddr = conn->c_laddr;
	cinfo6->faddr = conn->c_faddr;
	strncpy(cinfo6->transport, conn->c_trans->t_name,
		sizeof(cinfo6->transport));
	cinfo6->flags = 0;

	rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo6->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo6->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	/* Just return 1 as there is no error case. This is a helper function
	 * for rds_walk_conn_path_info() and it wants a return value.
	 */
	return 1;
}
#endif
static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	u64 buffer[(sizeof(struct rds_info_connection) + 7) / 8];

	rds_walk_conn_path_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				buffer,
				sizeof(struct rds_info_connection));
}
#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	u64 buffer[(sizeof(struct rds6_info_connection) + 7) / 8];

	rds_walk_conn_path_info(sock, len, iter, lens,
				rds6_conn_info_visitor,
				buffer,
				sizeof(struct rds6_info_connection));
}
#endif
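
/* Module init: register the pernet loopback callback, create the
 * connection slab cache and register the rds-info dump handlers.
 * Undone by rds_conn_exit().
 */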
int rds_conn_init(void)
{
	int ret;

	ret = rds_loop_net_init(); /* register pernet callback */
	if (ret)
		return ret;

	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab) {
		rds_loop_net_exit();
		return -ENOMEM;
	}

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);
#if IS_ENABLED(CONFIG_IPV6)
	rds_info_register_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
	rds_info_register_func(RDS6_INFO_SEND_MESSAGES,
			       rds6_conn_message_info_send);
	rds_info_register_func(RDS6_INFO_RETRANS_MESSAGES,
			       rds6_conn_message_info_retrans);
#endif
	return 0;
}
void rds_conn_exit(void)
{
	rds_loop_net_exit(); /* unregister pernet callback */
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
#if IS_ENABLED(CONFIG_IPV6)
	rds_info_deregister_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
	rds_info_deregister_func(RDS6_INFO_SEND_MESSAGES,
				 rds6_conn_message_info_send);
	rds_info_deregister_func(RDS6_INFO_RETRANS_MESSAGES,
				 rds6_conn_message_info_retrans);
#endif
}
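
/*
 * Force a disconnect: mark the path as errored and queue the shutdown
 * worker, unless the netns is already being dismantled and this is not
 * a destroy-time drop.
 */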
void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
{
	atomic_set(&cp->cp_state, RDS_CONN_ERROR);

	rcu_read_lock();
	if (!destroy && rds_destroy_pending(cp->cp_conn)) {
		rcu_read_unlock();
		return;
	}
	queue_work(rds_wq, &cp->cp_down_w);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);
void rds_conn_drop(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_drop(&conn->c_path[0], false);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);
/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
	rcu_read_lock();
	if (rds_destroy_pending(cp->cp_conn)) {
		rcu_read_unlock();
		return;
	}
	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
/* Check connectivity of all paths
 */
void rds_check_all_paths(struct rds_connection *conn)
{
	int i = 0;

	do {
		rds_conn_path_connect_if_down(&conn->c_path[i]);
	} while (++i < conn->c_npaths);
}

void rds_conn_connect_if_down(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
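
/* Log a printf-style error for the path, then drop it so the shutdown
 * worker runs.  Normally reached through the rds_conn_path_error()
 * wrapper.
 */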
void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_path_drop(cp, false);
}