1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 *******************************************************************************
5 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
6 ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
9 *******************************************************************************
10 ******************************************************************************/
15 * This is the "low-level" comms layer.
17 * It is responsible for sending/receiving messages
18 * from other nodes in the cluster.
21 * Cluster nodes are referred to by their nodeids. nodeids are
22 * simply 32-bit numbers to the locking module - if they need to
23 * be expanded for the cluster infrastructure then that is its
24 * responsibility. It is this layer's
25 * responsibility to resolve these into IP addresses or
26 * whatever else it needs for inter-node communication.
27 * The comms level is two kernel threads that deal mainly with
28 * the receiving of messages from other nodes and passing them
29 * up to the mid-level comms layer (which understands the
30 * message format) for execution by the locking core, and
31 * a send thread which does all the setting up of connections
32 * to remote nodes and the sending of data. Threads are not allowed
33 * to send their own data because it may cause them to wait in times
34 * of high load. Also, this way, the sending thread can collect together
35 * messages bound for one node and send them in one block.
37 * lowcomms will choose to use either TCP or SCTP as its transport layer
38 * depending on the configuration variable 'protocol'. This should be set
39 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
40 * cluster-wide mechanism as it must be the same on all nodes of the cluster
41 * for the DLM to function.
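*
* For example, with configfs mounted (normally done by the cluster
* manager rather than by hand), SCTP would typically be selected on
* every node with something like:
*
*   echo 1 > /sys/kernel/config/dlm/cluster/protocol
*
* before any lockspace is joined.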
45 #include <asm/ioctls.h>
48 #include <linux/pagemap.h>
49 #include <linux/file.h>
50 #include <linux/mutex.h>
51 #include <linux/sctp.h>
52 #include <linux/slab.h>
53 #include <net/sctp/sctp.h>
56 #include "dlm_internal.h"
61 #define NEEDED_RMEM (4*1024*1024)
63 /* Number of messages to send before rescheduling */
64 #define MAX_SEND_MSG_COUNT 25
65 #define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(10000)
68 struct socket *sock; /* NULL if not connected */
69 uint32_t nodeid; /* So we know who we are in the list */
70 struct mutex sock_mutex;
72 #define CF_READ_PENDING 1
73 #define CF_WRITE_PENDING 2
74 #define CF_INIT_PENDING 4
75 #define CF_IS_OTHERCON 5
77 #define CF_APP_LIMITED 7
80 #define CF_CONNECTED 10
81 #define CF_RECONNECT 11
82 #define CF_DELAY_CONNECT 12
84 struct list_head writequeue; /* List of outgoing writequeue_entries */
85 spinlock_t writequeue_lock;
86 atomic_t writequeue_cnt;
87 struct mutex wq_alloc;
89 #define MAX_CONNECT_RETRIES 3
90 struct hlist_node list;
91 struct connection *othercon;
92 struct connection *sendcon;
93 struct work_struct rwork; /* Receive workqueue */
94 struct work_struct swork; /* Send workqueue */
95 wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
96 unsigned char *rx_buf;
101 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
103 struct listen_connection {
105 struct work_struct rwork;
108 #define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end)
109 #define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset)
111 /* An entry waiting to be sent */
112 struct writequeue_entry {
113 struct list_head list;
120 struct connection *con;
121 struct list_head msgs;
126 struct writequeue_entry *entry;
127 struct dlm_msg *orig_msg;
131 int idx; /* srcu index handed from new() to commit() */
133 struct list_head list;
137 struct dlm_node_addr {
138 struct list_head list;
143 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
146 struct dlm_proto_ops {
151 int (*connect)(struct connection *con, struct socket *sock,
152 struct sockaddr *addr, int addr_len);
153 void (*sockopts)(struct socket *sock);
154 int (*bind)(struct socket *sock);
155 int (*listen_validate)(void);
156 void (*listen_sockopts)(struct socket *sock);
157 int (*listen_bind)(struct socket *sock);
158 /* What to do to shutdown */
159 void (*shutdown_action)(struct connection *con);
160 /* How to check for EOF */
161 bool (*eof_condition)(struct connection *con);
164 static struct listen_sock_callbacks {
165 void (*sk_error_report)(struct sock *);
166 void (*sk_data_ready)(struct sock *);
167 void (*sk_state_change)(struct sock *);
168 void (*sk_write_space)(struct sock *);
171 static LIST_HEAD(dlm_node_addrs);
172 static DEFINE_SPINLOCK(dlm_node_addrs_spin);
174 static struct listen_connection listen_con;
175 static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
176 static int dlm_local_count;
180 static struct workqueue_struct *recv_workqueue;
181 static struct workqueue_struct *send_workqueue;
183 static struct hlist_head connection_hash[CONN_HASH_SIZE];
184 static DEFINE_SPINLOCK(connections_lock);
185 DEFINE_STATIC_SRCU(connections_srcu);
187 static const struct dlm_proto_ops *dlm_proto_ops;
189 static void process_recv_sockets(struct work_struct *work);
190 static void process_send_sockets(struct work_struct *work);
192 /* must be called with writequeue_lock held */
193 static struct writequeue_entry *con_next_wq(struct connection *con)
195 struct writequeue_entry *e;
197 if (list_empty(&con->writequeue))
200 e = list_first_entry(&con->writequeue, struct writequeue_entry,
208 static struct connection *__find_con(int nodeid, int r)
210 struct connection *con;
212 hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
213 if (con->nodeid == nodeid)
220 static bool tcp_eof_condition(struct connection *con)
222 return atomic_read(&con->writequeue_cnt);
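/* one-time initialisation of a connection: allocate its receive buffer
* and set up the locks, write queue and work items
*/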
225 static int dlm_con_init(struct connection *con, int nodeid)
227 con->rx_buflen = dlm_config.ci_buffer_size;
228 con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
232 con->nodeid = nodeid;
233 mutex_init(&con->sock_mutex);
234 INIT_LIST_HEAD(&con->writequeue);
235 spin_lock_init(&con->writequeue_lock);
236 atomic_set(&con->writequeue_cnt, 0);
237 INIT_WORK(&con->swork, process_send_sockets);
238 INIT_WORK(&con->rwork, process_recv_sockets);
239 init_waitqueue_head(&con->shutdown_wait);
245 * If 'alloc' is zero then we don't attempt to create a new
246 * connection structure for this node.
248 static struct connection *nodeid2con(int nodeid, gfp_t alloc)
250 struct connection *con, *tmp;
253 r = nodeid_hash(nodeid);
254 con = __find_con(nodeid, r);
258 con = kzalloc(sizeof(*con), alloc);
262 ret = dlm_con_init(con, nodeid);
268 mutex_init(&con->wq_alloc);
270 spin_lock(&connections_lock);
271 /* Because multiple workqueues/threads call this function it can
272 * race on multiple CPUs. Instead of locking the hot path __find_con()
273 * we recheck here for recently added nodes,
274 * under protection of connections_lock. If one was added meanwhile we
275 * abort our connection creation and return the existing connection.
277 tmp = __find_con(nodeid, r);
279 spin_unlock(&connections_lock);
285 hlist_add_head_rcu(&con->list, &connection_hash[r]);
286 spin_unlock(&connections_lock);
291 /* Loop round all connections */
292 static void foreach_conn(void (*conn_func)(struct connection *c))
295 struct connection *con;
297 for (i = 0; i < CONN_HASH_SIZE; i++) {
298 hlist_for_each_entry_rcu(con, &connection_hash[i], list)
303 static struct dlm_node_addr *find_node_addr(int nodeid)
305 struct dlm_node_addr *na;
307 list_for_each_entry(na, &dlm_node_addrs, list) {
308 if (na->nodeid == nodeid)
314 static int addr_compare(const struct sockaddr_storage *x,
315 const struct sockaddr_storage *y)
317 switch (x->ss_family) {
319 struct sockaddr_in *sinx = (struct sockaddr_in *)x;
320 struct sockaddr_in *siny = (struct sockaddr_in *)y;
321 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
323 if (sinx->sin_port != siny->sin_port)
328 struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
329 struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
330 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
332 if (sinx->sin6_port != siny->sin6_port)
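/* look up an address to use for a nodeid, rotating to the node's next
* known address when try_new_addr is set (used for SCTP failover), and
* return the skb mark configured for that node
*/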
342 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
343 struct sockaddr *sa_out, bool try_new_addr,
346 struct sockaddr_storage sas;
347 struct dlm_node_addr *na;
349 if (!dlm_local_count)
352 spin_lock(&dlm_node_addrs_spin);
353 na = find_node_addr(nodeid);
354 if (na && na->addr_count) {
355 memcpy(&sas, na->addr[na->curr_addr_index],
356 sizeof(struct sockaddr_storage));
359 na->curr_addr_index++;
360 if (na->curr_addr_index == na->addr_count)
361 na->curr_addr_index = 0;
364 spin_unlock(&dlm_node_addrs_spin);
375 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
380 if (dlm_local_addr[0]->ss_family == AF_INET) {
381 struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
382 struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
383 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
385 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
386 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
387 ret6->sin6_addr = in6->sin6_addr;
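/* reverse lookup: map a peer address back to its nodeid and mark, used
* when accepting an incoming connection
*/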
393 static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
396 struct dlm_node_addr *na;
400 spin_lock(&dlm_node_addrs_spin);
401 list_for_each_entry(na, &dlm_node_addrs, list) {
405 for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
406 if (addr_compare(na->addr[addr_i], addr)) {
407 *nodeid = na->nodeid;
415 spin_unlock(&dlm_node_addrs_spin);
419 /* caller must hold the dlm_node_addrs_spin lock */
420 static bool dlm_lowcomms_na_has_addr(const struct dlm_node_addr *na,
421 const struct sockaddr_storage *addr)
425 for (i = 0; i < na->addr_count; i++) {
426 if (addr_compare(na->addr[i], addr))
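/* register an additional address for a nodeid; a node may have more than
* one address when it is multi-homed (only usable with SCTP)
*/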
433 int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
435 struct sockaddr_storage *new_addr;
436 struct dlm_node_addr *new_node, *na;
439 new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
443 new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
449 memcpy(new_addr, addr, len);
451 spin_lock(&dlm_node_addrs_spin);
452 na = find_node_addr(nodeid);
454 new_node->nodeid = nodeid;
455 new_node->addr[0] = new_addr;
456 new_node->addr_count = 1;
457 new_node->mark = dlm_config.ci_mark;
458 list_add(&new_node->list, &dlm_node_addrs);
459 spin_unlock(&dlm_node_addrs_spin);
463 ret = dlm_lowcomms_na_has_addr(na, addr);
465 spin_unlock(&dlm_node_addrs_spin);
471 if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
472 spin_unlock(&dlm_node_addrs_spin);
478 na->addr[na->addr_count++] = new_addr;
479 spin_unlock(&dlm_node_addrs_spin);
484 /* Data available on socket or listen socket received a connect */
485 static void lowcomms_data_ready(struct sock *sk)
487 struct connection *con;
489 read_lock_bh(&sk->sk_callback_lock);
491 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
492 queue_work(recv_workqueue, &con->rwork);
493 read_unlock_bh(&sk->sk_callback_lock);
496 static void lowcomms_listen_data_ready(struct sock *sk)
501 queue_work(recv_workqueue, &listen_con.rwork);
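/* sk_write_space callback: the socket has send buffer space again, so
* kick the send worker for this connection
*/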
504 static void lowcomms_write_space(struct sock *sk)
506 struct connection *con;
508 read_lock_bh(&sk->sk_callback_lock);
513 if (!test_and_set_bit(CF_CONNECTED, &con->flags)) {
514 log_print("successfully connected to node %d", con->nodeid);
515 queue_work(send_workqueue, &con->swork);
519 clear_bit(SOCK_NOSPACE, &con->sock->flags);
521 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
522 con->sock->sk->sk_write_pending--;
523 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
526 queue_work(send_workqueue, &con->swork);
528 read_unlock_bh(&sk->sk_callback_lock);
531 static inline void lowcomms_connect_sock(struct connection *con)
533 if (test_bit(CF_CLOSE, &con->flags))
535 queue_work(send_workqueue, &con->swork);
539 static void lowcomms_state_change(struct sock *sk)
541 /* The SCTP layer does not call sk_data_ready when the connection
542 * is done, so we catch the signal through here. Also, it
543 * doesn't switch socket state when entering shutdown, so we
544 * skip the write_space handling in that case.
546 if (sk->sk_shutdown) {
547 if (sk->sk_shutdown == RCV_SHUTDOWN)
548 lowcomms_data_ready(sk);
549 } else if (sk->sk_state == TCP_ESTABLISHED) {
550 lowcomms_write_space(sk);
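/* make sure a connection to nodeid exists and schedule the send worker
* to establish it if necessary
*/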
554 int dlm_lowcomms_connect_node(int nodeid)
556 struct connection *con;
559 if (nodeid == dlm_our_nodeid())
562 idx = srcu_read_lock(&connections_srcu);
563 con = nodeid2con(nodeid, GFP_NOFS);
565 srcu_read_unlock(&connections_srcu, idx);
569 lowcomms_connect_sock(con);
570 srcu_read_unlock(&connections_srcu, idx);
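/* set the socket mark to be used for future connections to this node */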
575 int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
577 struct dlm_node_addr *na;
579 spin_lock(&dlm_node_addrs_spin);
580 na = find_node_addr(nodeid);
582 spin_unlock(&dlm_node_addrs_spin);
587 spin_unlock(&dlm_node_addrs_spin);
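/* sk_error_report callback: log the socket error and, unless this is an
* othercon, schedule a reconnect of the sending connection
*/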
592 static void lowcomms_error_report(struct sock *sk)
594 struct connection *con;
595 void (*orig_report)(struct sock *) = NULL;
596 struct inet_sock *inet;
598 read_lock_bh(&sk->sk_callback_lock);
603 orig_report = listen_sock.sk_error_report;
606 switch (sk->sk_family) {
608 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
609 "sending to node %d at %pI4, dport %d, "
610 "sk_err=%d/%d\n", dlm_our_nodeid(),
611 con->nodeid, &inet->inet_daddr,
612 ntohs(inet->inet_dport), sk->sk_err,
615 #if IS_ENABLED(CONFIG_IPV6)
617 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
618 "sending to node %d at %pI6c, "
619 "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(),
620 con->nodeid, &sk->sk_v6_daddr,
621 ntohs(inet->inet_dport), sk->sk_err,
626 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
627 "invalid socket family %d set, "
628 "sk_err=%d/%d\n", dlm_our_nodeid(),
629 sk->sk_family, sk->sk_err, sk->sk_err_soft);
633 /* the code below handles the sendcon only */
634 if (test_bit(CF_IS_OTHERCON, &con->flags))
637 switch (sk->sk_err) {
639 set_bit(CF_DELAY_CONNECT, &con->flags);
645 if (!test_and_set_bit(CF_RECONNECT, &con->flags))
646 queue_work(send_workqueue, &con->swork);
649 read_unlock_bh(&sk->sk_callback_lock);
654 /* Note: sk_callback_lock must be locked before calling this function. */
655 static void save_listen_callbacks(struct socket *sock)
657 struct sock *sk = sock->sk;
659 listen_sock.sk_data_ready = sk->sk_data_ready;
660 listen_sock.sk_state_change = sk->sk_state_change;
661 listen_sock.sk_write_space = sk->sk_write_space;
662 listen_sock.sk_error_report = sk->sk_error_report;
665 static void restore_callbacks(struct socket *sock)
667 struct sock *sk = sock->sk;
669 write_lock_bh(&sk->sk_callback_lock);
670 sk->sk_user_data = NULL;
671 sk->sk_data_ready = listen_sock.sk_data_ready;
672 sk->sk_state_change = listen_sock.sk_state_change;
673 sk->sk_write_space = listen_sock.sk_write_space;
674 sk->sk_error_report = listen_sock.sk_error_report;
675 write_unlock_bh(&sk->sk_callback_lock);
678 static void add_listen_sock(struct socket *sock, struct listen_connection *con)
680 struct sock *sk = sock->sk;
682 write_lock_bh(&sk->sk_callback_lock);
683 save_listen_callbacks(sock);
686 sk->sk_user_data = con;
687 sk->sk_allocation = GFP_NOFS;
688 /* Install a data_ready callback */
689 sk->sk_data_ready = lowcomms_listen_data_ready;
690 write_unlock_bh(&sk->sk_callback_lock);
693 /* Make a socket active */
694 static void add_sock(struct socket *sock, struct connection *con)
696 struct sock *sk = sock->sk;
698 write_lock_bh(&sk->sk_callback_lock);
701 sk->sk_user_data = con;
702 /* Install a data_ready callback */
703 sk->sk_data_ready = lowcomms_data_ready;
704 sk->sk_write_space = lowcomms_write_space;
705 sk->sk_state_change = lowcomms_state_change;
706 sk->sk_allocation = GFP_NOFS;
707 sk->sk_error_report = lowcomms_error_report;
708 write_unlock_bh(&sk->sk_callback_lock);
711 /* Add the port number to an IPv6 or 4 sockaddr and return the address length */
713 static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
716 saddr->ss_family = dlm_local_addr[0]->ss_family;
717 if (saddr->ss_family == AF_INET) {
718 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
719 in4_addr->sin_port = cpu_to_be16(port);
720 *addr_len = sizeof(struct sockaddr_in);
721 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
723 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
724 in6_addr->sin6_port = cpu_to_be16(port);
725 *addr_len = sizeof(struct sockaddr_in6);
727 memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
730 static void dlm_page_release(struct kref *kref)
732 struct writequeue_entry *e = container_of(kref, struct writequeue_entry,
735 __free_page(e->page);
739 static void dlm_msg_release(struct kref *kref)
741 struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref);
743 kref_put(&msg->entry->ref, dlm_page_release);
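/* drop every message still attached to a writequeue entry and release
* the entry itself
*/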
747 static void free_entry(struct writequeue_entry *e)
749 struct dlm_msg *msg, *tmp;
751 list_for_each_entry_safe(msg, tmp, &e->msgs, list) {
753 msg->orig_msg->retransmit = false;
754 kref_put(&msg->orig_msg->ref, dlm_msg_release);
757 list_del(&msg->list);
758 kref_put(&msg->ref, dlm_msg_release);
762 atomic_dec(&e->con->writequeue_cnt);
763 kref_put(&e->ref, dlm_page_release);
766 static void dlm_close_sock(struct socket **sock)
769 restore_callbacks(*sock);
775 /* Close a remote connection and tidy up */
776 static void close_connection(struct connection *con, bool and_other,
779 bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
780 struct writequeue_entry *e;
782 if (tx && !closing && cancel_work_sync(&con->swork)) {
783 log_print("canceled swork for node %d", con->nodeid);
784 clear_bit(CF_WRITE_PENDING, &con->flags);
786 if (rx && !closing && cancel_work_sync(&con->rwork)) {
787 log_print("canceled rwork for node %d", con->nodeid);
788 clear_bit(CF_READ_PENDING, &con->flags);
791 mutex_lock(&con->sock_mutex);
792 dlm_close_sock(&con->sock);
794 if (con->othercon && and_other) {
795 /* Will only re-enter once. */
796 close_connection(con->othercon, false, tx, rx);
799 /* if we send a writequeue entry only half way, we drop the
800 * whole entry on reconnection so that we do not restart in the
801 * middle of a msg, which would confuse the other end.
803 * we can always drop messages because of retransmits, but what we
804 * cannot allow is to transmit half a message which may be processed
807 * our policy is to start from a clean state after a disconnect; we don't
808 * know what was sent/received on the transport layer in this case.
810 spin_lock(&con->writequeue_lock);
811 if (!list_empty(&con->writequeue)) {
812 e = list_first_entry(&con->writequeue, struct writequeue_entry,
817 spin_unlock(&con->writequeue_lock);
819 con->rx_leftover = 0;
821 clear_bit(CF_APP_LIMITED, &con->flags);
822 clear_bit(CF_CONNECTED, &con->flags);
823 clear_bit(CF_DELAY_CONNECT, &con->flags);
824 clear_bit(CF_RECONNECT, &con->flags);
825 clear_bit(CF_EOF, &con->flags);
826 mutex_unlock(&con->sock_mutex);
827 clear_bit(CF_CLOSING, &con->flags);
830 static void shutdown_connection(struct connection *con)
834 flush_work(&con->swork);
836 mutex_lock(&con->sock_mutex);
837 /* nothing to shutdown */
839 mutex_unlock(&con->sock_mutex);
843 set_bit(CF_SHUTDOWN, &con->flags);
844 ret = kernel_sock_shutdown(con->sock, SHUT_WR);
845 mutex_unlock(&con->sock_mutex);
847 log_print("Connection %p failed to shutdown: %d will force close",
851 ret = wait_event_timeout(con->shutdown_wait,
852 !test_bit(CF_SHUTDOWN, &con->flags),
853 DLM_SHUTDOWN_WAIT_TIMEOUT);
855 log_print("Connection %p shutdown timed out, will force close",
864 clear_bit(CF_SHUTDOWN, &con->flags);
865 close_connection(con, false, true, true);
868 static void dlm_tcp_shutdown(struct connection *con)
871 shutdown_connection(con->othercon);
872 shutdown_connection(con);
875 static int con_realloc_receive_buf(struct connection *con, int newlen)
877 unsigned char *newbuf;
879 newbuf = kmalloc(newlen, GFP_NOFS);
883 /* copy any leftover from last receive */
884 if (con->rx_leftover)
885 memmove(newbuf, con->rx_buf, con->rx_leftover);
887 /* swap to new buffer space */
889 con->rx_buflen = newlen;
890 con->rx_buf = newbuf;
895 /* Data received from remote end */
896 static int receive_from_sock(struct connection *con)
902 mutex_lock(&con->sock_mutex);
904 if (con->sock == NULL) {
909 /* reallocate if the configured receive buffer size has changed */
910 buflen = dlm_config.ci_buffer_size;
911 if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
912 ret = con_realloc_receive_buf(con, buflen);
918 /* calculate the new buffer parameters, taking into account any
919 * leftover bytes from the last receive
921 iov.iov_base = con->rx_buf + con->rx_leftover;
922 iov.iov_len = con->rx_buflen - con->rx_leftover;
924 memset(&msg, 0, sizeof(msg));
925 msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
926 ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
933 /* new buflen according to the bytes read plus the leftover from the last receive */
934 buflen = ret + con->rx_leftover;
935 ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
939 /* calculate the bytes left over after processing and move them to the
940 * beginning of the receive buffer, so that on the next receive the full
941 * message sits at the start address of the receive buffer.
943 con->rx_leftover = buflen - ret;
944 if (con->rx_leftover) {
945 memmove(con->rx_buf, con->rx_buf + ret,
950 dlm_midcomms_receive_done(con->nodeid);
951 mutex_unlock(&con->sock_mutex);
955 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
956 queue_work(recv_workqueue, &con->rwork);
957 mutex_unlock(&con->sock_mutex);
962 log_print("connection %p got EOF from %d",
965 if (dlm_proto_ops->eof_condition &&
966 dlm_proto_ops->eof_condition(con)) {
967 set_bit(CF_EOF, &con->flags);
968 mutex_unlock(&con->sock_mutex);
970 mutex_unlock(&con->sock_mutex);
971 close_connection(con, false, true, false);
973 /* handling for tcp shutdown */
974 clear_bit(CF_SHUTDOWN, &con->flags);
975 wake_up(&con->shutdown_wait);
978 /* signal the receive worker to stop */
981 mutex_unlock(&con->sock_mutex);
986 /* Listening socket is busy, accept a connection */
987 static int accept_from_sock(struct listen_connection *con)
990 struct sockaddr_storage peeraddr;
991 struct socket *newsock;
994 struct connection *newcon;
995 struct connection *addcon;
1001 result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
1005 /* Get the connected socket's peer */
1006 memset(&peeraddr, 0, sizeof(peeraddr));
1007 len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
1009 result = -ECONNABORTED;
1013 /* Get the new node's NODEID */
1014 make_sockaddr(&peeraddr, 0, &len);
1015 if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
1016 unsigned char *b = (unsigned char *)&peeraddr;
1017 log_print("connect from non-cluster node");
1018 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
1019 b, sizeof(struct sockaddr_storage));
1020 sock_release(newsock);
1024 log_print("got connection from %d", nodeid);
1026 /* Check to see if we already have a connection to this node. This
1027 * could happen if the two nodes initiate a connection at roughly
1028 * the same time and the connections cross on the wire.
1029 * In this case we store the incoming one in "othercon"
1031 idx = srcu_read_lock(&connections_srcu);
1032 newcon = nodeid2con(nodeid, GFP_NOFS);
1034 srcu_read_unlock(&connections_srcu, idx);
1039 sock_set_mark(newsock->sk, mark);
1041 mutex_lock(&newcon->sock_mutex);
1043 struct connection *othercon = newcon->othercon;
1046 othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
1048 log_print("failed to allocate incoming socket");
1049 mutex_unlock(&newcon->sock_mutex);
1050 srcu_read_unlock(&connections_srcu, idx);
1055 result = dlm_con_init(othercon, nodeid);
1058 mutex_unlock(&newcon->sock_mutex);
1059 srcu_read_unlock(&connections_srcu, idx);
1063 lockdep_set_subclass(&othercon->sock_mutex, 1);
1064 set_bit(CF_IS_OTHERCON, &othercon->flags);
1065 newcon->othercon = othercon;
1066 othercon->sendcon = newcon;
1068 /* close other sock con if we have something new */
1069 close_connection(othercon, false, true, false);
1072 mutex_lock(&othercon->sock_mutex);
1073 add_sock(newsock, othercon);
1075 mutex_unlock(&othercon->sock_mutex);
1078 /* accept copies the sk after we've saved the callbacks, so we
1079 don't want to save them a second time or comm errors will
1080 result in calling sk_error_report recursively. */
1081 add_sock(newsock, newcon);
1085 set_bit(CF_CONNECTED, &addcon->flags);
1086 mutex_unlock(&newcon->sock_mutex);
1089 * Add it to the active queue in case we got data
1090 * between processing the accept and adding the socket
1091 * to the read_sockets list
1093 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
1094 queue_work(recv_workqueue, &addcon->rwork);
1096 srcu_read_unlock(&connections_srcu, idx);
1102 sock_release(newsock);
1104 if (result != -EAGAIN)
1105 log_print("error accepting connection from node: %d", result);
1110 * writequeue_entry_complete - try to delete and free write queue entry
1111 * @e: write queue entry to try to delete
1112 * @completed: bytes completed
1114 * writequeue_lock must be held.
1116 static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
1118 e->offset += completed;
1119 e->len -= completed;
1120 /* signal that the page was only partially transmitted */
1123 if (e->len == 0 && e->users == 0)
1128 * sctp_bind_addrs - bind a SCTP socket to all our addresses
1130 static int sctp_bind_addrs(struct socket *sock, uint16_t port)
1132 struct sockaddr_storage localaddr;
1133 struct sockaddr *addr = (struct sockaddr *)&localaddr;
1134 int i, addr_len, result = 0;
1136 for (i = 0; i < dlm_local_count; i++) {
1137 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
1138 make_sockaddr(&localaddr, port, &addr_len);
1141 result = kernel_bind(sock, addr, addr_len);
1143 result = sock_bind_add(sock->sk, addr, addr_len);
1146 log_print("Can't bind to %d addr number %d, %d.\n",
1147 port, i + 1, result);
1154 /* Get local addresses */
1155 static void init_local(void)
1157 struct sockaddr_storage sas, *addr;
1160 dlm_local_count = 0;
1161 for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
1162 if (dlm_our_addr(&sas, i))
1165 addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
1168 dlm_local_addr[dlm_local_count++] = addr;
1172 static void deinit_local(void)
1176 for (i = 0; i < dlm_local_count; i++)
1177 kfree(dlm_local_addr[i]);
1180 static struct writequeue_entry *new_writequeue_entry(struct connection *con,
1183 struct writequeue_entry *entry;
1185 entry = kzalloc(sizeof(*entry), allocation);
1189 entry->page = alloc_page(allocation | __GFP_ZERO);
1197 kref_init(&entry->ref);
1198 INIT_LIST_HEAD(&entry->msgs);
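/* get a writequeue entry with at least len bytes of free space, reusing
* the entry at the tail of the write queue when possible and otherwise
* allocating a new page-backed entry
*/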
1203 static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
1204 gfp_t allocation, char **ppc,
1205 void (*cb)(struct dlm_mhandle *mh),
1206 struct dlm_mhandle *mh)
1208 struct writequeue_entry *e;
1210 spin_lock(&con->writequeue_lock);
1211 if (!list_empty(&con->writequeue)) {
1212 e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
1213 if (DLM_WQ_REMAIN_BYTES(e) >= len) {
1216 *ppc = page_address(e->page) + e->end;
1222 spin_unlock(&con->writequeue_lock);
1227 spin_unlock(&con->writequeue_lock);
1229 e = new_writequeue_entry(con, allocation);
1234 *ppc = page_address(e->page);
1236 atomic_inc(&con->writequeue_cnt);
1238 spin_lock(&con->writequeue_lock);
1242 list_add_tail(&e->list, &con->writequeue);
1243 spin_unlock(&con->writequeue_lock);
1248 static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
1249 gfp_t allocation, char **ppc,
1250 void (*cb)(struct dlm_mhandle *mh),
1251 struct dlm_mhandle *mh)
1253 struct writequeue_entry *e;
1254 struct dlm_msg *msg;
1257 msg = kzalloc(sizeof(*msg), allocation);
1261 /* this mutex is being used as a wait point to avoid multiple "fast"
1262 * writequeue page list entry allocs in new_wq_entry() during
1263 * normal operation, which runs in sleepable context. Without it
1264 * we could end up with multiple writequeue entries each carrying only
1265 * one dlm message, because multiple callers were waiting at
1266 * the writequeue_lock in new_wq_entry().
1268 sleepable = gfpflags_normal_context(allocation);
1270 mutex_lock(&con->wq_alloc);
1272 kref_init(&msg->ref);
1274 e = new_wq_entry(con, len, allocation, ppc, cb, mh);
1277 mutex_unlock(&con->wq_alloc);
1284 mutex_unlock(&con->wq_alloc);
1293 struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
1294 char **ppc, void (*cb)(struct dlm_mhandle *mh),
1295 struct dlm_mhandle *mh)
1297 struct connection *con;
1298 struct dlm_msg *msg;
1301 if (len > DLM_MAX_SOCKET_BUFSIZE ||
1302 len < sizeof(struct dlm_header)) {
1303 BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
1304 log_print("failed to allocate a buffer of size %d", len);
1309 idx = srcu_read_lock(&connections_srcu);
1310 con = nodeid2con(nodeid, allocation);
1312 srcu_read_unlock(&connections_srcu, idx);
1316 msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, mh);
1318 srcu_read_unlock(&connections_srcu, idx);
1322 /* for dlm_lowcomms_commit_msg() */
1323 kref_get(&msg->ref);
1324 /* we assume that on success commit must be called */
1329 static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
1331 struct writequeue_entry *e = msg->entry;
1332 struct connection *con = e->con;
1335 spin_lock(&con->writequeue_lock);
1336 kref_get(&msg->ref);
1337 list_add(&msg->list, &e->msgs);
1343 e->len = DLM_WQ_LENGTH_BYTES(e);
1344 spin_unlock(&con->writequeue_lock);
1346 queue_work(send_workqueue, &con->swork);
1350 spin_unlock(&con->writequeue_lock);
1354 void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
1356 _dlm_lowcomms_commit_msg(msg);
1357 srcu_read_unlock(&connections_srcu, msg->idx);
1358 /* drop the reference taken in dlm_lowcomms_new_msg() */
1359 kref_put(&msg->ref, dlm_msg_release);
1362 void dlm_lowcomms_put_msg(struct dlm_msg *msg)
1364 kref_put(&msg->ref, dlm_msg_release);
1367 /* does not hold connections_srcu; for use from the workqueue only */
1368 int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
1370 struct dlm_msg *msg_resend;
1373 if (msg->retransmit)
1376 msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
1377 GFP_ATOMIC, &ppc, NULL, NULL);
1381 msg->retransmit = true;
1382 kref_get(&msg->ref);
1383 msg_resend->orig_msg = msg;
1385 memcpy(ppc, msg->ppc, msg->len);
1386 _dlm_lowcomms_commit_msg(msg_resend);
1387 dlm_lowcomms_put_msg(msg_resend);
1392 /* Send a message */
1393 static void send_to_sock(struct connection *con)
1395 const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1396 struct writequeue_entry *e;
1397 int len, offset, ret;
1400 mutex_lock(&con->sock_mutex);
1401 if (con->sock == NULL)
1404 spin_lock(&con->writequeue_lock);
1406 e = con_next_wq(con);
1410 e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
1413 BUG_ON(len == 0 && e->users == 0);
1414 spin_unlock(&con->writequeue_lock);
1416 ret = kernel_sendpage(con->sock, e->page, offset, len,
1418 if (ret == -EAGAIN || ret == 0) {
1419 if (ret == -EAGAIN &&
1420 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1421 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1422 /* Notify TCP that we're limited by the
1423 * application window size.
1425 set_bit(SOCK_NOSPACE, &con->sock->flags);
1426 con->sock->sk->sk_write_pending++;
1433 /* Don't starve people filling buffers */
1434 if (++count >= MAX_SEND_MSG_COUNT) {
1439 spin_lock(&con->writequeue_lock);
1440 writequeue_entry_complete(e, ret);
1442 spin_unlock(&con->writequeue_lock);
1444 /* close if we got EOF */
1445 if (test_and_clear_bit(CF_EOF, &con->flags)) {
1446 mutex_unlock(&con->sock_mutex);
1447 close_connection(con, false, false, true);
1449 /* handling for tcp shutdown */
1450 clear_bit(CF_SHUTDOWN, &con->flags);
1451 wake_up(&con->shutdown_wait);
1453 mutex_unlock(&con->sock_mutex);
1459 mutex_unlock(&con->sock_mutex);
1463 mutex_unlock(&con->sock_mutex);
1464 queue_work(send_workqueue, &con->swork);
1468 static void clean_one_writequeue(struct connection *con)
1470 struct writequeue_entry *e, *safe;
1472 spin_lock(&con->writequeue_lock);
1473 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1476 spin_unlock(&con->writequeue_lock);
1479 /* Called from recovery when it knows that a node has left the cluster */
1481 int dlm_lowcomms_close(int nodeid)
1483 struct connection *con;
1484 struct dlm_node_addr *na;
1487 log_print("closing connection to node %d", nodeid);
1488 idx = srcu_read_lock(&connections_srcu);
1489 con = nodeid2con(nodeid, 0);
1491 set_bit(CF_CLOSE, &con->flags);
1492 close_connection(con, true, true, true);
1493 clean_one_writequeue(con);
1495 clean_one_writequeue(con->othercon);
1497 srcu_read_unlock(&connections_srcu, idx);
1499 spin_lock(&dlm_node_addrs_spin);
1500 na = find_node_addr(nodeid);
1502 list_del(&na->list);
1503 while (na->addr_count--)
1504 kfree(na->addr[na->addr_count]);
1507 spin_unlock(&dlm_node_addrs_spin);
1512 /* Receive workqueue function */
1513 static void process_recv_sockets(struct work_struct *work)
1515 struct connection *con = container_of(work, struct connection, rwork);
1517 clear_bit(CF_READ_PENDING, &con->flags);
1518 receive_from_sock(con);
1521 static void process_listen_recv_socket(struct work_struct *work)
1523 accept_from_sock(&listen_con);
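/* establish an outgoing connection to con->nodeid over the configured
* transport; called from the send worker
*/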
1526 static void dlm_connect(struct connection *con)
1528 struct sockaddr_storage addr;
1529 int result, addr_len;
1530 struct socket *sock;
1533 /* Some odd races can cause double-connects, ignore them */
1534 if (con->retries++ > MAX_CONNECT_RETRIES)
1538 log_print("node %d already connected.", con->nodeid);
1542 memset(&addr, 0, sizeof(addr));
1543 result = nodeid_to_addr(con->nodeid, &addr, NULL,
1544 dlm_proto_ops->try_new_addr, &mark);
1546 log_print("no address for nodeid %d", con->nodeid);
1550 /* Create a socket to communicate with */
1551 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1552 SOCK_STREAM, dlm_proto_ops->proto, &sock);
1556 sock_set_mark(sock->sk, mark);
1557 dlm_proto_ops->sockopts(sock);
1559 add_sock(sock, con);
1561 result = dlm_proto_ops->bind(sock);
1565 log_print_ratelimited("connecting to %d", con->nodeid);
1566 make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
1567 result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr,
1575 dlm_close_sock(&con->sock);
1579 * Some errors are fatal and this list might need adjusting. For other
1580 * errors we try again until the max number of retries is reached.
1582 if (result != -EHOSTUNREACH &&
1583 result != -ENETUNREACH &&
1584 result != -ENETDOWN &&
1585 result != -EINVAL &&
1586 result != -EPROTONOSUPPORT) {
1587 log_print("connect %d try %d error %d", con->nodeid,
1588 con->retries, result);
1590 lowcomms_connect_sock(con);
1594 /* Send workqueue function */
1595 static void process_send_sockets(struct work_struct *work)
1597 struct connection *con = container_of(work, struct connection, swork);
1599 WARN_ON(test_bit(CF_IS_OTHERCON, &con->flags));
1601 clear_bit(CF_WRITE_PENDING, &con->flags);
1603 if (test_and_clear_bit(CF_RECONNECT, &con->flags)) {
1604 close_connection(con, false, false, true);
1605 dlm_midcomms_unack_msg_resend(con->nodeid);
1608 if (con->sock == NULL) {
1609 if (test_and_clear_bit(CF_DELAY_CONNECT, &con->flags))
1612 mutex_lock(&con->sock_mutex);
1614 mutex_unlock(&con->sock_mutex);
1617 if (!list_empty(&con->writequeue))
1621 static void work_stop(void)
1623 if (recv_workqueue) {
1624 destroy_workqueue(recv_workqueue);
1625 recv_workqueue = NULL;
1628 if (send_workqueue) {
1629 destroy_workqueue(send_workqueue);
1630 send_workqueue = NULL;
1634 static int work_start(void)
1636 recv_workqueue = alloc_ordered_workqueue("dlm_recv", WQ_MEM_RECLAIM);
1637 if (!recv_workqueue) {
1638 log_print("can't start dlm_recv");
1642 send_workqueue = alloc_ordered_workqueue("dlm_send", WQ_MEM_RECLAIM);
1643 if (!send_workqueue) {
1644 log_print("can't start dlm_send");
1645 destroy_workqueue(recv_workqueue);
1646 recv_workqueue = NULL;
1653 static void shutdown_conn(struct connection *con)
1655 if (dlm_proto_ops->shutdown_action)
1656 dlm_proto_ops->shutdown_action(con);
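/* flush all pending work, close the listening socket and gracefully shut
* down every active connection
*/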
1659 void dlm_lowcomms_shutdown(void)
1663 /* Set all the flags to prevent any socket activity */
1669 flush_workqueue(recv_workqueue);
1671 flush_workqueue(send_workqueue);
1673 dlm_close_sock(&listen_con.sock);
1675 idx = srcu_read_lock(&connections_srcu);
1676 foreach_conn(shutdown_conn);
1677 srcu_read_unlock(&connections_srcu, idx);
1680 static void _stop_conn(struct connection *con, bool and_other)
1682 mutex_lock(&con->sock_mutex);
1683 set_bit(CF_CLOSE, &con->flags);
1684 set_bit(CF_READ_PENDING, &con->flags);
1685 set_bit(CF_WRITE_PENDING, &con->flags);
1686 if (con->sock && con->sock->sk) {
1687 write_lock_bh(&con->sock->sk->sk_callback_lock);
1688 con->sock->sk->sk_user_data = NULL;
1689 write_unlock_bh(&con->sock->sk->sk_callback_lock);
1691 if (con->othercon && and_other)
1692 _stop_conn(con->othercon, false);
1693 mutex_unlock(&con->sock_mutex);
1696 static void stop_conn(struct connection *con)
1698 _stop_conn(con, true);
1701 static void connection_release(struct rcu_head *rcu)
1703 struct connection *con = container_of(rcu, struct connection, rcu);
1709 static void free_conn(struct connection *con)
1711 close_connection(con, true, true, true);
1712 spin_lock(&connections_lock);
1713 hlist_del_rcu(&con->list);
1714 spin_unlock(&connections_lock);
1715 if (con->othercon) {
1716 clean_one_writequeue(con->othercon);
1717 call_srcu(&connections_srcu, &con->othercon->rcu,
1718 connection_release);
1720 clean_one_writequeue(con);
1721 call_srcu(&connections_srcu, &con->rcu, connection_release);
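/* stop every connection and wait until all of their queued read/write
* work has drained
*/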
1724 static void work_flush(void)
1728 struct connection *con;
1732 foreach_conn(stop_conn);
1734 flush_workqueue(recv_workqueue);
1736 flush_workqueue(send_workqueue);
1737 for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
1738 hlist_for_each_entry_rcu(con, &connection_hash[i],
1740 ok &= test_bit(CF_READ_PENDING, &con->flags);
1741 ok &= test_bit(CF_WRITE_PENDING, &con->flags);
1742 if (con->othercon) {
1743 ok &= test_bit(CF_READ_PENDING,
1744 &con->othercon->flags);
1745 ok &= test_bit(CF_WRITE_PENDING,
1746 &con->othercon->flags);
1753 void dlm_lowcomms_stop(void)
1757 idx = srcu_read_lock(&connections_srcu);
1759 foreach_conn(free_conn);
1760 srcu_read_unlock(&connections_srcu, idx);
1764 dlm_proto_ops = NULL;
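/* create the listening socket for the configured transport, bind it to
* our local address(es) and start listening for other cluster nodes
*/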
1767 static int dlm_listen_for_all(void)
1769 struct socket *sock;
1772 log_print("Using %s for communications",
1773 dlm_proto_ops->name);
1775 result = dlm_proto_ops->listen_validate();
1779 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1780 SOCK_STREAM, dlm_proto_ops->proto, &sock);
1782 log_print("Can't create comms socket, check SCTP is loaded");
1786 sock_set_mark(sock->sk, dlm_config.ci_mark);
1787 dlm_proto_ops->listen_sockopts(sock);
1789 result = dlm_proto_ops->listen_bind(sock);
1793 save_listen_callbacks(sock);
1794 add_listen_sock(sock, &listen_con);
1796 INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
1797 result = sock->ops->listen(sock, 5);
1799 dlm_close_sock(&listen_con.sock);
1810 static int dlm_tcp_bind(struct socket *sock)
1812 struct sockaddr_storage src_addr;
1813 int result, addr_len;
1815 /* Bind to our cluster-known address when connecting, to avoid routing problems */
1818 memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
1819 make_sockaddr(&src_addr, 0, &addr_len);
1821 result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
1824 /* This *may* not indicate a critical error */
1825 log_print("could not bind for connect: %d", result);
1831 static int dlm_tcp_connect(struct connection *con, struct socket *sock,
1832 struct sockaddr *addr, int addr_len)
1836 ret = sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
1847 static int dlm_tcp_listen_validate(void)
1849 /* We don't support multi-homed hosts */
1850 if (dlm_local_count > 1) {
1851 log_print("TCP protocol can't handle multi-homed hosts, try SCTP");
1858 static void dlm_tcp_sockopts(struct socket *sock)
1860 /* Turn off Nagle's algorithm */
1861 tcp_sock_set_nodelay(sock->sk);
1864 static void dlm_tcp_listen_sockopts(struct socket *sock)
1866 dlm_tcp_sockopts(sock);
1867 sock_set_reuseaddr(sock->sk);
1870 static int dlm_tcp_listen_bind(struct socket *sock)
1874 /* Bind to our port */
1875 make_sockaddr(dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
1876 return sock->ops->bind(sock, (struct sockaddr *)dlm_local_addr[0],
1880 static const struct dlm_proto_ops dlm_tcp_ops = {
1882 .proto = IPPROTO_TCP,
1883 .connect = dlm_tcp_connect,
1884 .sockopts = dlm_tcp_sockopts,
1885 .bind = dlm_tcp_bind,
1886 .listen_validate = dlm_tcp_listen_validate,
1887 .listen_sockopts = dlm_tcp_listen_sockopts,
1888 .listen_bind = dlm_tcp_listen_bind,
1889 .shutdown_action = dlm_tcp_shutdown,
1890 .eof_condition = tcp_eof_condition,
1893 static int dlm_sctp_bind(struct socket *sock)
1895 return sctp_bind_addrs(sock, 0);
1898 static int dlm_sctp_connect(struct connection *con, struct socket *sock,
1899 struct sockaddr *addr, int addr_len)
1904 * Make the sock->ops->connect() function return within a specified time,
1905 * since the O_NONBLOCK argument to connect() does not work here.
1906 * Afterwards, we restore the default value of this attribute.
1908 sock_set_sndtimeo(sock->sk, 5);
1909 ret = sock->ops->connect(sock, addr, addr_len, 0);
1910 sock_set_sndtimeo(sock->sk, 0);
1914 if (!test_and_set_bit(CF_CONNECTED, &con->flags))
1915 log_print("successfully connected to node %d", con->nodeid);
1920 static int dlm_sctp_listen_validate(void)
1922 if (!IS_ENABLED(CONFIG_IP_SCTP)) {
1923 log_print("SCTP is not enabled by this kernel");
1927 request_module("sctp");
1931 static int dlm_sctp_bind_listen(struct socket *sock)
1933 return sctp_bind_addrs(sock, dlm_config.ci_tcp_port);
1936 static void dlm_sctp_sockopts(struct socket *sock)
1938 /* Turn off Nagle's algorithm */
1939 sctp_sock_set_nodelay(sock->sk);
1940 sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
1943 static const struct dlm_proto_ops dlm_sctp_ops = {
1945 .proto = IPPROTO_SCTP,
1946 .try_new_addr = true,
1947 .connect = dlm_sctp_connect,
1948 .sockopts = dlm_sctp_sockopts,
1949 .bind = dlm_sctp_bind,
1950 .listen_validate = dlm_sctp_listen_validate,
1951 .listen_sockopts = dlm_sctp_sockopts,
1952 .listen_bind = dlm_sctp_bind_listen,
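/* bring lowcomms up: initialise the connection hash, resolve the local
* addresses, start the send/receive workqueues and set up the listening
* socket for the configured protocol
*/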
1955 int dlm_lowcomms_start(void)
1957 int error = -EINVAL;
1960 for (i = 0; i < CONN_HASH_SIZE; i++)
1961 INIT_HLIST_HEAD(&connection_hash[i]);
1964 if (!dlm_local_count) {
1966 log_print("no local IP address has been set");
1970 INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
1972 error = work_start();
1978 /* Start listening */
1979 switch (dlm_config.ci_protocol) {
1981 dlm_proto_ops = &dlm_tcp_ops;
1983 case DLM_PROTO_SCTP:
1984 dlm_proto_ops = &dlm_sctp_ops;
1987 log_print("Invalid protocol identifier %d set",
1988 dlm_config.ci_protocol);
1990 goto fail_proto_ops;
1993 error = dlm_listen_for_all();
2000 dlm_proto_ops = NULL;
2010 void dlm_lowcomms_exit(void)
2012 struct dlm_node_addr *na, *safe;
2014 spin_lock(&dlm_node_addrs_spin);
2015 list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
2016 list_del(&na->list);
2017 while (na->addr_count--)
2018 kfree(na->addr[na->addr_count]);
2021 spin_unlock(&dlm_node_addrs_spin);