/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - IPv6 support postponed
 *   - support for alternate links postponed
 *   - partial support for non-blocking sockets only
 *   - support for urgent data postponed
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */
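/*
 * Userspace usage sketch (illustrative only, not part of this module):
 * an application opts in to SMC by creating an AF_SMC stream socket; the
 * handshake implemented below then decides transparently whether RDMA or
 * the TCP fallback carries the traffic.
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, 0);
 *	struct sockaddr_in sa = {
 *		.sin_family = AF_INET,		// AF_INET addresses are kept
 *		.sin_port = htons(7777),	// port chosen for the example
 *	};
 *	sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	if (fd >= 0 && connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == 0)
 *		write(fd, "hello", 5);		// path chosen by the handshake
 */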
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
static DEFINE_MUTEX(smc_create_lgr_pending);	/* serialize link group
						 * creation
						 */

struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
};

static void smc_tcp_listen_work(struct work_struct *);
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);
void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);
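/* tear down an smc socket: terminate the SMC-side connection state,
 * release the internal CLC/TCP socket and drop the final sock reference
 */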
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	smc = smc_sk(sk);
	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		sock_release(smc->clcsock);
		smc->clcsock = NULL;
	}
	if (smc->use_fallback) {
		sock_put(sk); /* passive closing */
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
	}

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
		smc_conn_free(&smc->conn);
	release_sock(sk);

	sk->sk_prot->unhash(sk);
	sock_put(sk); /* final sock_put */
out:
	return rc;
}
static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
{
	struct smc_sock *smc;
	struct sock *sk;

	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, &smc_proto, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = SMCPROTO_SMC;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);

	return sk;
}
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if ((addr->sin_family != AF_INET) &&
	    ((addr->sin_family != AF_UNSPEC) ||
	     (addr->sin_addr.s_addr != htonl(INADDR_ANY))))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}
#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED))
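/* Example (sketch): with mask == SK_FLAGS_SMC_TO_CLC, a SO_KEEPALIVE set on
 * the SMC socket (SOCK_KEEPOPEN in sk_flags) is mirrored onto the internal
 * CLC socket by smc_copy_sock_settings(), while flags outside the mask stay
 * untouched on the destination socket.
 */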
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}
#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	if (link->llc_confirm_rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_INTERR;

	smc_wr_remember_qp_attr(link);

	rc = smc_wr_reg_send(link,
			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
	if (rc)
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       &link->smcibdev->gid[link->ibport - 1],
				       SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	return 0;
}
static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	smc->conn.peer_conn_idx = clc->conn_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
	smc->conn.peer_rmbe_size = smc_uncompress_bufsize(clc->rmbe_size);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
}
static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->qpn);
	memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->psn);
	link->peer_mtu = clc->qp_mtu;
}
static void smc_lgr_forget(struct smc_link_group *lgr)
{
	spin_lock_bh(&smc_lgr_list.lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(&smc_lgr_list.lock);
}
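/* Client-side CLC handshake at a glance (the flow implemented below):
 * send PROPOSAL over the internal TCP socket, wait for ACCEPT, create the
 * link group and buffers, send CONFIRM, and on first contact additionally
 * exchange CONFIRM LINK LLC messages over the RoCE fabric; any failure
 * declines the connection back to plain TCP.
 */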
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc)
{
	struct smc_clc_msg_accept_confirm aclc;
	int local_contact = SMC_FIRST_CONTACT;
	struct smc_ib_device *smcibdev;
	struct smc_link *link;
	u8 srv_first_contact;
	int reason_code = 0;
	int rc = 0;
	u8 ibport;

	sock_hold(&smc->sk); /* sock put in passive closing */

	if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
		/* peer has not signalled SMC-capability */
		smc->use_fallback = true;
		goto out_connected;
	}

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc)) {
		reason_code = SMC_CLC_DECL_IPSEC;
		goto decline_rdma;
	}

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, &smcibdev, &ibport);
	if (!smcibdev) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* do inband token exchange */
	reason_code = smc_clc_send_proposal(smc, smcibdev, ibport);
	if (reason_code < 0) {
		rc = reason_code;
		goto out_err;
	}
	if (reason_code > 0) /* configuration error */
		goto decline_rdma;
	/* receive SMC Accept CLC message */
	reason_code = smc_clc_wait_msg(smc, &aclc, sizeof(aclc),
				       SMC_CLC_ACCEPT);
	if (reason_code < 0) {
		rc = reason_code;
		goto out_err;
	}
	if (reason_code > 0)
		goto decline_rdma;

	srv_first_contact = aclc.hdr.flag;
	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(smc, smcibdev, ibport, &aclc.lcl,
					srv_first_contact);
	if (local_contact < 0) {
		rc = local_contact;
		if (rc == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
		else if (rc == -ENOLINK)
			reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
		goto decline_rdma_unlock;
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, &aclc);

	/* create send buffer and rmb */
	rc = smc_buf_create(smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma_unlock;
	}

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &aclc);

	rc = smc_rmb_rtoken_handling(&smc->conn, &aclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma_unlock;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma_unlock;
		}
	} else {
		struct smc_buf_desc *buf_desc = smc->conn.rmb_desc;

		if (!buf_desc->reused) {
			/* register memory region for new rmb */
			rc = smc_wr_reg_send(link,
					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
			if (rc) {
				reason_code = SMC_CLC_DECL_INTERR;
				goto decline_rdma_unlock;
			}
		}
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	rc = smc_clc_send_confirm(smc);
	if (rc)
		goto out_err_unlock;

	if (local_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(smc);
		if (reason_code < 0) {
			rc = reason_code;
			goto out_err_unlock;
		}
		if (reason_code > 0)
			goto decline_rdma_unlock;
	}

	mutex_unlock(&smc_create_lgr_pending);
	smc_tx_init(smc);

out_connected:
	smc_copy_sock_settings_to_clc(smc);
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return rc ? rc : local_contact;

decline_rdma_unlock:
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
decline_rdma:
	/* RDMA setup failed, switch back to TCP */
	smc->use_fallback = true;
	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
		rc = smc_clc_send_decline(smc, reason_code);
		if (rc < 0)
			goto out_err;
	}
	goto out_connected;

out_err_unlock:
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
out_err:
	if (smc->sk.sk_state == SMC_INIT)
		sock_put(&smc->sk); /* passive closing */
	return rc;
}
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc)
		goto out;

	/* setup RDMA connection */
	rc = smc_connect_rdma(smc);
	if (rc < 0)
		goto out;
	else
		rc = 0; /* success cases including fallback */

out:
	release_sock(sk);
out_err:
	return rc;
}
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
	lock_sock(lsk);
	if (rc < 0)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		new_sk->sk_prot->unhash(new_sk);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink () */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}
/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}
/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			new_sk->sk_prot->unhash(new_sk);
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock)
			sock_graft(new_sk, new_sock);
		return new_sk;
	}
	return NULL;
}
/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	if (!smc->use_fallback) {
		smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		struct socket *tcp;

		tcp = smc->clcsock;
		smc->clcsock = NULL;
		sock_release(tcp);
	}
	if (smc->use_fallback) {
		sock_put(sk); /* passive closing */
		sk->sk_state = SMC_CLOSED;
	} else {
		if (sk->sk_state == SMC_CLOSED)
			smc_conn_free(&smc->conn);
	}
	release_sock(sk);
	sk->sk_prot->unhash(sk);
	sock_put(sk); /* final sock_put */
}
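/* server-side first-link confirmation: register the new rmb, send a
 * CONFIRM LINK request over the RoCE fabric and wait for the client's
 * response
 */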
static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];

	rc = smc_wr_reg_send(link,
			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
	if (rc)
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       &link->smcibdev->gid[link->ibport - 1],
				       SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	if (link->llc_confirm_resp_rc)
		return SMC_CLC_DECL_RMBE_EC;

	return 0;
}
/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct smc_clc_msg_accept_confirm cclc;
	int local_contact = SMC_REUSE_CONTACT;
	struct sock *newsmcsk = &new_smc->sk;
	struct smc_clc_msg_proposal *pclc;
	struct smc_ib_device *smcibdev;
	u8 buf[SMC_CLC_MAX_LEN];
	struct smc_link *link;
	int reason_code = 0;
	int rc = 0;
	__be32 subnet;
	u8 prefix_len;
	u8 ibport;

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		new_smc->use_fallback = true;
		goto out_connected;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	reason_code = smc_clc_wait_msg(new_smc, &buf, sizeof(buf),
				       SMC_CLC_PROPOSAL);
	if (reason_code < 0)
		goto out_err;
	if (reason_code > 0)
		goto decline_rdma;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		reason_code = SMC_CLC_DECL_IPSEC;
		goto decline_rdma;
	}

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(newclcsock->sk, &smcibdev, &ibport);
	if (!smcibdev) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* determine subnet and mask from internal TCP socket */
	rc = smc_clc_netinfo_by_tcpsk(newclcsock, &subnet, &prefix_len);
	if (rc) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	pclc = (struct smc_clc_msg_proposal *)&buf;
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (pclc_prfx->outgoing_subnet != subnet ||
	    pclc_prfx->prefix_len != prefix_len) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* allocate connection / link group */
	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(new_smc, smcibdev, ibport, &pclc->lcl,
					0);
	if (local_contact < 0) {
		rc = local_contact;
		if (rc == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
		goto decline_rdma_unlock;
	}
	link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	/* create send buffer and rmb */
	rc = smc_buf_create(new_smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma_unlock;
	}

	smc_close_init(new_smc);
	smc_rx_init(new_smc);

	if (local_contact != SMC_FIRST_CONTACT) {
		struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc;

		if (!buf_desc->reused) {
			/* register memory region for new rmb */
			rc = smc_wr_reg_send(link,
					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
			if (rc) {
				reason_code = SMC_CLC_DECL_INTERR;
				goto decline_rdma_unlock;
			}
		}
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	rc = smc_clc_send_accept(new_smc, local_contact);
	if (rc)
		goto out_err_unlock;

	/* receive SMC Confirm CLC message */
	reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
				       SMC_CLC_CONFIRM);
	if (reason_code < 0)
		goto out_err_unlock;
	if (reason_code > 0)
		goto decline_rdma_unlock;
	smc_conn_save_peer_info(new_smc, &cclc);
	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &cclc);

	rc = smc_rmb_rtoken_handling(&new_smc->conn, &cclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma_unlock;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma_unlock;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code < 0)
			/* peer is not aware of a problem */
			goto out_err_unlock;
		if (reason_code > 0)
			goto decline_rdma_unlock;
	}

	smc_tx_init(new_smc);
	mutex_unlock(&smc_create_lgr_pending);

out_connected:
	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;
enqueue:
	lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}
	release_sock(&lsmc->sk);

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
	return;

decline_rdma_unlock:
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(new_smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
decline_rdma:
	/* RDMA setup failed, switch back to TCP */
	smc_conn_free(&new_smc->conn);
	new_smc->use_fallback = true;
	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
		if (smc_clc_send_decline(new_smc, reason_code) < 0)
			goto out_err;
	}
	goto out_connected;

out_err_unlock:
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(new_smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
out_err:
	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;
	smc_conn_free(&new_smc->conn);
	goto enqueue; /* queue new sock with sk_err set */
}
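/* listen worker: accept new TCP connections on the internal CLC socket
 * and hand each one over to smc_listen_work() for the SMC handshake
 */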
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = false; /* assume rdma capability first*/
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!schedule_work(&new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	if (lsmc->clcsock) {
		sock_release(lsmc->clcsock);
		lsmc->clcsock = NULL;
	}
	release_sock(lsk);
	/* no more listening, wake up smc_close_wait_listen_clcsock and
	 * accept
	 */
	lsk->sk_state_change(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}
static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	sock_hold(sk); /* sock_hold in tcp_listen_worker */
	if (!schedule_work(&smc->tcp_listen_work))
		sock_put(sk);

out:
	release_sock(sk);
	return rc;
}
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);

out:
	release_sock(sk);
	sock_put(sk); /* sock_hold above */
	return rc;
}
static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}
static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;
	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}
static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	else
		rc = smc_rx_recvmsg(smc, msg, len, flags);

out:
	release_sock(sk);
	return rc;
}
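/* poll helper for listen sockets: report EPOLLIN as soon as at least one
 * incoming connection is queued for accept()
 */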
static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}
static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;
	struct smc_sock *smc;
	int rc;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	sock_hold(sk);
	lock_sock(sk);
	if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
		/* delegate to CLC child sock */
		release_sock(sk);
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		/* if non-blocking connect finished ... */
		lock_sock(sk);
		if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) {
			sk->sk_err = smc->clcsock->sk->sk_err;
			if (sk->sk_err) {
				mask |= EPOLLERR;
			} else {
				rc = smc_connect_rdma(smc);
				if (rc < 0)
					mask |= EPOLLERR;
				else
					/* success cases including fallback */
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		}
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			release_sock(sk);
			sock_poll_wait(file, sk_sleep(sk), wait);
			lock_sock(sk);
		}
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask = smc_accept_poll(sk);
		} else {
			if (atomic_read(&smc->conn.sndbuf_space) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
		}
	}
	release_sock(sk);
	sock_put(sk);

	return mask;
}
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_LISTEN) &&
	    (sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		if (sk->sk_state == SMC_LISTEN)
			rc = smc_close_active(smc);
		else
			rc = 0;
			/* nothing more to do because peer is not involved */
		break;
	}
	rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}
static int smc_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	return smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback)
		return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
	else
		return sock_no_ioctl(sock, cmd, arg);
}
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE)
		goto out;
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	release_sock(sk);
	return rc;
}
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_CLOSED))
		goto out;
	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		rc = -EOPNOTSUPP;
	}
out:
	release_sock(sk);
	return rc;
}
/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};
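/* create an SMC socket together with its internal TCP companion socket;
 * the TCP socket carries the CLC handshake and serves as the fallback path
 */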
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if ((protocol != IPPROTO_IP) && (protocol != IPPROTO_TCP))
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
			      IPPROTO_TCP, &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}
static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};
static int __init smc_init(void)
{
	int rc;

	rc = smc_pnet_init();
	if (rc)
		return rc;

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}
	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto:
	proto_unregister(&smc_proto);
out_pnet:
	smc_pnet_exit();
	return rc;
}
static void __exit smc_exit(void)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_freeing_list);

	spin_lock_bh(&smc_lgr_list.lock);
	if (!list_empty(&smc_lgr_list.list))
		list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
	spin_unlock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
		list_del_init(&lgr->list);
		smc_lgr_free(lgr); /* free link group */
	}
	static_branch_disable(&tcp_have_smc);
	smc_ib_unregister_client();
	sock_unregister(PF_SMC);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
}
module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);