1 // SPDX-License-Identifier: GPL-2.0-only
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
5 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
6 * applies to SOCK_STREAM sockets only
7 * offers an alternative communication option for TCP-protocol sockets
8 * applicable with RoCE-cards only
10 * Initial restrictions:
11 * - support for alternate links postponed
13 * Copyright IBM Corp. 2016, 2018
15 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
16 * based on prototype from Frank Blaschka
19 #define KMSG_COMPONENT "smc"
20 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22 #include <linux/module.h>
23 #include <linux/socket.h>
24 #include <linux/workqueue.h>
26 #include <linux/sched/signal.h>
27 #include <linux/if_vlan.h>
28 #include <linux/rcupdate_wait.h>
29 #include <linux/ctype.h>
34 #include <asm/ioctls.h>
36 #include <net/net_namespace.h>
37 #include <net/netns/generic.h>
38 #include "smc_netns.h"
48 #include "smc_netlink.h"
51 #include "smc_close.h"
52 #include "smc_stats.h"
54 static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group creation on server */
57 static DEFINE_MUTEX(smc_client_lgr_pending); /* serialize link group creation on client */
61 struct workqueue_struct *smc_hs_wq; /* wq for handshake work */
62 struct workqueue_struct *smc_close_wq; /* wq for close work */
64 static void smc_tcp_listen_work(struct work_struct *);
65 static void smc_connect_work(struct work_struct *);
67 static void smc_set_keepalive(struct sock *sk, int val)
69 struct smc_sock *smc = smc_sk(sk);
71 smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
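/* SMC sockets are kept in simple per-protocol hash lists (one for SMC over
 * IPv4, one for SMC over IPv6) protected by a rwlock; smc_hash_sk() and
 * smc_unhash_sk() below add/remove a socket and keep the per-netns protocol
 * "inuse" counter in sync.
 */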
74 static struct smc_hashinfo smc_v4_hashinfo = {
75 .lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
78 static struct smc_hashinfo smc_v6_hashinfo = {
79 .lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
82 int smc_hash_sk(struct sock *sk)
84 struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
85 struct hlist_head *head;
89 write_lock_bh(&h->lock);
90 sk_add_node(sk, head);
91 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
92 write_unlock_bh(&h->lock);
96 EXPORT_SYMBOL_GPL(smc_hash_sk);
98 void smc_unhash_sk(struct sock *sk)
100 struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
102 write_lock_bh(&h->lock);
103 if (sk_del_node_init(sk))
104 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
105 write_unlock_bh(&h->lock);
107 EXPORT_SYMBOL_GPL(smc_unhash_sk);
109 struct proto smc_proto = {
111 .owner = THIS_MODULE,
112 .keepalive = smc_set_keepalive,
114 .unhash = smc_unhash_sk,
115 .obj_size = sizeof(struct smc_sock),
116 .h.smc_hash = &smc_v4_hashinfo,
117 .slab_flags = SLAB_TYPESAFE_BY_RCU,
119 EXPORT_SYMBOL_GPL(smc_proto);
121 struct proto smc_proto6 = {
123 .owner = THIS_MODULE,
124 .keepalive = smc_set_keepalive,
126 .unhash = smc_unhash_sk,
127 .obj_size = sizeof(struct smc_sock),
128 .h.smc_hash = &smc_v6_hashinfo,
129 .slab_flags = SLAB_TYPESAFE_BY_RCU,
131 EXPORT_SYMBOL_GPL(smc_proto6);
133 static void smc_restore_fallback_changes(struct smc_sock *smc)
135 if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
136 smc->clcsock->file->private_data = smc->sk.sk_socket;
137 smc->clcsock->file = NULL;
141 static int __smc_release(struct smc_sock *smc)
143 struct sock *sk = &smc->sk;
146 if (!smc->use_fallback) {
147 rc = smc_close_active(smc);
148 sock_set_flag(sk, SOCK_DEAD);
149 sk->sk_shutdown |= SHUTDOWN_MASK;
151 if (sk->sk_state != SMC_CLOSED) {
152 if (sk->sk_state != SMC_LISTEN &&
153 sk->sk_state != SMC_INIT)
154 sock_put(sk); /* passive closing */
155 if (sk->sk_state == SMC_LISTEN) {
156 /* wake up clcsock accept */
157 rc = kernel_sock_shutdown(smc->clcsock,
160 sk->sk_state = SMC_CLOSED;
161 sk->sk_state_change(sk);
163 smc_restore_fallback_changes(smc);
166 sk->sk_prot->unhash(sk);
168 if (sk->sk_state == SMC_CLOSED) {
171 smc_clcsock_release(smc);
174 if (!smc->use_fallback)
175 smc_conn_free(&smc->conn);
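/* smc_release() is the close() entry point: it aborts a dangling
 * non-blocking connect, cancels pending connect work, takes the sock lock
 * (nested for listening sockets because smc_close_non_accepted() locks the
 * child sockets again) and then runs __smc_release() above.
 */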
181 static int smc_release(struct socket *sock)
183 struct sock *sk = sock->sk;
184 struct smc_sock *smc;
190 sock_hold(sk); /* sock_put below */
193 /* cleanup for a dangling non-blocking connect */
194 if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
195 tcp_abort(smc->clcsock->sk, ECONNABORTED);
197 if (cancel_work_sync(&smc->connect_work))
198 sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
200 if (sk->sk_state == SMC_LISTEN)
201 /* smc_close_non_accepted() is called and acquires
202 * sock lock for child sockets again */
204 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
208 rc = __smc_release(smc);
215 sock_put(sk); /* sock_hold above */
216 sock_put(sk); /* final sock_put */
221 static void smc_destruct(struct sock *sk)
223 if (sk->sk_state != SMC_CLOSED)
225 if (!sock_flag(sk, SOCK_DEAD))
228 sk_refcnt_debug_dec(sk);
231 static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
234 struct smc_sock *smc;
238 prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
239 sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
243 sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
244 sk->sk_state = SMC_INIT;
245 sk->sk_destruct = smc_destruct;
246 sk->sk_protocol = protocol;
248 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
249 INIT_WORK(&smc->connect_work, smc_connect_work);
250 INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
251 INIT_LIST_HEAD(&smc->accept_q);
252 spin_lock_init(&smc->accept_q_lock);
253 spin_lock_init(&smc->conn.send_lock);
254 sk->sk_prot->hash(sk);
255 sk_refcnt_debug_inc(sk);
256 mutex_init(&smc->clcsock_release_lock);
261 static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
264 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
265 struct sock *sk = sock->sk;
266 struct smc_sock *smc;
271 /* replicate tests from inet_bind(), to be safe wrt. future changes */
273 if (addr_len < sizeof(struct sockaddr_in))
277 if (addr->sin_family != AF_INET &&
278 addr->sin_family != AF_INET6 &&
279 addr->sin_family != AF_UNSPEC)
281 /* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
282 if (addr->sin_family == AF_UNSPEC &&
283 addr->sin_addr.s_addr != htonl(INADDR_ANY))
288 /* Check if socket is already active */
290 if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
293 smc->clcsock->sk->sk_reuse = sk->sk_reuse;
294 rc = kernel_bind(smc->clcsock, uaddr, addr_len);
302 static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
305 /* options we do not get control over via setsockopt */
306 nsk->sk_type = osk->sk_type;
307 nsk->sk_sndbuf = osk->sk_sndbuf;
308 nsk->sk_rcvbuf = osk->sk_rcvbuf;
309 nsk->sk_sndtimeo = osk->sk_sndtimeo;
310 nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
311 nsk->sk_mark = osk->sk_mark;
312 nsk->sk_priority = osk->sk_priority;
313 nsk->sk_rcvlowat = osk->sk_rcvlowat;
314 nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
315 nsk->sk_err = osk->sk_err;
317 nsk->sk_flags &= ~mask;
318 nsk->sk_flags |= osk->sk_flags & mask;
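/* Only the flag bits contained in @mask are copied: they are cleared on the
 * destination socket first and then OR-ed in from the source, so all other
 * flags of the destination socket remain untouched.
 */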
321 #define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
322 (1UL << SOCK_KEEPOPEN) | \
323 (1UL << SOCK_LINGER) | \
324 (1UL << SOCK_BROADCAST) | \
325 (1UL << SOCK_TIMESTAMP) | \
326 (1UL << SOCK_DBG) | \
327 (1UL << SOCK_RCVTSTAMP) | \
328 (1UL << SOCK_RCVTSTAMPNS) | \
329 (1UL << SOCK_LOCALROUTE) | \
330 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
331 (1UL << SOCK_RXQ_OVFL) | \
332 (1UL << SOCK_WIFI_STATUS) | \
333 (1UL << SOCK_NOFCS) | \
334 (1UL << SOCK_FILTER_LOCKED) | \
335 (1UL << SOCK_TSTAMP_NEW))
336 /* copy only relevant settings and flags of SOL_SOCKET level from smc to
337 * clc socket (since smc is not called for these options from net/core) */
339 static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
341 smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
344 #define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
345 (1UL << SOCK_KEEPOPEN) | \
346 (1UL << SOCK_LINGER) | \
348 /* copy only settings and flags relevant for smc from clc to smc socket */
349 static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
351 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
354 /* register the new rmb on all links */
355 static int smcr_lgr_reg_rmbs(struct smc_link *link,
356 struct smc_buf_desc *rmb_desc)
358 struct smc_link_group *lgr = link->lgr;
361 rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
364 /* protect against parallel smc_llc_cli_rkey_exchange() and
365 * parallel smcr_link_reg_rmb() */
367 mutex_lock(&lgr->llc_conf_mutex);
368 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
369 if (!smc_link_active(&lgr->lnk[i]))
371 rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
376 /* exchange confirm_rkey msg with peer */
377 rc = smc_llc_do_confirm_rkey(link, rmb_desc);
382 rmb_desc->is_conf_rkey = true;
384 mutex_unlock(&lgr->llc_conf_mutex);
385 smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
389 static int smcr_clnt_conf_first_link(struct smc_sock *smc)
391 struct smc_link *link = smc->conn.lnk;
392 struct smc_llc_qentry *qentry;
395 /* receive CONFIRM LINK request from server over RoCE fabric */
396 qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
397 SMC_LLC_CONFIRM_LINK);
399 struct smc_clc_msg_decline dclc;
401 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
402 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
403 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
405 smc_llc_save_peer_uid(qentry);
406 rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
407 smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
409 return SMC_CLC_DECL_RMBE_EC;
411 rc = smc_ib_modify_qp_rts(link);
413 return SMC_CLC_DECL_ERR_RDYLNK;
415 smc_wr_remember_qp_attr(link);
417 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
418 return SMC_CLC_DECL_ERR_REGRMB;
420 /* confirm_rkey is implicit on 1st contact */
421 smc->conn.rmb_desc->is_conf_rkey = true;
423 /* send CONFIRM LINK response over RoCE fabric */
424 rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
426 return SMC_CLC_DECL_TIMEOUT_CL;
428 smc_llc_link_active(link);
429 smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
431 /* optional 2nd link, receive ADD LINK request from server */
432 qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
435 struct smc_clc_msg_decline dclc;
437 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
438 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
440 rc = 0; /* no DECLINE received, go with one link */
443 smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
444 smc_llc_cli_add_link(link, qentry);
448 static void smcr_conn_save_peer_info(struct smc_sock *smc,
449 struct smc_clc_msg_accept_confirm *clc)
451 int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);
453 smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
454 smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
455 smc->conn.peer_rmbe_size = bufsize;
456 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
457 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
460 static bool smc_isascii(char *hostname)
464 for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
465 if (!isascii(hostname[i]))
470 static void smcd_conn_save_peer_info(struct smc_sock *smc,
471 struct smc_clc_msg_accept_confirm *clc)
473 int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);
475 smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
476 smc->conn.peer_token = clc->d0.token;
477 /* msg header takes up space in the buffer */
478 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
479 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
480 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
481 if (clc->hdr.version > SMC_V1 &&
482 (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) {
483 struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
484 (struct smc_clc_msg_accept_confirm_v2 *)clc;
485 struct smc_clc_first_contact_ext *fce =
486 (struct smc_clc_first_contact_ext *)
487 (((u8 *)clc_v2) + sizeof(*clc_v2));
489 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
491 smc->conn.lgr->peer_os = fce->os_type;
492 smc->conn.lgr->peer_smc_release = fce->release;
493 if (smc_isascii(fce->hostname))
494 memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
495 SMC_MAX_HOSTNAME_LEN);
499 static void smc_conn_save_peer_info(struct smc_sock *smc,
500 struct smc_clc_msg_accept_confirm *clc)
502 if (smc->conn.lgr->is_smcd)
503 smcd_conn_save_peer_info(smc, clc);
505 smcr_conn_save_peer_info(smc, clc);
508 static void smc_link_save_peer_info(struct smc_link *link,
509 struct smc_clc_msg_accept_confirm *clc)
511 link->peer_qpn = ntoh24(clc->r0.qpn);
512 memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE);
513 memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac));
514 link->peer_psn = ntoh24(clc->r0.psn);
515 link->peer_mtu = clc->r0.qp_mtu;
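/* Fallback statistics: each distinct fallback reason code is counted in a
 * small per-netns array of struct smc_stats_fback, kept separately for the
 * server (listening) and the client side, plus a total fallback counter.
 */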
518 static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
519 struct smc_stats_fback *fback_arr)
523 for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
524 if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
525 fback_arr[cnt].count++;
528 if (!fback_arr[cnt].fback_code) {
529 fback_arr[cnt].fback_code = smc->fallback_rsn;
530 fback_arr[cnt].count++;
536 static void smc_stat_fallback(struct smc_sock *smc)
538 struct net *net = sock_net(&smc->sk);
540 mutex_lock(&net->smc.mutex_fback_rsn);
541 if (smc->listen_smc) {
542 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
543 net->smc.fback_rsn->srv_fback_cnt++;
545 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
546 net->smc.fback_rsn->clnt_fback_cnt++;
548 mutex_unlock(&net->smc.mutex_fback_rsn);
551 /* must be called under rcu read lock */
552 static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key)
554 struct socket_wq *wq;
557 wq = rcu_dereference(smc->sk.sk_wq);
558 if (!skwq_has_sleeper(wq))
561 /* wake up smc sk->sk_wq */
563 /* sk_state_change */
564 wake_up_interruptible_all(&wq->wait);
566 flags = key_to_poll(key);
567 if (flags & (EPOLLIN | EPOLLOUT))
568 /* sk_data_ready or sk_write_space */
569 wake_up_interruptible_sync_poll(&wq->wait, flags);
570 else if (flags & EPOLLERR)
571 /* sk_error_report */
572 wake_up_interruptible_poll(&wq->wait, flags);
576 static int smc_fback_mark_woken(wait_queue_entry_t *wait,
577 unsigned int mode, int sync, void *key)
579 struct smc_mark_woken *mark =
580 container_of(wait, struct smc_mark_woken, wait_entry);
587 static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk,
588 void (*clcsock_callback)(struct sock *sk))
590 struct smc_mark_woken mark = { .woken = false };
591 struct socket_wq *wq;
593 init_waitqueue_func_entry(&mark.wait_entry,
594 smc_fback_mark_woken);
596 wq = rcu_dereference(clcsk->sk_wq);
599 add_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
600 clcsock_callback(clcsk);
601 remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
604 smc_fback_wakeup_waitqueue(smc, mark.key);
609 static void smc_fback_state_change(struct sock *clcsk)
611 struct smc_sock *smc =
612 smc_clcsock_user_data(clcsk);
616 smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change);
619 static void smc_fback_data_ready(struct sock *clcsk)
621 struct smc_sock *smc =
622 smc_clcsock_user_data(clcsk);
626 smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready);
629 static void smc_fback_write_space(struct sock *clcsk)
631 struct smc_sock *smc =
632 smc_clcsock_user_data(clcsk);
636 smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space);
639 static void smc_fback_error_report(struct sock *clcsk)
641 struct smc_sock *smc =
642 smc_clcsock_user_data(clcsk);
646 smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report);
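/* On fallback the smc socket keeps using its internal clcsock.
 * smc_switch_to_fallback() hands the struct file over to the clcsock, saves
 * the clcsock's original sk_* callbacks and installs the smc_fback_*
 * wrappers above, so that wakeups arriving on the clcsock also reach
 * waiters still sleeping on the smc socket's wait queue.
 */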
649 static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
654 mutex_lock(&smc->clcsock_release_lock);
659 clcsk = smc->clcsock->sk;
661 if (smc->use_fallback)
663 smc->use_fallback = true;
664 smc->fallback_rsn = reason_code;
665 smc_stat_fallback(smc);
666 if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
667 smc->clcsock->file = smc->sk.sk_socket->file;
668 smc->clcsock->file->private_data = smc->clcsock;
669 smc->clcsock->wq.fasync_list =
670 smc->sk.sk_socket->wq.fasync_list;
672 /* There might be some wait entries remaining
673 * in smc sk->sk_wq and they should be woken up
674 * as clcsock's wait queue is woken up. */
676 smc->clcsk_state_change = clcsk->sk_state_change;
677 smc->clcsk_data_ready = clcsk->sk_data_ready;
678 smc->clcsk_write_space = clcsk->sk_write_space;
679 smc->clcsk_error_report = clcsk->sk_error_report;
681 clcsk->sk_state_change = smc_fback_state_change;
682 clcsk->sk_data_ready = smc_fback_data_ready;
683 clcsk->sk_write_space = smc_fback_write_space;
684 clcsk->sk_error_report = smc_fback_error_report;
686 smc->clcsock->sk->sk_user_data =
687 (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
690 mutex_unlock(&smc->clcsock_release_lock);
694 /* fall back during connect */
695 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
697 struct net *net = sock_net(&smc->sk);
700 rc = smc_switch_to_fallback(smc, reason_code);
701 if (rc) { /* fallback fails */
702 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
703 if (smc->sk.sk_state == SMC_INIT)
704 sock_put(&smc->sk); /* passive closing */
707 smc_copy_sock_settings_to_clc(smc);
708 smc->connect_nonblock = 0;
709 if (smc->sk.sk_state == SMC_INIT)
710 smc->sk.sk_state = SMC_ACTIVE;
714 /* decline and fall back during connect */
715 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
718 struct net *net = sock_net(&smc->sk);
721 if (reason_code < 0) { /* error, fallback is not possible */
722 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
723 if (smc->sk.sk_state == SMC_INIT)
724 sock_put(&smc->sk); /* passive closing */
727 if (reason_code != SMC_CLC_DECL_PEERDECL) {
728 rc = smc_clc_send_decline(smc, reason_code, version);
730 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
731 if (smc->sk.sk_state == SMC_INIT)
732 sock_put(&smc->sk); /* passive closing */
736 return smc_connect_fallback(smc, reason_code);
739 static void smc_conn_abort(struct smc_sock *smc, int local_first)
741 struct smc_connection *conn = &smc->conn;
742 struct smc_link_group *lgr = conn->lgr;
746 smc_lgr_cleanup_early(lgr);
749 /* check if there is an RDMA device available for this connection. */
750 /* called for connect and listen */
751 static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
753 /* PNET table look up: search active ib_device and port
754 * within same PNETID that also contains the ethernet device
755 * used for the internal TCP socket */
757 smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
759 return SMC_CLC_DECL_NOSMCRDEV;
763 /* check if there is an ISM device available for this connection. */
764 /* called for connect and listen */
765 static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
767 /* Find ISM device with same PNETID as connecting interface */
768 smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
769 if (!ini->ism_dev[0])
770 return SMC_CLC_DECL_NOSMCDDEV;
772 ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
776 /* is chid unique for the ism devices that are already determined? */
777 static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
780 int i = (!ini->ism_dev[0]) ? 1 : 0;
783 if (ini->ism_chid[i] == chid)
788 /* determine possible V2 ISM devices (either without PNETID or with PNETID plus
789 * PNETID matching net_device) */
791 static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
792 struct smc_init_info *ini)
794 int rc = SMC_CLC_DECL_NOSMCDDEV;
795 struct smcd_dev *smcd;
799 if (smcd_indicated(ini->smc_type_v1))
800 rc = 0; /* already initialized for V1 */
801 mutex_lock(&smcd_dev_list.mutex);
802 list_for_each_entry(smcd, &smcd_dev_list.list, list) {
803 if (smcd->going_away || smcd == ini->ism_dev[0])
805 chid = smc_ism_get_chid(smcd);
806 if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
808 if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
809 smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
810 ini->ism_dev[i] = smcd;
811 ini->ism_chid[i] = chid;
815 if (i > SMC_MAX_ISM_DEVS)
819 mutex_unlock(&smcd_dev_list.mutex);
820 ini->ism_offered_cnt = i - 1;
821 if (!ini->ism_dev[0] && !ini->ism_dev[1])
822 ini->smcd_version = 0;
827 /* Check for VLAN ID and register it on ISM device just for CLC handshake */
828 static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
829 struct smc_init_info *ini)
831 if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
832 return SMC_CLC_DECL_ISMVLANERR;
836 static int smc_find_proposal_devices(struct smc_sock *smc,
837 struct smc_init_info *ini)
841 /* check if there is an ism device available */
842 if (ini->smcd_version & SMC_V1) {
843 if (smc_find_ism_device(smc, ini) ||
844 smc_connect_ism_vlan_setup(smc, ini)) {
845 if (ini->smc_type_v1 == SMC_TYPE_B)
846 ini->smc_type_v1 = SMC_TYPE_R;
848 ini->smc_type_v1 = SMC_TYPE_N;
849 } /* else ISM V1 is supported for this connection */
850 if (smc_find_rdma_device(smc, ini)) {
851 if (ini->smc_type_v1 == SMC_TYPE_B)
852 ini->smc_type_v1 = SMC_TYPE_D;
854 ini->smc_type_v1 = SMC_TYPE_N;
855 } /* else RDMA is supported for this connection */
857 if (smc_ism_is_v2_capable() && smc_find_ism_v2_device_clnt(smc, ini))
858 ini->smc_type_v2 = SMC_TYPE_N;
860 /* if neither ISM nor RDMA are supported, fallback */
861 if (!smcr_indicated(ini->smc_type_v1) &&
862 ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
863 rc = SMC_CLC_DECL_NOSMCDEV;
868 /* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
869 * used, the VLAN ID will be registered again during the connection setup. */
871 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
872 struct smc_init_info *ini)
874 if (!smcd_indicated(ini->smc_type_v1))
876 if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
877 return SMC_CLC_DECL_CNFERR;
881 #define SMC_CLC_MAX_ACCEPT_LEN \
882 (sizeof(struct smc_clc_msg_accept_confirm_v2) + \
883 sizeof(struct smc_clc_first_contact_ext) + \
884 sizeof(struct smc_clc_msg_trail))
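/* The CLC accept/confirm is received into a buffer of
 * SMC_CLC_MAX_ACCEPT_LEN bytes, large enough for a V2 accept including the
 * optional first contact extension and the CLC message trailer.
 */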
886 /* CLC handshake during connect */
887 static int smc_connect_clc(struct smc_sock *smc,
888 struct smc_clc_msg_accept_confirm_v2 *aclc2,
889 struct smc_init_info *ini)
893 /* do inband token exchange */
894 rc = smc_clc_send_proposal(smc, ini);
897 /* receive SMC Accept CLC message */
898 return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
899 SMC_CLC_ACCEPT, CLC_WAIT_TIME);
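/* smc_connect_rdma() below performs the client side SMC-R setup while
 * holding the smc_client_lgr_pending mutex: create or reuse a link group,
 * save the peer info from the CLC accept, select the link chosen by the
 * server (for a follow-on contact), create the send buffer and RMB, process
 * the peer's rtoken and, on first contact, bring the link to ready state,
 * register the RMB on all links and run the CONFIRM LINK exchange.
 */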
902 /* setup for RDMA connection of client */
903 static int smc_connect_rdma(struct smc_sock *smc,
904 struct smc_clc_msg_accept_confirm *aclc,
905 struct smc_init_info *ini)
907 int i, reason_code = 0;
908 struct smc_link *link;
910 ini->is_smcd = false;
911 ini->ib_lcl = &aclc->r0.lcl;
912 ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
913 ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
915 mutex_lock(&smc_client_lgr_pending);
916 reason_code = smc_conn_create(smc, ini);
918 mutex_unlock(&smc_client_lgr_pending);
922 smc_conn_save_peer_info(smc, aclc);
924 if (ini->first_contact_local) {
925 link = smc->conn.lnk;
927 /* set link that was assigned by server */
929 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
930 struct smc_link *l = &smc->conn.lgr->lnk[i];
932 if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
933 !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
935 !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
936 sizeof(l->peer_mac))) {
942 reason_code = SMC_CLC_DECL_NOSRVLINK;
945 smc_switch_link_and_count(&smc->conn, link);
948 /* create send buffer and rmb */
949 if (smc_buf_create(smc, false)) {
950 reason_code = SMC_CLC_DECL_MEM;
954 if (ini->first_contact_local)
955 smc_link_save_peer_info(link, aclc);
957 if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
958 reason_code = SMC_CLC_DECL_ERR_RTOK;
965 if (ini->first_contact_local) {
966 if (smc_ib_ready_link(link)) {
967 reason_code = SMC_CLC_DECL_ERR_RDYLNK;
971 if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
972 reason_code = SMC_CLC_DECL_ERR_REGRMB;
976 smc_rmb_sync_sg_for_device(&smc->conn);
978 reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
985 if (ini->first_contact_local) {
986 /* QP confirmation over RoCE fabric */
987 smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
988 reason_code = smcr_clnt_conf_first_link(smc);
989 smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
993 mutex_unlock(&smc_client_lgr_pending);
995 smc_copy_sock_settings_to_clc(smc);
996 smc->connect_nonblock = 0;
997 if (smc->sk.sk_state == SMC_INIT)
998 smc->sk.sk_state = SMC_ACTIVE;
1002 smc_conn_abort(smc, ini->first_contact_local);
1003 mutex_unlock(&smc_client_lgr_pending);
1004 smc->connect_nonblock = 0;
1009 /* The server has chosen one of the proposed ISM devices for the communication.
1010 * Determine from the CHID of the received CLC ACCEPT the ISM device chosen. */
1013 smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
1014 struct smc_init_info *ini)
1018 for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
1019 if (ini->ism_chid[i] == ntohs(aclc->chid)) {
1020 ini->ism_selected = i;
1028 /* setup for ISM connection of client */
1029 static int smc_connect_ism(struct smc_sock *smc,
1030 struct smc_clc_msg_accept_confirm *aclc,
1031 struct smc_init_info *ini)
1035 ini->is_smcd = true;
1036 ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
1038 if (aclc->hdr.version == SMC_V2) {
1039 struct smc_clc_msg_accept_confirm_v2 *aclc_v2 =
1040 (struct smc_clc_msg_accept_confirm_v2 *)aclc;
1042 rc = smc_v2_determine_accepted_chid(aclc_v2, ini);
1046 ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;
1048 /* there is only one lgr role for SMC-D; use server lock */
1049 mutex_lock(&smc_server_lgr_pending);
1050 rc = smc_conn_create(smc, ini);
1052 mutex_unlock(&smc_server_lgr_pending);
1056 /* Create send and receive buffers */
1057 rc = smc_buf_create(smc, true);
1059 rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
1063 smc_conn_save_peer_info(smc, aclc);
1064 smc_close_init(smc);
1068 rc = smc_clc_send_confirm(smc, ini->first_contact_local,
1072 mutex_unlock(&smc_server_lgr_pending);
1074 smc_copy_sock_settings_to_clc(smc);
1075 smc->connect_nonblock = 0;
1076 if (smc->sk.sk_state == SMC_INIT)
1077 smc->sk.sk_state = SMC_ACTIVE;
1081 smc_conn_abort(smc, ini->first_contact_local);
1082 mutex_unlock(&smc_server_lgr_pending);
1083 smc->connect_nonblock = 0;
1088 /* check if received accept type and version match a proposed one */
1089 static int smc_connect_check_aclc(struct smc_init_info *ini,
1090 struct smc_clc_msg_accept_confirm *aclc)
1092 if ((aclc->hdr.typev1 == SMC_TYPE_R &&
1093 !smcr_indicated(ini->smc_type_v1)) ||
1094 (aclc->hdr.typev1 == SMC_TYPE_D &&
1095 ((!smcd_indicated(ini->smc_type_v1) &&
1096 !smcd_indicated(ini->smc_type_v2)) ||
1097 (aclc->hdr.version == SMC_V1 &&
1098 !smcd_indicated(ini->smc_type_v1)) ||
1099 (aclc->hdr.version == SMC_V2 &&
1100 !smcd_indicated(ini->smc_type_v2)))))
1101 return SMC_CLC_DECL_MODEUNSUPP;
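/* __smc_connect() drives the client handshake: fall back right away if the
 * peer did not signal SMC capability or IPsec is in use, determine the VLAN
 * and the ISM/RDMA devices to propose, run the CLC handshake, match the
 * received accept against the proposal and finally complete the setup via
 * smc_connect_rdma() or smc_connect_ism().
 */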
1106 /* perform steps before actually connecting */
1107 static int __smc_connect(struct smc_sock *smc)
1109 u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
1110 struct smc_clc_msg_accept_confirm_v2 *aclc2;
1111 struct smc_clc_msg_accept_confirm *aclc;
1112 struct smc_init_info *ini = NULL;
1116 if (smc->use_fallback)
1117 return smc_connect_fallback(smc, smc->fallback_rsn);
1119 /* if peer has not signalled SMC-capability, fall back */
1120 if (!tcp_sk(smc->clcsock->sk)->syn_smc)
1121 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
1123 /* IPSec connections opt out of SMC optimizations */
1124 if (using_ipsec(smc))
1125 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
1128 ini = kzalloc(sizeof(*ini), GFP_KERNEL);
1130 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
1133 ini->smcd_version = SMC_V1;
1134 ini->smcd_version |= smc_ism_is_v2_capable() ? SMC_V2 : 0;
1135 ini->smc_type_v1 = SMC_TYPE_B;
1136 ini->smc_type_v2 = smc_ism_is_v2_capable() ? SMC_TYPE_D : SMC_TYPE_N;
1138 /* get vlan id from IP device */
1139 if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
1140 ini->smcd_version &= ~SMC_V1;
1141 ini->smc_type_v1 = SMC_TYPE_N;
1142 if (!ini->smcd_version) {
1143 rc = SMC_CLC_DECL_GETVLANERR;
1148 rc = smc_find_proposal_devices(smc, ini);
1152 buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
1154 rc = SMC_CLC_DECL_MEM;
1157 aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
1158 aclc = (struct smc_clc_msg_accept_confirm *)aclc2;
1160 /* perform CLC handshake */
1161 rc = smc_connect_clc(smc, aclc2, ini);
1165 /* check if smc modes and versions of CLC proposal and accept match */
1166 rc = smc_connect_check_aclc(ini, aclc);
1167 version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
1168 ini->smcd_version = version;
1172 /* depending on previous steps, connect using rdma or ism */
1173 if (aclc->hdr.typev1 == SMC_TYPE_R)
1174 rc = smc_connect_rdma(smc, aclc, ini);
1175 else if (aclc->hdr.typev1 == SMC_TYPE_D)
1176 rc = smc_connect_ism(smc, aclc, ini);
1180 SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
1181 smc_connect_ism_vlan_cleanup(smc, ini);
1187 smc_connect_ism_vlan_cleanup(smc, ini);
1191 return smc_connect_decline_fallback(smc, rc, version);
1194 static void smc_connect_work(struct work_struct *work)
1196 struct smc_sock *smc = container_of(work, struct smc_sock,
1198 long timeo = smc->sk.sk_sndtimeo;
1202 timeo = MAX_SCHEDULE_TIMEOUT;
1203 lock_sock(smc->clcsock->sk);
1204 if (smc->clcsock->sk->sk_err) {
1205 smc->sk.sk_err = smc->clcsock->sk->sk_err;
1206 } else if ((1 << smc->clcsock->sk->sk_state) &
1207 (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1208 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
1209 if ((rc == -EPIPE) &&
1210 ((1 << smc->clcsock->sk->sk_state) &
1211 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
1214 release_sock(smc->clcsock->sk);
1215 lock_sock(&smc->sk);
1216 if (rc != 0 || smc->sk.sk_err) {
1217 smc->sk.sk_state = SMC_CLOSED;
1218 if (rc == -EPIPE || rc == -EAGAIN)
1219 smc->sk.sk_err = EPIPE;
1220 else if (signal_pending(current))
1221 smc->sk.sk_err = -sock_intr_errno(timeo);
1222 sock_put(&smc->sk); /* passive closing */
1226 rc = __smc_connect(smc);
1228 smc->sk.sk_err = -rc;
1231 if (!sock_flag(&smc->sk, SOCK_DEAD)) {
1232 if (smc->sk.sk_err) {
1233 smc->sk.sk_state_change(&smc->sk);
1234 } else { /* allow polling before and after fallback decision */
1235 smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
1236 smc->sk.sk_write_space(&smc->sk);
1239 release_sock(&smc->sk);
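/* connect() on an SMC socket first connects the internal TCP clcsock; for
 * O_NONBLOCK sockets the remaining SMC handshake is deferred to
 * smc_connect_work() above, while blocking sockets run __smc_connect()
 * inline.
 */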
1242 static int smc_connect(struct socket *sock, struct sockaddr *addr,
1243 int alen, int flags)
1245 struct sock *sk = sock->sk;
1246 struct smc_sock *smc;
1251 /* separate smc parameter checking to be safe */
1252 if (alen < sizeof(addr->sa_family))
1254 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
1258 switch (sk->sk_state) {
1268 smc_copy_sock_settings_to_clc(smc);
1269 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1270 if (smc->connect_nonblock) {
1274 rc = kernel_connect(smc->clcsock, addr, alen, flags);
1275 if (rc && rc != -EINPROGRESS)
1278 sock_hold(&smc->sk); /* sock put in passive closing */
1279 if (smc->use_fallback)
1281 if (flags & O_NONBLOCK) {
1282 if (queue_work(smc_hs_wq, &smc->connect_work))
1283 smc->connect_nonblock = 1;
1286 rc = __smc_connect(smc);
1290 rc = 0; /* success cases including fallback */
1299 static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
1301 struct socket *new_clcsock = NULL;
1302 struct sock *lsk = &lsmc->sk;
1303 struct sock *new_sk;
1307 new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
1310 lsk->sk_err = ENOMEM;
1315 *new_smc = smc_sk(new_sk);
1317 mutex_lock(&lsmc->clcsock_release_lock);
1319 rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
1320 mutex_unlock(&lsmc->clcsock_release_lock);
1322 if (rc < 0 && rc != -EAGAIN)
1324 if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
1325 new_sk->sk_prot->unhash(new_sk);
1327 sock_release(new_clcsock);
1328 new_sk->sk_state = SMC_CLOSED;
1329 sock_set_flag(new_sk, SOCK_DEAD);
1330 sock_put(new_sk); /* final */
1335 /* new clcsock has inherited the smc listen-specific sk_data_ready
1336 * function; switch it back to the original sk_data_ready function */
1338 new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
1339 (*new_smc)->clcsock = new_clcsock;
1344 /* add a just created sock to the accept queue of the listen sock as
1345 * candidate for a following socket accept call from user space */
1347 static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
1349 struct smc_sock *par = smc_sk(parent);
1351 sock_hold(sk); /* sock_put in smc_accept_unlink () */
1352 spin_lock(&par->accept_q_lock);
1353 list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
1354 spin_unlock(&par->accept_q_lock);
1355 sk_acceptq_added(parent);
1358 /* remove a socket from the accept queue of its parental listening socket */
1359 static void smc_accept_unlink(struct sock *sk)
1361 struct smc_sock *par = smc_sk(sk)->listen_smc;
1363 spin_lock(&par->accept_q_lock);
1364 list_del_init(&smc_sk(sk)->accept_q);
1365 spin_unlock(&par->accept_q_lock);
1366 sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
1367 sock_put(sk); /* sock_hold in smc_accept_enqueue */
1370 /* remove a sock from the accept queue to bind it to a new socket created
1371 * for a socket accept call from user space */
1373 struct sock *smc_accept_dequeue(struct sock *parent,
1374 struct socket *new_sock)
1376 struct smc_sock *isk, *n;
1377 struct sock *new_sk;
1379 list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
1380 new_sk = (struct sock *)isk;
1382 smc_accept_unlink(new_sk);
1383 if (new_sk->sk_state == SMC_CLOSED) {
1384 new_sk->sk_prot->unhash(new_sk);
1386 sock_release(isk->clcsock);
1387 isk->clcsock = NULL;
1389 sock_put(new_sk); /* final */
1393 sock_graft(new_sk, new_sock);
1394 if (isk->use_fallback) {
1395 smc_sk(new_sk)->clcsock->file = new_sock->file;
1396 isk->clcsock->file->private_data = isk->clcsock;
1404 /* clean up for a created but never accepted sock */
1405 void smc_close_non_accepted(struct sock *sk)
1407 struct smc_sock *smc = smc_sk(sk);
1409 sock_hold(sk); /* sock_put below */
1411 if (!sk->sk_lingertime)
1412 /* wait for peer closing */
1413 sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
1416 sock_put(sk); /* sock_hold above */
1417 sock_put(sk); /* final sock_put */
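/* Server side first contact: register the connection's RMB, send a CONFIRM
 * LINK request over the RoCE fabric, wait for the client's response (or a
 * CLC decline), mark the link active and then try to establish a second
 * link via smc_llc_srv_add_link().
 */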
1420 static int smcr_serv_conf_first_link(struct smc_sock *smc)
1422 struct smc_link *link = smc->conn.lnk;
1423 struct smc_llc_qentry *qentry;
1426 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
1427 return SMC_CLC_DECL_ERR_REGRMB;
1429 /* send CONFIRM LINK request to client over the RoCE fabric */
1430 rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
1432 return SMC_CLC_DECL_TIMEOUT_CL;
1434 /* receive CONFIRM LINK response from client over the RoCE fabric */
1435 qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
1436 SMC_LLC_CONFIRM_LINK);
1438 struct smc_clc_msg_decline dclc;
1440 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1441 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
1442 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
1444 smc_llc_save_peer_uid(qentry);
1445 rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
1446 smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
1448 return SMC_CLC_DECL_RMBE_EC;
1450 /* confirm_rkey is implicit on 1st contact */
1451 smc->conn.rmb_desc->is_conf_rkey = true;
1453 smc_llc_link_active(link);
1454 smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
1456 /* initial contact - try to establish second link */
1457 smc_llc_srv_add_link(link);
1461 /* listen worker: finish */
1462 static void smc_listen_out(struct smc_sock *new_smc)
1464 struct smc_sock *lsmc = new_smc->listen_smc;
1465 struct sock *newsmcsk = &new_smc->sk;
1467 if (lsmc->sk.sk_state == SMC_LISTEN) {
1468 lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1469 smc_accept_enqueue(&lsmc->sk, newsmcsk);
1470 release_sock(&lsmc->sk);
1471 } else { /* no longer listening */
1472 smc_close_non_accepted(newsmcsk);
1475 /* Wake up accept */
1476 lsmc->sk.sk_data_ready(&lsmc->sk);
1477 sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
1480 /* listen worker: finish in state connected */
1481 static void smc_listen_out_connected(struct smc_sock *new_smc)
1483 struct sock *newsmcsk = &new_smc->sk;
1485 sk_refcnt_debug_inc(newsmcsk);
1486 if (newsmcsk->sk_state == SMC_INIT)
1487 newsmcsk->sk_state = SMC_ACTIVE;
1489 smc_listen_out(new_smc);
1492 /* listen worker: finish in error state */
1493 static void smc_listen_out_err(struct smc_sock *new_smc)
1495 struct sock *newsmcsk = &new_smc->sk;
1496 struct net *net = sock_net(newsmcsk);
1498 this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
1499 if (newsmcsk->sk_state == SMC_INIT)
1500 sock_put(&new_smc->sk); /* passive closing */
1501 newsmcsk->sk_state = SMC_CLOSED;
1503 smc_listen_out(new_smc);
1506 /* listen worker: decline and fall back if possible */
1507 static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
1508 int local_first, u8 version)
1510 /* RDMA setup failed, switch back to TCP */
1511 smc_conn_abort(new_smc, local_first);
1512 if (reason_code < 0 ||
1513 smc_switch_to_fallback(new_smc, reason_code)) {
1514 /* error, no fallback possible */
1515 smc_listen_out_err(new_smc);
1518 if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
1519 if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
1520 smc_listen_out_err(new_smc);
1524 smc_listen_out_connected(new_smc);
1527 /* listen worker: version checking */
1528 static int smc_listen_v2_check(struct smc_sock *new_smc,
1529 struct smc_clc_msg_proposal *pclc,
1530 struct smc_init_info *ini)
1532 struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
1533 struct smc_clc_v2_extension *pclc_v2_ext;
1534 int rc = SMC_CLC_DECL_PEERNOSMC;
1536 ini->smc_type_v1 = pclc->hdr.typev1;
1537 ini->smc_type_v2 = pclc->hdr.typev2;
1538 ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0;
1539 if (pclc->hdr.version > SMC_V1)
1540 ini->smcd_version |=
1541 ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
1542 if (!(ini->smcd_version & SMC_V2)) {
1543 rc = SMC_CLC_DECL_PEERNOSMC;
1546 if (!smc_ism_is_v2_capable()) {
1547 ini->smcd_version &= ~SMC_V2;
1548 rc = SMC_CLC_DECL_NOISM2SUPP;
1551 pclc_v2_ext = smc_get_clc_v2_ext(pclc);
1553 ini->smcd_version &= ~SMC_V2;
1554 rc = SMC_CLC_DECL_NOV2EXT;
1557 pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
1558 if (!pclc_smcd_v2_ext) {
1559 ini->smcd_version &= ~SMC_V2;
1560 rc = SMC_CLC_DECL_NOV2DEXT;
1564 if (!ini->smcd_version)
1570 /* listen worker: check prefixes */
1571 static int smc_listen_prfx_check(struct smc_sock *new_smc,
1572 struct smc_clc_msg_proposal *pclc)
1574 struct smc_clc_msg_proposal_prefix *pclc_prfx;
1575 struct socket *newclcsock = new_smc->clcsock;
1577 if (pclc->hdr.typev1 == SMC_TYPE_N)
1579 pclc_prfx = smc_clc_proposal_get_prefix(pclc);
1580 if (smc_clc_prfx_match(newclcsock, pclc_prfx))
1581 return SMC_CLC_DECL_DIFFPREFIX;
1586 /* listen worker: initialize connection and buffers */
1587 static int smc_listen_rdma_init(struct smc_sock *new_smc,
1588 struct smc_init_info *ini)
1592 /* allocate connection / link group */
1593 rc = smc_conn_create(new_smc, ini);
1597 /* create send buffer and rmb */
1598 if (smc_buf_create(new_smc, false))
1599 return SMC_CLC_DECL_MEM;
1604 /* listen worker: initialize connection and buffers for SMC-D */
1605 static int smc_listen_ism_init(struct smc_sock *new_smc,
1606 struct smc_init_info *ini)
1610 rc = smc_conn_create(new_smc, ini);
1614 /* Create send and receive buffers */
1615 rc = smc_buf_create(new_smc, true);
1617 smc_conn_abort(new_smc, ini->first_contact_local);
1618 return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
1625 static bool smc_is_already_selected(struct smcd_dev *smcd,
1626 struct smc_init_info *ini,
1631 for (i = 0; i < matches; i++)
1632 if (smcd == ini->ism_dev[i])
1638 /* check for ISM devices matching proposed ISM devices */
1639 static void smc_check_ism_v2_match(struct smc_init_info *ini,
1640 u16 proposed_chid, u64 proposed_gid,
1641 unsigned int *matches)
1643 struct smcd_dev *smcd;
1645 list_for_each_entry(smcd, &smcd_dev_list.list, list) {
1646 if (smcd->going_away)
1648 if (smc_is_already_selected(smcd, ini, *matches))
1650 if (smc_ism_get_chid(smcd) == proposed_chid &&
1651 !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
1652 ini->ism_peer_gid[*matches] = proposed_gid;
1653 ini->ism_dev[*matches] = smcd;
1660 static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
1666 static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
1667 struct smc_clc_msg_proposal *pclc,
1668 struct smc_init_info *ini)
1670 struct smc_clc_smcd_v2_extension *smcd_v2_ext;
1671 struct smc_clc_v2_extension *smc_v2_ext;
1672 struct smc_clc_msg_smcd *pclc_smcd;
1673 unsigned int matches = 0;
1678 if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
1681 pclc_smcd = smc_get_clc_msg_smcd(pclc);
1682 smc_v2_ext = smc_get_clc_v2_ext(pclc);
1683 smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
1685 !smc_v2_ext->hdr.flag.seid) { /* no system EID support for SMCD */
1686 smc_find_ism_store_rc(SMC_CLC_DECL_NOSEID, ini);
1690 mutex_lock(&smcd_dev_list.mutex);
1691 if (pclc_smcd->ism.chid)
1692 /* check for ISM device matching proposed native ISM device */
1693 smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
1694 ntohll(pclc_smcd->ism.gid), &matches);
1695 for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) {
1696 /* check for ISM devices matching proposed non-native ISM devices */
1699 smc_check_ism_v2_match(ini,
1700 ntohs(smcd_v2_ext->gidchid[i - 1].chid),
1701 ntohll(smcd_v2_ext->gidchid[i - 1].gid),
1704 mutex_unlock(&smcd_dev_list.mutex);
1706 if (ini->ism_dev[0]) {
1707 smc_ism_get_system_eid(ini->ism_dev[0], &eid);
1708 if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN))
1714 /* separate - outside the smcd_dev_list.lock */
1715 smcd_version = ini->smcd_version;
1716 for (i = 0; i < matches; i++) {
1717 ini->smcd_version = SMC_V2;
1718 ini->is_smcd = true;
1719 ini->ism_selected = i;
1720 rc = smc_listen_ism_init(new_smc, ini);
1722 smc_find_ism_store_rc(rc, ini);
1723 /* try next active ISM device */
1726 return; /* matching and usable V2 ISM device found */
1728 /* no V2 ISM device could be initialized */
1729 ini->smcd_version = smcd_version; /* restore original value */
1732 ini->smcd_version &= ~SMC_V2;
1733 ini->ism_dev[0] = NULL;
1734 ini->is_smcd = false;
1737 static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
1738 struct smc_clc_msg_proposal *pclc,
1739 struct smc_init_info *ini)
1741 struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
1744 /* check if ISM V1 is available */
1745 if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
1747 ini->is_smcd = true; /* prepare ISM check */
1748 ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
1749 rc = smc_find_ism_device(new_smc, ini);
1752 ini->ism_selected = 0;
1753 rc = smc_listen_ism_init(new_smc, ini);
1755 return; /* V1 ISM device found */
1758 smc_find_ism_store_rc(rc, ini);
1759 ini->ism_dev[0] = NULL;
1760 ini->is_smcd = false;
1763 /* listen worker: register buffers */
1764 static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
1766 struct smc_connection *conn = &new_smc->conn;
1769 if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
1770 return SMC_CLC_DECL_ERR_REGRMB;
1772 smc_rmb_sync_sg_for_device(&new_smc->conn);
1777 static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
1778 struct smc_clc_msg_proposal *pclc,
1779 struct smc_init_info *ini)
1783 if (!smcr_indicated(ini->smc_type_v1))
1784 return SMC_CLC_DECL_NOSMCDEV;
1786 /* prepare RDMA check */
1787 ini->ib_lcl = &pclc->lcl;
1788 rc = smc_find_rdma_device(new_smc, ini);
1790 /* no RDMA device found */
1791 if (ini->smc_type_v1 == SMC_TYPE_B)
1792 /* neither ISM nor RDMA device found */
1793 rc = SMC_CLC_DECL_NOSMCDEV;
1796 rc = smc_listen_rdma_init(new_smc, ini);
1799 return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
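/* Server side device selection tries, in this order: an ISM device matching
 * the V2 proposal, then (for V1) the IP prefix check, the VLAN lookup and a
 * matching V1 ISM device, and finally an RDMA device; the first stored
 * non-zero reason code is returned if nothing usable is found.
 */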
1802 /* determine the local device matching the proposal */
1803 static int smc_listen_find_device(struct smc_sock *new_smc,
1804 struct smc_clc_msg_proposal *pclc,
1805 struct smc_init_info *ini)
1809 /* check for ISM device matching V2 proposed device */
1810 smc_find_ism_v2_device_serv(new_smc, pclc, ini);
1811 if (ini->ism_dev[0])
1814 if (!(ini->smcd_version & SMC_V1))
1815 return ini->rc ?: SMC_CLC_DECL_NOSMCD2DEV;
1817 /* check for matching IP prefix and subnet length */
1818 rc = smc_listen_prfx_check(new_smc, pclc);
1820 return ini->rc ?: rc;
1822 /* get vlan id from IP device */
1823 if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
1824 return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
1826 /* check for ISM device matching V1 proposed device */
1827 smc_find_ism_v1_device_serv(new_smc, pclc, ini);
1828 if (ini->ism_dev[0])
1831 if (pclc->hdr.typev1 == SMC_TYPE_D)
1832 /* skip RDMA and decline */
1833 return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
1835 /* check if RDMA is available */
1836 rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
1837 smc_find_ism_store_rc(rc, ini);
1839 return (!rc) ? 0 : ini->rc;
1842 /* listen worker: finish RDMA setup */
1843 static int smc_listen_rdma_finish(struct smc_sock *new_smc,
1844 struct smc_clc_msg_accept_confirm *cclc,
1847 struct smc_link *link = new_smc->conn.lnk;
1848 int reason_code = 0;
1851 smc_link_save_peer_info(link, cclc);
1853 if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
1854 return SMC_CLC_DECL_ERR_RTOK;
1857 if (smc_ib_ready_link(link))
1858 return SMC_CLC_DECL_ERR_RDYLNK;
1859 /* QP confirmation over RoCE fabric */
1860 smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
1861 reason_code = smcr_serv_conf_first_link(new_smc);
1862 smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
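/* The listen worker handles one incoming connection: receive the CLC
 * proposal, run the version and device checks, send the CLC accept, wait
 * for the CLC confirm and finish the RDMA setup if needed; on failure it
 * declines and, where possible, falls back to TCP.
 */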
1867 /* setup for connection of server */
1868 static void smc_listen_work(struct work_struct *work)
1870 struct smc_sock *new_smc = container_of(work, struct smc_sock,
1872 u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
1873 struct socket *newclcsock = new_smc->clcsock;
1874 struct smc_clc_msg_accept_confirm *cclc;
1875 struct smc_clc_msg_proposal_area *buf;
1876 struct smc_clc_msg_proposal *pclc;
1877 struct smc_init_info *ini = NULL;
1880 if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
1881 return smc_listen_out_err(new_smc);
1883 if (new_smc->use_fallback) {
1884 smc_listen_out_connected(new_smc);
1888 /* check if peer is smc capable */
1889 if (!tcp_sk(newclcsock->sk)->syn_smc) {
1890 rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
1892 smc_listen_out_err(new_smc);
1894 smc_listen_out_connected(new_smc);
1898 /* do inband token exchange -
1899 * wait for and receive SMC Proposal CLC message */
1901 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
1903 rc = SMC_CLC_DECL_MEM;
1906 pclc = (struct smc_clc_msg_proposal *)buf;
1907 rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
1908 SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
1911 version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version;
1913 /* IPSec connections opt out of SMC optimizations */
1914 if (using_ipsec(new_smc)) {
1915 rc = SMC_CLC_DECL_IPSEC;
1919 ini = kzalloc(sizeof(*ini), GFP_KERNEL);
1921 rc = SMC_CLC_DECL_MEM;
1925 /* initial version checking */
1926 rc = smc_listen_v2_check(new_smc, pclc, ini);
1930 mutex_lock(&smc_server_lgr_pending);
1931 smc_close_init(new_smc);
1932 smc_rx_init(new_smc);
1933 smc_tx_init(new_smc);
1935 /* determine ISM or RoCE device used for connection */
1936 rc = smc_listen_find_device(new_smc, pclc, ini);
1940 /* send SMC Accept CLC message */
1941 rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
1942 ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
1946 /* SMC-D does not need this lock any more */
1948 mutex_unlock(&smc_server_lgr_pending);
1950 /* receive SMC Confirm CLC message */
1951 memset(buf, 0, sizeof(*buf));
1952 cclc = (struct smc_clc_msg_accept_confirm *)buf;
1953 rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
1954 SMC_CLC_CONFIRM, CLC_WAIT_TIME);
1962 if (!ini->is_smcd) {
1963 rc = smc_listen_rdma_finish(new_smc, cclc,
1964 ini->first_contact_local);
1967 mutex_unlock(&smc_server_lgr_pending);
1969 smc_conn_save_peer_info(new_smc, cclc);
1970 smc_listen_out_connected(new_smc);
1971 SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
1975 mutex_unlock(&smc_server_lgr_pending);
1977 smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
1984 static void smc_tcp_listen_work(struct work_struct *work)
1986 struct smc_sock *lsmc = container_of(work, struct smc_sock,
1988 struct sock *lsk = &lsmc->sk;
1989 struct smc_sock *new_smc;
1993 while (lsk->sk_state == SMC_LISTEN) {
1994 rc = smc_clcsock_accept(lsmc, &new_smc);
1995 if (rc) /* clcsock accept queue empty or error */
2000 new_smc->listen_smc = lsmc;
2001 new_smc->use_fallback = lsmc->use_fallback;
2002 new_smc->fallback_rsn = lsmc->fallback_rsn;
2003 sock_hold(lsk); /* sock_put in smc_listen_work */
2004 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
2005 smc_copy_sock_settings_to_smc(new_smc);
2006 new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
2007 new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
2008 sock_hold(&new_smc->sk); /* sock_put in passive closing */
2009 if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
2010 sock_put(&new_smc->sk);
2015 sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
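/* data_ready callback installed on the listening clcsock: it forwards to
 * the original callback and schedules smc_tcp_listen_work() to accept
 * pending TCP connections.
 */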
2018 static void smc_clcsock_data_ready(struct sock *listen_clcsock)
2020 struct smc_sock *lsmc =
2021 smc_clcsock_user_data(listen_clcsock);
2025 lsmc->clcsk_data_ready(listen_clcsock);
2026 if (lsmc->sk.sk_state == SMC_LISTEN) {
2027 sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
2028 if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
2029 sock_put(&lsmc->sk);
2033 static int smc_listen(struct socket *sock, int backlog)
2035 struct sock *sk = sock->sk;
2036 struct smc_sock *smc;
2043 if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
2044 smc->connect_nonblock)
2048 if (sk->sk_state == SMC_LISTEN) {
2049 sk->sk_max_ack_backlog = backlog;
2052 /* some socket options are handled in core, so we cannot apply
2053 * them to the clc socket -- copy smc socket options to clc socket */
2055 smc_copy_sock_settings_to_clc(smc);
2056 if (!smc->use_fallback)
2057 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
2059 /* save original sk_data_ready function and establish
2060 * smc-specific sk_data_ready function */
2062 smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
2063 smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
2064 smc->clcsock->sk->sk_user_data =
2065 (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
2066 rc = kernel_listen(smc->clcsock, backlog);
2068 smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
2071 sk->sk_max_ack_backlog = backlog;
2072 sk->sk_ack_backlog = 0;
2073 sk->sk_state = SMC_LISTEN;
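/* accept() sleeps interruptibly on the parent's accept queue; if
 * TCP_DEFER_ACCEPT is set and the caller did not pass O_NONBLOCK, the
 * accepted socket additionally waits until data has actually arrived.
 */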
2080 static int smc_accept(struct socket *sock, struct socket *new_sock,
2081 int flags, bool kern)
2083 struct sock *sk = sock->sk, *nsk;
2084 DECLARE_WAITQUEUE(wait, current);
2085 struct smc_sock *lsmc;
2090 sock_hold(sk); /* sock_put below */
2093 if (lsmc->sk.sk_state != SMC_LISTEN) {
2099 /* Wait for an incoming connection */
2100 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2101 add_wait_queue_exclusive(sk_sleep(sk), &wait);
2102 while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
2103 set_current_state(TASK_INTERRUPTIBLE);
2109 timeo = schedule_timeout(timeo);
2110 /* wakeup by sk_data_ready in smc_listen_work() */
2111 sched_annotate_sleep();
2113 if (signal_pending(current)) {
2114 rc = sock_intr_errno(timeo);
2118 set_current_state(TASK_RUNNING);
2119 remove_wait_queue(sk_sleep(sk), &wait);
2122 rc = sock_error(nsk);
2127 if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
2128 /* wait till data arrives on the socket */
2129 timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
2131 if (smc_sk(nsk)->use_fallback) {
2132 struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
2135 if (skb_queue_empty(&clcsk->sk_receive_queue))
2136 sk_wait_data(clcsk, &timeo, NULL);
2137 release_sock(clcsk);
2138 } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
2140 smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
2146 sock_put(sk); /* sock_hold above */
2150 static int smc_getname(struct socket *sock, struct sockaddr *addr,
2153 struct smc_sock *smc;
2155 if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
2156 (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
2159 smc = smc_sk(sock->sk);
2161 return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
2164 static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2166 struct sock *sk = sock->sk;
2167 struct smc_sock *smc;
2172 if ((sk->sk_state != SMC_ACTIVE) &&
2173 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2174 (sk->sk_state != SMC_INIT))
2177 if (msg->msg_flags & MSG_FASTOPEN) {
2178 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2179 rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
2188 if (smc->use_fallback) {
2189 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
2191 rc = smc_tx_sendmsg(smc, msg, len);
2192 SMC_STAT_TX_PAYLOAD(smc, len, rc);
2199 static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2202 struct sock *sk = sock->sk;
2203 struct smc_sock *smc;
2208 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
2209 /* socket was connected before, no more data to read */
2213 if ((sk->sk_state == SMC_INIT) ||
2214 (sk->sk_state == SMC_LISTEN) ||
2215 (sk->sk_state == SMC_CLOSED))
2218 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
2223 if (smc->use_fallback) {
2224 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
2226 msg->msg_namelen = 0;
2227 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
2228 SMC_STAT_RX_PAYLOAD(smc, rc, rc);
2236 static __poll_t smc_accept_poll(struct sock *parent)
2238 struct smc_sock *isk = smc_sk(parent);
2241 spin_lock(&isk->accept_q_lock);
2242 if (!list_empty(&isk->accept_q))
2243 mask = EPOLLIN | EPOLLRDNORM;
2244 spin_unlock(&isk->accept_q_lock);
2249 static __poll_t smc_poll(struct file *file, struct socket *sock,
2252 struct sock *sk = sock->sk;
2253 struct smc_sock *smc;
2259 smc = smc_sk(sock->sk);
2260 if (smc->use_fallback) {
2261 /* delegate to CLC child sock */
2262 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
2263 sk->sk_err = smc->clcsock->sk->sk_err;
2265 if (sk->sk_state != SMC_CLOSED)
2266 sock_poll_wait(file, sock, wait);
2269 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
2270 (sk->sk_state == SMC_CLOSED))
2272 if (sk->sk_state == SMC_LISTEN) {
2273 /* woken up by sk_data_ready in smc_listen_work() */
2274 mask |= smc_accept_poll(sk);
2275 } else if (smc->use_fallback) { /* as result of connect_work() */
2276 mask |= smc->clcsock->ops->poll(file, smc->clcsock,
2278 sk->sk_err = smc->clcsock->sk->sk_err;
2280 if ((sk->sk_state != SMC_INIT &&
2281 atomic_read(&smc->conn.sndbuf_space)) ||
2282 sk->sk_shutdown & SEND_SHUTDOWN) {
2283 mask |= EPOLLOUT | EPOLLWRNORM;
2285 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2286 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2288 if (atomic_read(&smc->conn.bytes_to_rcv))
2289 mask |= EPOLLIN | EPOLLRDNORM;
2290 if (sk->sk_shutdown & RCV_SHUTDOWN)
2291 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2292 if (sk->sk_state == SMC_APPCLOSEWAIT1)
2294 if (smc->conn.urg_state == SMC_URG_VALID)
2302 static int smc_shutdown(struct socket *sock, int how)
2304 struct sock *sk = sock->sk;
2305 bool do_shutdown = true;
2306 struct smc_sock *smc;
2313 if ((how < SHUT_RD) || (how > SHUT_RDWR))
2319 if ((sk->sk_state != SMC_ACTIVE) &&
2320 (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
2321 (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
2322 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2323 (sk->sk_state != SMC_APPCLOSEWAIT2) &&
2324 (sk->sk_state != SMC_APPFINCLOSEWAIT))
2326 if (smc->use_fallback) {
2327 rc = kernel_sock_shutdown(smc->clcsock, how);
2328 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
2329 if (sk->sk_shutdown == SHUTDOWN_MASK)
2330 sk->sk_state = SMC_CLOSED;
2334 case SHUT_RDWR: /* shutdown in both directions */
2335 old_state = sk->sk_state;
2336 rc = smc_close_active(smc);
2337 if (old_state == SMC_ACTIVE &&
2338 sk->sk_state == SMC_PEERCLOSEWAIT1)
2339 do_shutdown = false;
2342 rc = smc_close_shutdown_write(smc);
2346 /* nothing more to do because peer is not involved */
2349 if (do_shutdown && smc->clcsock)
2350 rc1 = kernel_sock_shutdown(smc->clcsock, how);
2351 /* map sock_shutdown_cmd constants to sk_shutdown value range */
2352 sk->sk_shutdown |= how + 1;
2356 return rc ? rc : rc1;
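/* setsockopt() is first forwarded to the internal clcsock; a few TCP
 * options (TCP_NODELAY, TCP_CORK, TCP_DEFER_ACCEPT and the TCP fastopen
 * options) are additionally interpreted by SMC itself, with the fastopen
 * options forcing a fallback because SMC does not support them.
 */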
2359 static int smc_setsockopt(struct socket *sock, int level, int optname,
2360 sockptr_t optval, unsigned int optlen)
2362 struct sock *sk = sock->sk;
2363 struct smc_sock *smc;
2366 if (level == SOL_TCP && optname == TCP_ULP)
2371 /* generic setsockopts reaching us here always apply to the CLC socket */
2374 mutex_lock(&smc->clcsock_release_lock);
2375 if (!smc->clcsock) {
2376 mutex_unlock(&smc->clcsock_release_lock);
2379 if (unlikely(!smc->clcsock->ops->setsockopt))
2382 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
2384 if (smc->clcsock->sk->sk_err) {
2385 sk->sk_err = smc->clcsock->sk->sk_err;
2386 sk_error_report(sk);
2388 mutex_unlock(&smc->clcsock_release_lock);
2390 if (optlen < sizeof(int))
2392 if (copy_from_sockptr(&val, optval, sizeof(int)))
2396 if (rc || smc->use_fallback)
2400 case TCP_FASTOPEN_CONNECT:
2401 case TCP_FASTOPEN_KEY:
2402 case TCP_FASTOPEN_NO_COOKIE:
2403 /* option not supported by SMC */
2404 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2405 rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
2411 if (sk->sk_state != SMC_INIT &&
2412 sk->sk_state != SMC_LISTEN &&
2413 sk->sk_state != SMC_CLOSED) {
2415 SMC_STAT_INC(smc, ndly_cnt);
2416 mod_delayed_work(smc->conn.lgr->tx_wq,
2417 &smc->conn.tx_work, 0);
2422 if (sk->sk_state != SMC_INIT &&
2423 sk->sk_state != SMC_LISTEN &&
2424 sk->sk_state != SMC_CLOSED) {
2426 SMC_STAT_INC(smc, cork_cnt);
2427 mod_delayed_work(smc->conn.lgr->tx_wq,
2428 &smc->conn.tx_work, 0);
2432 case TCP_DEFER_ACCEPT:
2433 smc->sockopt_defer_accept = val;
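/* get socket options: always answered from the internal CLC socket */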
static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sock->sk);
	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EBADF;
	}
	/* socket options apply to the CLC socket */
	if (unlikely(!smc->clcsock->ops->getsockopt)) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EOPNOTSUPP;
	}
	rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					   optval, optlen);
	mutex_unlock(&smc->clcsock_release_lock);
	return rc;
}

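/* ioctl handler: fallback sockets delegate to the CLC socket; native SMC
 * sockets answer the queue-size ioctls (SIOCINQ, SIOCOUTQ, SIOCOUTQNSD) and
 * SIOCATMARK from the SMC connection's buffer state and cursors
 */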
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}

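/* sendpage is only supported on the TCP fallback path; on a native SMC
 * socket it degrades to sock_no_sendpage()
 */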
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback) {
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	} else {
		SMC_STAT_INC(smc, sendpage_cnt);
		rc = sock_no_sendpage(sock, page, offset, size, flags);
	}

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		SMC_STAT_INC(smc, splice_cnt);
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}
out:
	release_sock(sk);

	return rc;
}

/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};

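/* create a new SMC socket: allocates the SMC sock itself plus the internal
 * TCP (CLC) socket used for the handshake and as fallback
 */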
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}

static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static __net_init int smc_net_stat_init(struct net *net)
{
	return smc_stats_init(net);
}

static void __net_exit smc_net_stat_exit(struct net *net)
{
	smc_stats_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};

static struct pernet_operations smc_net_stat_ops = {
	.init = smc_net_stat_init,
	.exit = smc_net_stat_exit,
};

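/* module init: register the pernet subsystems, allocate the handshake and
 * close workqueues, initialize the SMC core/LLC/CDC layers, register both
 * protos and the AF_SMC socket family, and register with the IB core;
 * error paths unwind in reverse order
 */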
static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = register_pernet_subsys(&smc_net_stat_ops);
	if (rc)
		return rc;

	smc_ism_init();
	smc_clc_init();

	rc = smc_nl_init();
	if (rc)
		goto out_pernet_subsys;

	rc = smc_pnet_init();
	if (rc)
		goto out_nl;

	rc = -ENOMEM;
	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
	if (!smc_hs_wq)
		goto out_pnet;

	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
	if (!smc_close_wq)
		goto out_alloc_hs_wq;

	rc = smc_core_init();
	if (rc) {
		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
		goto out_alloc_wqs;
	}

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_core:
	smc_core_exit();
out_alloc_wqs:
	destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
	destroy_workqueue(smc_hs_wq);
out_pnet:
	smc_pnet_exit();
out_nl:
	smc_nl_exit();
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}

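/* module exit: tear down in roughly the reverse order of smc_init() */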
static void __exit smc_exit(void)
{
	static_branch_disable(&tcp_have_smc);
	sock_unregister(PF_SMC);
	smc_core_exit();
	smc_ib_unregister_client();
	destroy_workqueue(smc_close_wq);
	destroy_workqueue(smc_hs_wq);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	smc_nl_exit();
	unregister_pernet_subsys(&smc_net_stat_ops);
	unregister_pernet_subsys(&smc_net_ops);
	rcu_barrier();
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);