1 // SPDX-License-Identifier: GPL-2.0-only
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
5 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
6 * applies to SOCK_STREAM sockets only
7 * offers an alternative communication option for TCP-protocol sockets
8 * applicable with RoCE-cards only
10 * Initial restrictions:
11 * - support for alternate links postponed
13 * Copyright IBM Corp. 2016, 2018
15 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
16 * based on prototype from Frank Blaschka
19 #define KMSG_COMPONENT "smc"
20 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22 #include <linux/module.h>
23 #include <linux/socket.h>
24 #include <linux/workqueue.h>
26 #include <linux/sched/signal.h>
27 #include <linux/if_vlan.h>
28 #include <linux/rcupdate_wait.h>
29 #include <linux/ctype.h>
34 #include <asm/ioctls.h>
36 #include <net/net_namespace.h>
37 #include <net/netns/generic.h>
38 #include "smc_netns.h"
48 #include "smc_netlink.h"
51 #include "smc_close.h"
52 #include "smc_stats.h"
static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */
61 struct workqueue_struct *smc_hs_wq; /* wq for handshake work */
62 struct workqueue_struct *smc_close_wq; /* wq for close work */
64 static void smc_tcp_listen_work(struct work_struct *);
65 static void smc_connect_work(struct work_struct *);
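/* enable / disable TCP keepalive on the internal CLC socket; SMC has no
 * keepalive of its own, so the option is delegated to the TCP layer
 */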
67 static void smc_set_keepalive(struct sock *sk, int val)
69 struct smc_sock *smc = smc_sk(sk);
71 smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
74 static struct smc_hashinfo smc_v4_hashinfo = {
75 .lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
78 static struct smc_hashinfo smc_v6_hashinfo = {
79 .lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
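/* insert an smc sock into the protocol hash table and account for it in
 * the per-netns protocol inuse counters
 */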
82 int smc_hash_sk(struct sock *sk)
84 struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
85 struct hlist_head *head;
89 write_lock_bh(&h->lock);
90 sk_add_node(sk, head);
91 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
92 write_unlock_bh(&h->lock);
96 EXPORT_SYMBOL_GPL(smc_hash_sk);
98 void smc_unhash_sk(struct sock *sk)
100 struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
102 write_lock_bh(&h->lock);
103 if (sk_del_node_init(sk))
104 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
105 write_unlock_bh(&h->lock);
107 EXPORT_SYMBOL_GPL(smc_unhash_sk);
109 struct proto smc_proto = {
111 .owner = THIS_MODULE,
112 .keepalive = smc_set_keepalive,
114 .unhash = smc_unhash_sk,
115 .obj_size = sizeof(struct smc_sock),
116 .h.smc_hash = &smc_v4_hashinfo,
117 .slab_flags = SLAB_TYPESAFE_BY_RCU,
119 EXPORT_SYMBOL_GPL(smc_proto);
121 struct proto smc_proto6 = {
123 .owner = THIS_MODULE,
124 .keepalive = smc_set_keepalive,
126 .unhash = smc_unhash_sk,
127 .obj_size = sizeof(struct smc_sock),
128 .h.smc_hash = &smc_v6_hashinfo,
129 .slab_flags = SLAB_TYPESAFE_BY_RCU,
131 EXPORT_SYMBOL_GPL(smc_proto6);
133 static void smc_restore_fallback_changes(struct smc_sock *smc)
135 if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
136 smc->clcsock->file->private_data = smc->sk.sk_socket;
137 smc->clcsock->file = NULL;
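/* tear down the SMC part of a socket: close the connection (or shut down
 * the clcsock for fallback and listen sockets), unhash the sock and free
 * the connection resources
 */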
141 static int __smc_release(struct smc_sock *smc)
143 struct sock *sk = &smc->sk;
146 if (!smc->use_fallback) {
147 rc = smc_close_active(smc);
148 sock_set_flag(sk, SOCK_DEAD);
149 sk->sk_shutdown |= SHUTDOWN_MASK;
151 if (sk->sk_state != SMC_CLOSED) {
152 if (sk->sk_state != SMC_LISTEN &&
153 sk->sk_state != SMC_INIT)
154 sock_put(sk); /* passive closing */
155 if (sk->sk_state == SMC_LISTEN) {
156 /* wake up clcsock accept */
157 rc = kernel_sock_shutdown(smc->clcsock,
160 sk->sk_state = SMC_CLOSED;
161 sk->sk_state_change(sk);
163 smc_restore_fallback_changes(smc);
166 sk->sk_prot->unhash(sk);
168 if (sk->sk_state == SMC_CLOSED) {
171 smc_clcsock_release(smc);
174 if (!smc->use_fallback)
175 smc_conn_free(&smc->conn);
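/* socket release entry point: abort a dangling non-blocking connect first,
 * then close the socket and drop the references taken at creation time
 */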
181 static int smc_release(struct socket *sock)
183 struct sock *sk = sock->sk;
184 struct smc_sock *smc;
190 sock_hold(sk); /* sock_put below */
193 /* cleanup for a dangling non-blocking connect */
194 if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
195 tcp_abort(smc->clcsock->sk, ECONNABORTED);
196 flush_work(&smc->connect_work);
198 if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
202 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
206 rc = __smc_release(smc);
213 sock_put(sk); /* sock_hold above */
214 sock_put(sk); /* final sock_put */
219 static void smc_destruct(struct sock *sk)
221 if (sk->sk_state != SMC_CLOSED)
223 if (!sock_flag(sk, SOCK_DEAD))
226 sk_refcnt_debug_dec(sk);
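/* allocate a new smc sock for the given protocol (SMCPROTO_SMC or
 * SMCPROTO_SMC6) and initialize its work items, queues and locks
 */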
229 static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
232 struct smc_sock *smc;
236 prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
237 sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
241 sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
242 sk->sk_state = SMC_INIT;
243 sk->sk_destruct = smc_destruct;
244 sk->sk_protocol = protocol;
246 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
247 INIT_WORK(&smc->connect_work, smc_connect_work);
248 INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
249 INIT_LIST_HEAD(&smc->accept_q);
250 spin_lock_init(&smc->accept_q_lock);
251 spin_lock_init(&smc->conn.send_lock);
252 sk->sk_prot->hash(sk);
253 sk_refcnt_debug_inc(sk);
254 mutex_init(&smc->clcsock_release_lock);
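/* bind the socket to a local address by delegating to the internal
 * CLC/TCP socket
 */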
259 static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
262 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
263 struct sock *sk = sock->sk;
264 struct smc_sock *smc;
269 /* replicate tests from inet_bind(), to be safe wrt. future changes */
271 if (addr_len < sizeof(struct sockaddr_in))
275 if (addr->sin_family != AF_INET &&
276 addr->sin_family != AF_INET6 &&
277 addr->sin_family != AF_UNSPEC)
279 /* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
280 if (addr->sin_family == AF_UNSPEC &&
281 addr->sin_addr.s_addr != htonl(INADDR_ANY))
286 /* Check if socket is already active */
288 if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
291 smc->clcsock->sk->sk_reuse = sk->sk_reuse;
292 rc = kernel_bind(smc->clcsock, uaddr, addr_len);
300 static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
	/* options we don't get control over via setsockopt */
304 nsk->sk_type = osk->sk_type;
305 nsk->sk_sndbuf = osk->sk_sndbuf;
306 nsk->sk_rcvbuf = osk->sk_rcvbuf;
307 nsk->sk_sndtimeo = osk->sk_sndtimeo;
308 nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
309 nsk->sk_mark = osk->sk_mark;
310 nsk->sk_priority = osk->sk_priority;
311 nsk->sk_rcvlowat = osk->sk_rcvlowat;
312 nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
313 nsk->sk_err = osk->sk_err;
315 nsk->sk_flags &= ~mask;
316 nsk->sk_flags |= osk->sk_flags & mask;
319 #define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
320 (1UL << SOCK_KEEPOPEN) | \
321 (1UL << SOCK_LINGER) | \
322 (1UL << SOCK_BROADCAST) | \
323 (1UL << SOCK_TIMESTAMP) | \
324 (1UL << SOCK_DBG) | \
325 (1UL << SOCK_RCVTSTAMP) | \
326 (1UL << SOCK_RCVTSTAMPNS) | \
327 (1UL << SOCK_LOCALROUTE) | \
328 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
329 (1UL << SOCK_RXQ_OVFL) | \
330 (1UL << SOCK_WIFI_STATUS) | \
331 (1UL << SOCK_NOFCS) | \
332 (1UL << SOCK_FILTER_LOCKED) | \
333 (1UL << SOCK_TSTAMP_NEW))
334 /* copy only relevant settings and flags of SOL_SOCKET level from smc to
335 * clc socket (since smc is not called for these options from net/core)
337 static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
339 smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
342 #define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
343 (1UL << SOCK_KEEPOPEN) | \
344 (1UL << SOCK_LINGER) | \
346 /* copy only settings and flags relevant for smc from clc to smc socket */
347 static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
349 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
352 /* register the new rmb on all links */
353 static int smcr_lgr_reg_rmbs(struct smc_link *link,
354 struct smc_buf_desc *rmb_desc)
356 struct smc_link_group *lgr = link->lgr;
359 rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
362 /* protect against parallel smc_llc_cli_rkey_exchange() and
363 * parallel smcr_link_reg_rmb()
365 mutex_lock(&lgr->llc_conf_mutex);
366 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
367 if (!smc_link_active(&lgr->lnk[i]))
369 rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
374 /* exchange confirm_rkey msg with peer */
375 rc = smc_llc_do_confirm_rkey(link, rmb_desc);
380 rmb_desc->is_conf_rkey = true;
382 mutex_unlock(&lgr->llc_conf_mutex);
383 smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
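/* client: confirm the first link of a new link group with the server;
 * performs the CONFIRM LINK exchange, moves the QP to RTS and registers
 * the rmb, then waits for an optional ADD LINK request
 */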
387 static int smcr_clnt_conf_first_link(struct smc_sock *smc)
389 struct smc_link *link = smc->conn.lnk;
390 struct smc_llc_qentry *qentry;
393 /* receive CONFIRM LINK request from server over RoCE fabric */
394 qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
395 SMC_LLC_CONFIRM_LINK);
397 struct smc_clc_msg_decline dclc;
399 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
400 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
401 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
403 smc_llc_save_peer_uid(qentry);
404 rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
405 smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
407 return SMC_CLC_DECL_RMBE_EC;
409 rc = smc_ib_modify_qp_rts(link);
411 return SMC_CLC_DECL_ERR_RDYLNK;
413 smc_wr_remember_qp_attr(link);
415 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
416 return SMC_CLC_DECL_ERR_REGRMB;
418 /* confirm_rkey is implicit on 1st contact */
419 smc->conn.rmb_desc->is_conf_rkey = true;
421 /* send CONFIRM LINK response over RoCE fabric */
422 rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
424 return SMC_CLC_DECL_TIMEOUT_CL;
426 smc_llc_link_active(link);
427 smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
429 /* optional 2nd link, receive ADD LINK request from server */
430 qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
433 struct smc_clc_msg_decline dclc;
435 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
436 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
438 rc = 0; /* no DECLINE received, go with one link */
441 smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
442 smc_llc_cli_add_link(link, qentry);
446 static void smcr_conn_save_peer_info(struct smc_sock *smc,
447 struct smc_clc_msg_accept_confirm *clc)
449 int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);
451 smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
452 smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
453 smc->conn.peer_rmbe_size = bufsize;
454 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
455 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
458 static bool smc_isascii(char *hostname)
462 for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
463 if (!isascii(hostname[i]))
468 static void smcd_conn_save_peer_info(struct smc_sock *smc,
469 struct smc_clc_msg_accept_confirm *clc)
471 int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);
473 smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
474 smc->conn.peer_token = clc->d0.token;
475 /* msg header takes up space in the buffer */
476 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
477 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
478 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
479 if (clc->hdr.version > SMC_V1 &&
480 (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) {
481 struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
482 (struct smc_clc_msg_accept_confirm_v2 *)clc;
483 struct smc_clc_first_contact_ext *fce =
484 (struct smc_clc_first_contact_ext *)
485 (((u8 *)clc_v2) + sizeof(*clc_v2));
487 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
489 smc->conn.lgr->peer_os = fce->os_type;
490 smc->conn.lgr->peer_smc_release = fce->release;
491 if (smc_isascii(fce->hostname))
492 memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
493 SMC_MAX_HOSTNAME_LEN);
497 static void smc_conn_save_peer_info(struct smc_sock *smc,
498 struct smc_clc_msg_accept_confirm *clc)
500 if (smc->conn.lgr->is_smcd)
501 smcd_conn_save_peer_info(smc, clc);
503 smcr_conn_save_peer_info(smc, clc);
506 static void smc_link_save_peer_info(struct smc_link *link,
507 struct smc_clc_msg_accept_confirm *clc)
509 link->peer_qpn = ntoh24(clc->r0.qpn);
510 memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE);
511 memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac));
512 link->peer_psn = ntoh24(clc->r0.psn);
513 link->peer_mtu = clc->r0.qp_mtu;
516 static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
517 struct smc_stats_fback *fback_arr)
521 for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
522 if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
523 fback_arr[cnt].count++;
526 if (!fback_arr[cnt].fback_code) {
527 fback_arr[cnt].fback_code = smc->fallback_rsn;
528 fback_arr[cnt].count++;
534 static void smc_stat_fallback(struct smc_sock *smc)
536 struct net *net = sock_net(&smc->sk);
538 mutex_lock(&net->smc.mutex_fback_rsn);
539 if (smc->listen_smc) {
540 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
541 net->smc.fback_rsn->srv_fback_cnt++;
543 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
544 net->smc.fback_rsn->clnt_fback_cnt++;
546 mutex_unlock(&net->smc.mutex_fback_rsn);
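/* switch the connection to TCP fallback mode: record the reason code for
 * statistics and transfer file and fasync state to the clcsock
 */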
549 static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
551 smc->use_fallback = true;
552 smc->fallback_rsn = reason_code;
553 smc_stat_fallback(smc);
554 if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
555 smc->clcsock->file = smc->sk.sk_socket->file;
556 smc->clcsock->file->private_data = smc->clcsock;
557 smc->clcsock->wq.fasync_list =
558 smc->sk.sk_socket->wq.fasync_list;
562 /* fall back during connect */
563 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
565 smc_switch_to_fallback(smc, reason_code);
566 smc_copy_sock_settings_to_clc(smc);
567 smc->connect_nonblock = 0;
568 if (smc->sk.sk_state == SMC_INIT)
569 smc->sk.sk_state = SMC_ACTIVE;
573 /* decline and fall back during connect */
574 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
577 struct net *net = sock_net(&smc->sk);
580 if (reason_code < 0) { /* error, fallback is not possible */
581 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
582 if (smc->sk.sk_state == SMC_INIT)
583 sock_put(&smc->sk); /* passive closing */
586 if (reason_code != SMC_CLC_DECL_PEERDECL) {
587 rc = smc_clc_send_decline(smc, reason_code, version);
589 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
590 if (smc->sk.sk_state == SMC_INIT)
591 sock_put(&smc->sk); /* passive closing */
595 return smc_connect_fallback(smc, reason_code);
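/* tear down a connection that failed during setup; on a local first
 * contact this also removes the just-created link group
 */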
598 static void smc_conn_abort(struct smc_sock *smc, int local_first)
601 smc_lgr_cleanup_early(&smc->conn);
603 smc_conn_free(&smc->conn);
606 /* check if there is a rdma device available for this connection. */
607 /* called for connect and listen */
608 static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
	/* PNET table lookup: search for an active ib_device and port
	 * within the same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
614 smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
616 return SMC_CLC_DECL_NOSMCRDEV;
620 /* check if there is an ISM device available for this connection. */
621 /* called for connect and listen */
622 static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
624 /* Find ISM device with same PNETID as connecting interface */
625 smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
626 if (!ini->ism_dev[0])
627 return SMC_CLC_DECL_NOSMCDDEV;
629 ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
633 /* is chid unique for the ism devices that are already determined? */
634 static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
637 int i = (!ini->ism_dev[0]) ? 1 : 0;
640 if (ini->ism_chid[i] == chid)
/* determine possible V2 ISM devices (either without a PNETID, or with a
 * PNETID that matches the PNETID of the net_device)
 */
648 static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
649 struct smc_init_info *ini)
651 int rc = SMC_CLC_DECL_NOSMCDDEV;
652 struct smcd_dev *smcd;
656 if (smcd_indicated(ini->smc_type_v1))
657 rc = 0; /* already initialized for V1 */
658 mutex_lock(&smcd_dev_list.mutex);
659 list_for_each_entry(smcd, &smcd_dev_list.list, list) {
660 if (smcd->going_away || smcd == ini->ism_dev[0])
662 chid = smc_ism_get_chid(smcd);
663 if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
665 if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
666 smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
667 ini->ism_dev[i] = smcd;
668 ini->ism_chid[i] = chid;
672 if (i > SMC_MAX_ISM_DEVS)
676 mutex_unlock(&smcd_dev_list.mutex);
677 ini->ism_offered_cnt = i - 1;
678 if (!ini->ism_dev[0] && !ini->ism_dev[1])
679 ini->smcd_version = 0;
684 /* Check for VLAN ID and register it on ISM device just for CLC handshake */
685 static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
686 struct smc_init_info *ini)
688 if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
689 return SMC_CLC_DECL_ISMVLANERR;
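/* determine the ISM and/or RDMA devices that can be proposed for this
 * connection, downgrading smc_type_v1/v2 for each unavailable mode
 */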
693 static int smc_find_proposal_devices(struct smc_sock *smc,
694 struct smc_init_info *ini)
698 /* check if there is an ism device available */
699 if (ini->smcd_version & SMC_V1) {
700 if (smc_find_ism_device(smc, ini) ||
701 smc_connect_ism_vlan_setup(smc, ini)) {
702 if (ini->smc_type_v1 == SMC_TYPE_B)
703 ini->smc_type_v1 = SMC_TYPE_R;
705 ini->smc_type_v1 = SMC_TYPE_N;
706 } /* else ISM V1 is supported for this connection */
707 if (smc_find_rdma_device(smc, ini)) {
708 if (ini->smc_type_v1 == SMC_TYPE_B)
709 ini->smc_type_v1 = SMC_TYPE_D;
711 ini->smc_type_v1 = SMC_TYPE_N;
712 } /* else RDMA is supported for this connection */
714 if (smc_ism_is_v2_capable() && smc_find_ism_v2_device_clnt(smc, ini))
715 ini->smc_type_v2 = SMC_TYPE_N;
	/* if neither ISM nor RDMA is supported, fall back */
718 if (!smcr_indicated(ini->smc_type_v1) &&
719 ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
720 rc = SMC_CLC_DECL_NOSMCDEV;
/* clean up the temporary VLAN ID registration used for the CLC handshake. If
 * ISM is used, the VLAN ID will be registered again during connection setup.
 */
728 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
729 struct smc_init_info *ini)
731 if (!smcd_indicated(ini->smc_type_v1))
733 if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
734 return SMC_CLC_DECL_CNFERR;
738 #define SMC_CLC_MAX_ACCEPT_LEN \
739 (sizeof(struct smc_clc_msg_accept_confirm_v2) + \
740 sizeof(struct smc_clc_first_contact_ext) + \
741 sizeof(struct smc_clc_msg_trail))
743 /* CLC handshake during connect */
744 static int smc_connect_clc(struct smc_sock *smc,
745 struct smc_clc_msg_accept_confirm_v2 *aclc2,
746 struct smc_init_info *ini)
750 /* do inband token exchange */
751 rc = smc_clc_send_proposal(smc, ini);
754 /* receive SMC Accept CLC message */
755 return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
756 SMC_CLC_ACCEPT, CLC_WAIT_TIME);
759 /* setup for RDMA connection of client */
760 static int smc_connect_rdma(struct smc_sock *smc,
761 struct smc_clc_msg_accept_confirm *aclc,
762 struct smc_init_info *ini)
764 int i, reason_code = 0;
765 struct smc_link *link;
767 ini->is_smcd = false;
768 ini->ib_lcl = &aclc->r0.lcl;
769 ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
770 ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
772 mutex_lock(&smc_client_lgr_pending);
773 reason_code = smc_conn_create(smc, ini);
775 mutex_unlock(&smc_client_lgr_pending);
779 smc_conn_save_peer_info(smc, aclc);
781 if (ini->first_contact_local) {
782 link = smc->conn.lnk;
784 /* set link that was assigned by server */
786 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
787 struct smc_link *l = &smc->conn.lgr->lnk[i];
789 if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
790 !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
792 !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
793 sizeof(l->peer_mac))) {
799 reason_code = SMC_CLC_DECL_NOSRVLINK;
802 smc_switch_link_and_count(&smc->conn, link);
805 /* create send buffer and rmb */
806 if (smc_buf_create(smc, false)) {
807 reason_code = SMC_CLC_DECL_MEM;
811 if (ini->first_contact_local)
812 smc_link_save_peer_info(link, aclc);
814 if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
815 reason_code = SMC_CLC_DECL_ERR_RTOK;
822 if (ini->first_contact_local) {
823 if (smc_ib_ready_link(link)) {
824 reason_code = SMC_CLC_DECL_ERR_RDYLNK;
828 if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
829 reason_code = SMC_CLC_DECL_ERR_REGRMB;
833 smc_rmb_sync_sg_for_device(&smc->conn);
835 reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
842 if (ini->first_contact_local) {
843 /* QP confirmation over RoCE fabric */
844 smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
845 reason_code = smcr_clnt_conf_first_link(smc);
846 smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
850 mutex_unlock(&smc_client_lgr_pending);
852 smc_copy_sock_settings_to_clc(smc);
853 smc->connect_nonblock = 0;
854 if (smc->sk.sk_state == SMC_INIT)
855 smc->sk.sk_state = SMC_ACTIVE;
859 smc_conn_abort(smc, ini->first_contact_local);
860 mutex_unlock(&smc_client_lgr_pending);
861 smc->connect_nonblock = 0;
/* The server has chosen one of the proposed ISM devices for the communication.
 * Determine the chosen ISM device from the CHID of the received CLC ACCEPT.
 */
870 smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
871 struct smc_init_info *ini)
875 for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
876 if (ini->ism_chid[i] == ntohs(aclc->chid)) {
877 ini->ism_selected = i;
885 /* setup for ISM connection of client */
886 static int smc_connect_ism(struct smc_sock *smc,
887 struct smc_clc_msg_accept_confirm *aclc,
888 struct smc_init_info *ini)
893 ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
895 if (aclc->hdr.version == SMC_V2) {
896 struct smc_clc_msg_accept_confirm_v2 *aclc_v2 =
897 (struct smc_clc_msg_accept_confirm_v2 *)aclc;
899 rc = smc_v2_determine_accepted_chid(aclc_v2, ini);
903 ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;
905 /* there is only one lgr role for SMC-D; use server lock */
906 mutex_lock(&smc_server_lgr_pending);
907 rc = smc_conn_create(smc, ini);
909 mutex_unlock(&smc_server_lgr_pending);
913 /* Create send and receive buffers */
914 rc = smc_buf_create(smc, true);
916 rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
920 smc_conn_save_peer_info(smc, aclc);
925 rc = smc_clc_send_confirm(smc, ini->first_contact_local,
929 mutex_unlock(&smc_server_lgr_pending);
931 smc_copy_sock_settings_to_clc(smc);
932 smc->connect_nonblock = 0;
933 if (smc->sk.sk_state == SMC_INIT)
934 smc->sk.sk_state = SMC_ACTIVE;
938 smc_conn_abort(smc, ini->first_contact_local);
939 mutex_unlock(&smc_server_lgr_pending);
940 smc->connect_nonblock = 0;
/* check if the received accept type and version match a proposed one */
946 static int smc_connect_check_aclc(struct smc_init_info *ini,
947 struct smc_clc_msg_accept_confirm *aclc)
949 if ((aclc->hdr.typev1 == SMC_TYPE_R &&
950 !smcr_indicated(ini->smc_type_v1)) ||
951 (aclc->hdr.typev1 == SMC_TYPE_D &&
952 ((!smcd_indicated(ini->smc_type_v1) &&
953 !smcd_indicated(ini->smc_type_v2)) ||
954 (aclc->hdr.version == SMC_V1 &&
955 !smcd_indicated(ini->smc_type_v1)) ||
956 (aclc->hdr.version == SMC_V2 &&
957 !smcd_indicated(ini->smc_type_v2)))))
958 return SMC_CLC_DECL_MODEUNSUPP;
963 /* perform steps before actually connecting */
964 static int __smc_connect(struct smc_sock *smc)
966 u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
967 struct smc_clc_msg_accept_confirm_v2 *aclc2;
968 struct smc_clc_msg_accept_confirm *aclc;
969 struct smc_init_info *ini = NULL;
973 if (smc->use_fallback)
974 return smc_connect_fallback(smc, smc->fallback_rsn);
976 /* if peer has not signalled SMC-capability, fall back */
977 if (!tcp_sk(smc->clcsock->sk)->syn_smc)
978 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
980 /* IPSec connections opt out of SMC optimizations */
981 if (using_ipsec(smc))
982 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
985 ini = kzalloc(sizeof(*ini), GFP_KERNEL);
987 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
990 ini->smcd_version = SMC_V1;
991 ini->smcd_version |= smc_ism_is_v2_capable() ? SMC_V2 : 0;
992 ini->smc_type_v1 = SMC_TYPE_B;
993 ini->smc_type_v2 = smc_ism_is_v2_capable() ? SMC_TYPE_D : SMC_TYPE_N;
995 /* get vlan id from IP device */
996 if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
997 ini->smcd_version &= ~SMC_V1;
998 ini->smc_type_v1 = SMC_TYPE_N;
999 if (!ini->smcd_version) {
1000 rc = SMC_CLC_DECL_GETVLANERR;
1005 rc = smc_find_proposal_devices(smc, ini);
1009 buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
1011 rc = SMC_CLC_DECL_MEM;
1014 aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
1015 aclc = (struct smc_clc_msg_accept_confirm *)aclc2;
1017 /* perform CLC handshake */
1018 rc = smc_connect_clc(smc, aclc2, ini);
1022 /* check if smc modes and versions of CLC proposal and accept match */
1023 rc = smc_connect_check_aclc(ini, aclc);
1024 version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
1025 ini->smcd_version = version;
1029 /* depending on previous steps, connect using rdma or ism */
1030 if (aclc->hdr.typev1 == SMC_TYPE_R)
1031 rc = smc_connect_rdma(smc, aclc, ini);
1032 else if (aclc->hdr.typev1 == SMC_TYPE_D)
1033 rc = smc_connect_ism(smc, aclc, ini);
1037 SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
1038 smc_connect_ism_vlan_cleanup(smc, ini);
1044 smc_connect_ism_vlan_cleanup(smc, ini);
1048 return smc_connect_decline_fallback(smc, rc, version);
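/* worker for a non-blocking connect: wait for the TCP connect to
 * complete, then run the SMC handshake via __smc_connect()
 */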
1051 static void smc_connect_work(struct work_struct *work)
1053 struct smc_sock *smc = container_of(work, struct smc_sock,
1055 long timeo = smc->sk.sk_sndtimeo;
1059 timeo = MAX_SCHEDULE_TIMEOUT;
1060 lock_sock(smc->clcsock->sk);
1061 if (smc->clcsock->sk->sk_err) {
1062 smc->sk.sk_err = smc->clcsock->sk->sk_err;
1063 } else if ((1 << smc->clcsock->sk->sk_state) &
1064 (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1065 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
1066 if ((rc == -EPIPE) &&
1067 ((1 << smc->clcsock->sk->sk_state) &
1068 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
1071 release_sock(smc->clcsock->sk);
1072 lock_sock(&smc->sk);
1073 if (rc != 0 || smc->sk.sk_err) {
1074 smc->sk.sk_state = SMC_CLOSED;
1075 if (rc == -EPIPE || rc == -EAGAIN)
1076 smc->sk.sk_err = EPIPE;
1077 else if (signal_pending(current))
1078 smc->sk.sk_err = -sock_intr_errno(timeo);
1079 sock_put(&smc->sk); /* passive closing */
1083 rc = __smc_connect(smc);
1085 smc->sk.sk_err = -rc;
1088 if (!sock_flag(&smc->sk, SOCK_DEAD)) {
1089 if (smc->sk.sk_err) {
1090 smc->sk.sk_state_change(&smc->sk);
1091 } else { /* allow polling before and after fallback decision */
1092 smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
1093 smc->sk.sk_write_space(&smc->sk);
1096 release_sock(&smc->sk);
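/* connect entry point: delegate the TCP connect to the clcsock and run
 * the SMC handshake inline, or defer it to smc_connect_work() for
 * non-blocking sockets
 */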
1099 static int smc_connect(struct socket *sock, struct sockaddr *addr,
1100 int alen, int flags)
1102 struct sock *sk = sock->sk;
1103 struct smc_sock *smc;
1108 /* separate smc parameter checking to be safe */
1109 if (alen < sizeof(addr->sa_family))
1111 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
1115 switch (sk->sk_state) {
1125 smc_copy_sock_settings_to_clc(smc);
1126 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1127 if (smc->connect_nonblock) {
1131 rc = kernel_connect(smc->clcsock, addr, alen, flags);
1132 if (rc && rc != -EINPROGRESS)
1135 sock_hold(&smc->sk); /* sock put in passive closing */
1136 if (smc->use_fallback)
1138 if (flags & O_NONBLOCK) {
1139 if (queue_work(smc_hs_wq, &smc->connect_work))
1140 smc->connect_nonblock = 1;
1143 rc = __smc_connect(smc);
1147 rc = 0; /* success cases including fallback */
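/* accept a connection on the internal CLC/TCP listen socket and wrap the
 * new clcsock in a freshly allocated smc sock
 */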
1156 static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
1158 struct socket *new_clcsock = NULL;
1159 struct sock *lsk = &lsmc->sk;
1160 struct sock *new_sk;
1164 new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
1167 lsk->sk_err = ENOMEM;
1172 *new_smc = smc_sk(new_sk);
1174 mutex_lock(&lsmc->clcsock_release_lock);
1176 rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
1177 mutex_unlock(&lsmc->clcsock_release_lock);
1179 if (rc < 0 && rc != -EAGAIN)
1181 if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
1182 new_sk->sk_prot->unhash(new_sk);
1184 sock_release(new_clcsock);
1185 new_sk->sk_state = SMC_CLOSED;
1186 sock_set_flag(new_sk, SOCK_DEAD);
1187 sock_put(new_sk); /* final */
1192 /* new clcsock has inherited the smc listen-specific sk_data_ready
1193 * function; switch it back to the original sk_data_ready function
1195 new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
1196 (*new_smc)->clcsock = new_clcsock;
/* add a newly created sock to the accept queue of the listen sock as
 * a candidate for a subsequent socket accept call from user space
 */
1204 static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
1206 struct smc_sock *par = smc_sk(parent);
1208 sock_hold(sk); /* sock_put in smc_accept_unlink () */
1209 spin_lock(&par->accept_q_lock);
1210 list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
1211 spin_unlock(&par->accept_q_lock);
1212 sk_acceptq_added(parent);
/* remove a socket from the accept queue of its parent listening socket */
1216 static void smc_accept_unlink(struct sock *sk)
1218 struct smc_sock *par = smc_sk(sk)->listen_smc;
1220 spin_lock(&par->accept_q_lock);
1221 list_del_init(&smc_sk(sk)->accept_q);
1222 spin_unlock(&par->accept_q_lock);
1223 sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
1224 sock_put(sk); /* sock_hold in smc_accept_enqueue */
1227 /* remove a sock from the accept queue to bind it to a new socket created
1228 * for a socket accept call from user space
1230 struct sock *smc_accept_dequeue(struct sock *parent,
1231 struct socket *new_sock)
1233 struct smc_sock *isk, *n;
1234 struct sock *new_sk;
1236 list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
1237 new_sk = (struct sock *)isk;
1239 smc_accept_unlink(new_sk);
1240 if (new_sk->sk_state == SMC_CLOSED) {
1241 new_sk->sk_prot->unhash(new_sk);
1243 sock_release(isk->clcsock);
1244 isk->clcsock = NULL;
1246 sock_put(new_sk); /* final */
1250 sock_graft(new_sk, new_sock);
1251 if (isk->use_fallback) {
1252 smc_sk(new_sk)->clcsock->file = new_sock->file;
1253 isk->clcsock->file->private_data = isk->clcsock;
1261 /* clean up for a created but never accepted sock */
1262 void smc_close_non_accepted(struct sock *sk)
1264 struct smc_sock *smc = smc_sk(sk);
1266 sock_hold(sk); /* sock_put below */
1268 if (!sk->sk_lingertime)
1269 /* wait for peer closing */
1270 sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
1273 sock_put(sk); /* sock_hold above */
1274 sock_put(sk); /* final sock_put */
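/* server: confirm the first link of a new link group with the client;
 * registers the rmb, performs the CONFIRM LINK exchange and then tries
 * to establish a second link
 */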
1277 static int smcr_serv_conf_first_link(struct smc_sock *smc)
1279 struct smc_link *link = smc->conn.lnk;
1280 struct smc_llc_qentry *qentry;
1283 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
1284 return SMC_CLC_DECL_ERR_REGRMB;
1286 /* send CONFIRM LINK request to client over the RoCE fabric */
1287 rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
1289 return SMC_CLC_DECL_TIMEOUT_CL;
1291 /* receive CONFIRM LINK response from client over the RoCE fabric */
1292 qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
1293 SMC_LLC_CONFIRM_LINK);
1295 struct smc_clc_msg_decline dclc;
1297 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1298 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
1299 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
1301 smc_llc_save_peer_uid(qentry);
1302 rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
1303 smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
1305 return SMC_CLC_DECL_RMBE_EC;
1307 /* confirm_rkey is implicit on 1st contact */
1308 smc->conn.rmb_desc->is_conf_rkey = true;
1310 smc_llc_link_active(link);
1311 smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
1313 /* initial contact - try to establish second link */
1314 smc_llc_srv_add_link(link);
1318 /* listen worker: finish */
1319 static void smc_listen_out(struct smc_sock *new_smc)
1321 struct smc_sock *lsmc = new_smc->listen_smc;
1322 struct sock *newsmcsk = &new_smc->sk;
1324 if (lsmc->sk.sk_state == SMC_LISTEN) {
1325 lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1326 smc_accept_enqueue(&lsmc->sk, newsmcsk);
1327 release_sock(&lsmc->sk);
1328 } else { /* no longer listening */
1329 smc_close_non_accepted(newsmcsk);
1332 /* Wake up accept */
1333 lsmc->sk.sk_data_ready(&lsmc->sk);
1334 sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
1337 /* listen worker: finish in state connected */
1338 static void smc_listen_out_connected(struct smc_sock *new_smc)
1340 struct sock *newsmcsk = &new_smc->sk;
1342 sk_refcnt_debug_inc(newsmcsk);
1343 if (newsmcsk->sk_state == SMC_INIT)
1344 newsmcsk->sk_state = SMC_ACTIVE;
1346 smc_listen_out(new_smc);
1349 /* listen worker: finish in error state */
1350 static void smc_listen_out_err(struct smc_sock *new_smc)
1352 struct sock *newsmcsk = &new_smc->sk;
1353 struct net *net = sock_net(newsmcsk);
1355 this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
1356 if (newsmcsk->sk_state == SMC_INIT)
1357 sock_put(&new_smc->sk); /* passive closing */
1358 newsmcsk->sk_state = SMC_CLOSED;
1360 smc_listen_out(new_smc);
1363 /* listen worker: decline and fall back if possible */
1364 static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
1365 int local_first, u8 version)
1367 /* RDMA setup failed, switch back to TCP */
1368 smc_conn_abort(new_smc, local_first);
1369 if (reason_code < 0) { /* error, no fallback possible */
1370 smc_listen_out_err(new_smc);
1373 smc_switch_to_fallback(new_smc, reason_code);
1374 if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
1375 if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
1376 smc_listen_out_err(new_smc);
1380 smc_listen_out_connected(new_smc);
1383 /* listen worker: version checking */
1384 static int smc_listen_v2_check(struct smc_sock *new_smc,
1385 struct smc_clc_msg_proposal *pclc,
1386 struct smc_init_info *ini)
1388 struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
1389 struct smc_clc_v2_extension *pclc_v2_ext;
1390 int rc = SMC_CLC_DECL_PEERNOSMC;
1392 ini->smc_type_v1 = pclc->hdr.typev1;
1393 ini->smc_type_v2 = pclc->hdr.typev2;
1394 ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0;
1395 if (pclc->hdr.version > SMC_V1)
1396 ini->smcd_version |=
1397 ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
1398 if (!(ini->smcd_version & SMC_V2)) {
1399 rc = SMC_CLC_DECL_PEERNOSMC;
1402 if (!smc_ism_is_v2_capable()) {
1403 ini->smcd_version &= ~SMC_V2;
1404 rc = SMC_CLC_DECL_NOISM2SUPP;
1407 pclc_v2_ext = smc_get_clc_v2_ext(pclc);
1409 ini->smcd_version &= ~SMC_V2;
1410 rc = SMC_CLC_DECL_NOV2EXT;
1413 pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
1414 if (!pclc_smcd_v2_ext) {
1415 ini->smcd_version &= ~SMC_V2;
1416 rc = SMC_CLC_DECL_NOV2DEXT;
1420 if (!ini->smcd_version)
1426 /* listen worker: check prefixes */
1427 static int smc_listen_prfx_check(struct smc_sock *new_smc,
1428 struct smc_clc_msg_proposal *pclc)
1430 struct smc_clc_msg_proposal_prefix *pclc_prfx;
1431 struct socket *newclcsock = new_smc->clcsock;
1433 if (pclc->hdr.typev1 == SMC_TYPE_N)
1435 pclc_prfx = smc_clc_proposal_get_prefix(pclc);
1436 if (smc_clc_prfx_match(newclcsock, pclc_prfx))
1437 return SMC_CLC_DECL_DIFFPREFIX;
1442 /* listen worker: initialize connection and buffers */
1443 static int smc_listen_rdma_init(struct smc_sock *new_smc,
1444 struct smc_init_info *ini)
1448 /* allocate connection / link group */
1449 rc = smc_conn_create(new_smc, ini);
1453 /* create send buffer and rmb */
1454 if (smc_buf_create(new_smc, false))
1455 return SMC_CLC_DECL_MEM;
1460 /* listen worker: initialize connection and buffers for SMC-D */
1461 static int smc_listen_ism_init(struct smc_sock *new_smc,
1462 struct smc_init_info *ini)
1466 rc = smc_conn_create(new_smc, ini);
1470 /* Create send and receive buffers */
1471 rc = smc_buf_create(new_smc, true);
1473 smc_conn_abort(new_smc, ini->first_contact_local);
1474 return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
1481 static bool smc_is_already_selected(struct smcd_dev *smcd,
1482 struct smc_init_info *ini,
1487 for (i = 0; i < matches; i++)
1488 if (smcd == ini->ism_dev[i])
1494 /* check for ISM devices matching proposed ISM devices */
1495 static void smc_check_ism_v2_match(struct smc_init_info *ini,
1496 u16 proposed_chid, u64 proposed_gid,
1497 unsigned int *matches)
1499 struct smcd_dev *smcd;
1501 list_for_each_entry(smcd, &smcd_dev_list.list, list) {
1502 if (smcd->going_away)
1504 if (smc_is_already_selected(smcd, ini, *matches))
1506 if (smc_ism_get_chid(smcd) == proposed_chid &&
1507 !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
1508 ini->ism_peer_gid[*matches] = proposed_gid;
1509 ini->ism_dev[*matches] = smcd;
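/* remember an error code from the device search in ini, keeping the
 * first reason code that was seen
 */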
1516 static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
1522 static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
1523 struct smc_clc_msg_proposal *pclc,
1524 struct smc_init_info *ini)
1526 struct smc_clc_smcd_v2_extension *smcd_v2_ext;
1527 struct smc_clc_v2_extension *smc_v2_ext;
1528 struct smc_clc_msg_smcd *pclc_smcd;
1529 unsigned int matches = 0;
1534 if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
1537 pclc_smcd = smc_get_clc_msg_smcd(pclc);
1538 smc_v2_ext = smc_get_clc_v2_ext(pclc);
1539 smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
1541 !smc_v2_ext->hdr.flag.seid) { /* no system EID support for SMCD */
1542 smc_find_ism_store_rc(SMC_CLC_DECL_NOSEID, ini);
1546 mutex_lock(&smcd_dev_list.mutex);
1547 if (pclc_smcd->ism.chid)
1548 /* check for ISM device matching proposed native ISM device */
1549 smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
1550 ntohll(pclc_smcd->ism.gid), &matches);
1551 for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) {
		/* check for ISM devices matching proposed non-native ISM
		 * devices
		 */
1555 smc_check_ism_v2_match(ini,
1556 ntohs(smcd_v2_ext->gidchid[i - 1].chid),
1557 ntohll(smcd_v2_ext->gidchid[i - 1].gid),
1560 mutex_unlock(&smcd_dev_list.mutex);
1562 if (ini->ism_dev[0]) {
1563 smc_ism_get_system_eid(ini->ism_dev[0], &eid);
1564 if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN))
1570 /* separate - outside the smcd_dev_list.lock */
1571 smcd_version = ini->smcd_version;
1572 for (i = 0; i < matches; i++) {
1573 ini->smcd_version = SMC_V2;
1574 ini->is_smcd = true;
1575 ini->ism_selected = i;
1576 rc = smc_listen_ism_init(new_smc, ini);
1578 smc_find_ism_store_rc(rc, ini);
1579 /* try next active ISM device */
1582 return; /* matching and usable V2 ISM device found */
1584 /* no V2 ISM device could be initialized */
1585 ini->smcd_version = smcd_version; /* restore original value */
1588 ini->smcd_version &= ~SMC_V2;
1589 ini->ism_dev[0] = NULL;
1590 ini->is_smcd = false;
1593 static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
1594 struct smc_clc_msg_proposal *pclc,
1595 struct smc_init_info *ini)
1597 struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
1600 /* check if ISM V1 is available */
1601 if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
1603 ini->is_smcd = true; /* prepare ISM check */
1604 ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
1605 rc = smc_find_ism_device(new_smc, ini);
1608 ini->ism_selected = 0;
1609 rc = smc_listen_ism_init(new_smc, ini);
1611 return; /* V1 ISM device found */
1614 smc_find_ism_store_rc(rc, ini);
1615 ini->ism_dev[0] = NULL;
1616 ini->is_smcd = false;
1619 /* listen worker: register buffers */
1620 static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
1622 struct smc_connection *conn = &new_smc->conn;
1625 if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
1626 return SMC_CLC_DECL_ERR_REGRMB;
1628 smc_rmb_sync_sg_for_device(&new_smc->conn);
1633 static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
1634 struct smc_clc_msg_proposal *pclc,
1635 struct smc_init_info *ini)
1639 if (!smcr_indicated(ini->smc_type_v1))
1640 return SMC_CLC_DECL_NOSMCDEV;
1642 /* prepare RDMA check */
1643 ini->ib_lcl = &pclc->lcl;
1644 rc = smc_find_rdma_device(new_smc, ini);
1646 /* no RDMA device found */
1647 if (ini->smc_type_v1 == SMC_TYPE_B)
1648 /* neither ISM nor RDMA device found */
1649 rc = SMC_CLC_DECL_NOSMCDEV;
1652 rc = smc_listen_rdma_init(new_smc, ini);
1655 return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
/* determine the local device matching the proposal */
1659 static int smc_listen_find_device(struct smc_sock *new_smc,
1660 struct smc_clc_msg_proposal *pclc,
1661 struct smc_init_info *ini)
1665 /* check for ISM device matching V2 proposed device */
1666 smc_find_ism_v2_device_serv(new_smc, pclc, ini);
1667 if (ini->ism_dev[0])
1670 if (!(ini->smcd_version & SMC_V1))
1671 return ini->rc ?: SMC_CLC_DECL_NOSMCD2DEV;
1673 /* check for matching IP prefix and subnet length */
1674 rc = smc_listen_prfx_check(new_smc, pclc);
1676 return ini->rc ?: rc;
1678 /* get vlan id from IP device */
1679 if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
1680 return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
1682 /* check for ISM device matching V1 proposed device */
1683 smc_find_ism_v1_device_serv(new_smc, pclc, ini);
1684 if (ini->ism_dev[0])
1687 if (pclc->hdr.typev1 == SMC_TYPE_D)
1688 /* skip RDMA and decline */
1689 return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
1691 /* check if RDMA is available */
1692 rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
1693 smc_find_ism_store_rc(rc, ini);
1695 return (!rc) ? 0 : ini->rc;
1698 /* listen worker: finish RDMA setup */
1699 static int smc_listen_rdma_finish(struct smc_sock *new_smc,
1700 struct smc_clc_msg_accept_confirm *cclc,
1703 struct smc_link *link = new_smc->conn.lnk;
1704 int reason_code = 0;
1707 smc_link_save_peer_info(link, cclc);
1709 if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
1710 return SMC_CLC_DECL_ERR_RTOK;
1713 if (smc_ib_ready_link(link))
1714 return SMC_CLC_DECL_ERR_RDYLNK;
1715 /* QP confirmation over RoCE fabric */
1716 smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
1717 reason_code = smcr_serv_conf_first_link(new_smc);
1718 smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
1723 /* setup for connection of server */
1724 static void smc_listen_work(struct work_struct *work)
1726 struct smc_sock *new_smc = container_of(work, struct smc_sock,
1728 u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
1729 struct socket *newclcsock = new_smc->clcsock;
1730 struct smc_clc_msg_accept_confirm *cclc;
1731 struct smc_clc_msg_proposal_area *buf;
1732 struct smc_clc_msg_proposal *pclc;
1733 struct smc_init_info *ini = NULL;
1736 if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
1737 return smc_listen_out_err(new_smc);
1739 if (new_smc->use_fallback) {
1740 smc_listen_out_connected(new_smc);
1744 /* check if peer is smc capable */
1745 if (!tcp_sk(newclcsock->sk)->syn_smc) {
1746 smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
1747 smc_listen_out_connected(new_smc);
1751 /* do inband token exchange -
1752 * wait for and receive SMC Proposal CLC message
1754 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
1756 rc = SMC_CLC_DECL_MEM;
1759 pclc = (struct smc_clc_msg_proposal *)buf;
1760 rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
1761 SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
1764 version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version;
1766 /* IPSec connections opt out of SMC optimizations */
1767 if (using_ipsec(new_smc)) {
1768 rc = SMC_CLC_DECL_IPSEC;
1772 ini = kzalloc(sizeof(*ini), GFP_KERNEL);
1774 rc = SMC_CLC_DECL_MEM;
1778 /* initial version checking */
1779 rc = smc_listen_v2_check(new_smc, pclc, ini);
1783 mutex_lock(&smc_server_lgr_pending);
1784 smc_close_init(new_smc);
1785 smc_rx_init(new_smc);
1786 smc_tx_init(new_smc);
1788 /* determine ISM or RoCE device used for connection */
1789 rc = smc_listen_find_device(new_smc, pclc, ini);
1793 /* send SMC Accept CLC message */
1794 rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
1795 ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
1799 /* SMC-D does not need this lock any more */
1801 mutex_unlock(&smc_server_lgr_pending);
1803 /* receive SMC Confirm CLC message */
1804 memset(buf, 0, sizeof(*buf));
1805 cclc = (struct smc_clc_msg_accept_confirm *)buf;
1806 rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
1807 SMC_CLC_CONFIRM, CLC_WAIT_TIME);
1815 if (!ini->is_smcd) {
1816 rc = smc_listen_rdma_finish(new_smc, cclc,
1817 ini->first_contact_local);
1820 mutex_unlock(&smc_server_lgr_pending);
1822 smc_conn_save_peer_info(new_smc, cclc);
1823 smc_listen_out_connected(new_smc);
1824 SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
1828 mutex_unlock(&smc_server_lgr_pending);
1830 smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
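/* worker of the listen socket: accept new clcsock connections in a loop
 * and queue a handshake worker for each new child socket
 */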
1837 static void smc_tcp_listen_work(struct work_struct *work)
1839 struct smc_sock *lsmc = container_of(work, struct smc_sock,
1841 struct sock *lsk = &lsmc->sk;
1842 struct smc_sock *new_smc;
1846 while (lsk->sk_state == SMC_LISTEN) {
1847 rc = smc_clcsock_accept(lsmc, &new_smc);
1848 if (rc) /* clcsock accept queue empty or error */
1853 new_smc->listen_smc = lsmc;
1854 new_smc->use_fallback = lsmc->use_fallback;
1855 new_smc->fallback_rsn = lsmc->fallback_rsn;
1856 sock_hold(lsk); /* sock_put in smc_listen_work */
1857 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
1858 smc_copy_sock_settings_to_smc(new_smc);
1859 new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
1860 new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
1861 sock_hold(&new_smc->sk); /* sock_put in passive closing */
1862 if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
1863 sock_put(&new_smc->sk);
1868 sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
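/* smc-specific sk_data_ready callback of the clc listen socket: run the
 * original callback, then schedule the tcp listen worker
 */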
1871 static void smc_clcsock_data_ready(struct sock *listen_clcsock)
1873 struct smc_sock *lsmc;
1875 lsmc = (struct smc_sock *)
1876 ((uintptr_t)listen_clcsock->sk_user_data & ~SK_USER_DATA_NOCOPY);
1879 lsmc->clcsk_data_ready(listen_clcsock);
1880 if (lsmc->sk.sk_state == SMC_LISTEN) {
1881 sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
1882 if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
1883 sock_put(&lsmc->sk);
1887 static int smc_listen(struct socket *sock, int backlog)
1889 struct sock *sk = sock->sk;
1890 struct smc_sock *smc;
1897 if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
1898 smc->connect_nonblock)
1902 if (sk->sk_state == SMC_LISTEN) {
1903 sk->sk_max_ack_backlog = backlog;
	/* some socket options are handled in core, so we cannot apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
1909 smc_copy_sock_settings_to_clc(smc);
1910 if (!smc->use_fallback)
1911 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1913 /* save original sk_data_ready function and establish
1914 * smc-specific sk_data_ready function
1916 smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
1917 smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
1918 smc->clcsock->sk->sk_user_data =
1919 (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
1920 rc = kernel_listen(smc->clcsock, backlog);
1922 smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
1925 sk->sk_max_ack_backlog = backlog;
1926 sk->sk_ack_backlog = 0;
1927 sk->sk_state = SMC_LISTEN;
1934 static int smc_accept(struct socket *sock, struct socket *new_sock,
1935 int flags, bool kern)
1937 struct sock *sk = sock->sk, *nsk;
1938 DECLARE_WAITQUEUE(wait, current);
1939 struct smc_sock *lsmc;
1944 sock_hold(sk); /* sock_put below */
1947 if (lsmc->sk.sk_state != SMC_LISTEN) {
1953 /* Wait for an incoming connection */
1954 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1955 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1956 while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
1957 set_current_state(TASK_INTERRUPTIBLE);
1963 timeo = schedule_timeout(timeo);
1964 /* wakeup by sk_data_ready in smc_listen_work() */
1965 sched_annotate_sleep();
1967 if (signal_pending(current)) {
1968 rc = sock_intr_errno(timeo);
1972 set_current_state(TASK_RUNNING);
1973 remove_wait_queue(sk_sleep(sk), &wait);
1976 rc = sock_error(nsk);
1981 if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
1982 /* wait till data arrives on the socket */
1983 timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
1985 if (smc_sk(nsk)->use_fallback) {
1986 struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
1989 if (skb_queue_empty(&clcsk->sk_receive_queue))
1990 sk_wait_data(clcsk, &timeo, NULL);
1991 release_sock(clcsk);
1992 } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
1994 smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
2000 sock_put(sk); /* sock_hold above */
2004 static int smc_getname(struct socket *sock, struct sockaddr *addr,
2007 struct smc_sock *smc;
2009 if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
2010 (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
2013 smc = smc_sk(sock->sk);
2015 return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
2018 static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2020 struct sock *sk = sock->sk;
2021 struct smc_sock *smc;
2026 if ((sk->sk_state != SMC_ACTIVE) &&
2027 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2028 (sk->sk_state != SMC_INIT))
2031 if (msg->msg_flags & MSG_FASTOPEN) {
2032 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2033 smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
2040 if (smc->use_fallback) {
2041 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
2043 rc = smc_tx_sendmsg(smc, msg, len);
2044 SMC_STAT_TX_PAYLOAD(smc, len, rc);
2051 static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2054 struct sock *sk = sock->sk;
2055 struct smc_sock *smc;
2060 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
2061 /* socket was connected before, no more data to read */
2065 if ((sk->sk_state == SMC_INIT) ||
2066 (sk->sk_state == SMC_LISTEN) ||
2067 (sk->sk_state == SMC_CLOSED))
2070 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
2075 if (smc->use_fallback) {
2076 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
2078 msg->msg_namelen = 0;
2079 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
2080 SMC_STAT_RX_PAYLOAD(smc, rc, rc);
2088 static __poll_t smc_accept_poll(struct sock *parent)
2090 struct smc_sock *isk = smc_sk(parent);
2093 spin_lock(&isk->accept_q_lock);
2094 if (!list_empty(&isk->accept_q))
2095 mask = EPOLLIN | EPOLLRDNORM;
2096 spin_unlock(&isk->accept_q_lock);
2101 static __poll_t smc_poll(struct file *file, struct socket *sock,
2104 struct sock *sk = sock->sk;
2105 struct smc_sock *smc;
2111 smc = smc_sk(sock->sk);
2112 if (smc->use_fallback) {
2113 /* delegate to CLC child sock */
2114 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
2115 sk->sk_err = smc->clcsock->sk->sk_err;
2117 if (sk->sk_state != SMC_CLOSED)
2118 sock_poll_wait(file, sock, wait);
2121 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
2122 (sk->sk_state == SMC_CLOSED))
2124 if (sk->sk_state == SMC_LISTEN) {
2125 /* woken up by sk_data_ready in smc_listen_work() */
2126 mask |= smc_accept_poll(sk);
	} else if (smc->use_fallback) { /* as a result of connect_work() */
2128 mask |= smc->clcsock->ops->poll(file, smc->clcsock,
2130 sk->sk_err = smc->clcsock->sk->sk_err;
2132 if ((sk->sk_state != SMC_INIT &&
2133 atomic_read(&smc->conn.sndbuf_space)) ||
2134 sk->sk_shutdown & SEND_SHUTDOWN) {
2135 mask |= EPOLLOUT | EPOLLWRNORM;
2137 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2138 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2140 if (atomic_read(&smc->conn.bytes_to_rcv))
2141 mask |= EPOLLIN | EPOLLRDNORM;
2142 if (sk->sk_shutdown & RCV_SHUTDOWN)
2143 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2144 if (sk->sk_state == SMC_APPCLOSEWAIT1)
2146 if (smc->conn.urg_state == SMC_URG_VALID)
2154 static int smc_shutdown(struct socket *sock, int how)
2156 struct sock *sk = sock->sk;
2157 bool do_shutdown = true;
2158 struct smc_sock *smc;
2165 if ((how < SHUT_RD) || (how > SHUT_RDWR))
2171 if ((sk->sk_state != SMC_ACTIVE) &&
2172 (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
2173 (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
2174 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2175 (sk->sk_state != SMC_APPCLOSEWAIT2) &&
2176 (sk->sk_state != SMC_APPFINCLOSEWAIT))
2178 if (smc->use_fallback) {
2179 rc = kernel_sock_shutdown(smc->clcsock, how);
2180 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
2181 if (sk->sk_shutdown == SHUTDOWN_MASK)
2182 sk->sk_state = SMC_CLOSED;
2186 case SHUT_RDWR: /* shutdown in both directions */
2187 old_state = sk->sk_state;
2188 rc = smc_close_active(smc);
2189 if (old_state == SMC_ACTIVE &&
2190 sk->sk_state == SMC_PEERCLOSEWAIT1)
2191 do_shutdown = false;
2194 rc = smc_close_shutdown_write(smc);
2198 /* nothing more to do because peer is not involved */
2201 if (do_shutdown && smc->clcsock)
2202 rc1 = kernel_sock_shutdown(smc->clcsock, how);
2203 /* map sock_shutdown_cmd constants to sk_shutdown value range */
2204 sk->sk_shutdown |= how + 1;
2208 return rc ? rc : rc1;
2211 static int smc_setsockopt(struct socket *sock, int level, int optname,
2212 sockptr_t optval, unsigned int optlen)
2214 struct sock *sk = sock->sk;
2215 struct smc_sock *smc;
2218 if (level == SOL_TCP && optname == TCP_ULP)
	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
2226 if (unlikely(!smc->clcsock->ops->setsockopt))
2229 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
2231 if (smc->clcsock->sk->sk_err) {
2232 sk->sk_err = smc->clcsock->sk->sk_err;
2233 sk_error_report(sk);
2236 if (optlen < sizeof(int))
2238 if (copy_from_sockptr(&val, optval, sizeof(int)))
2242 if (rc || smc->use_fallback)
2246 case TCP_FASTOPEN_CONNECT:
2247 case TCP_FASTOPEN_KEY:
2248 case TCP_FASTOPEN_NO_COOKIE:
2249 /* option not supported by SMC */
2250 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2251 smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
2257 if (sk->sk_state != SMC_INIT &&
2258 sk->sk_state != SMC_LISTEN &&
2259 sk->sk_state != SMC_CLOSED) {
2261 SMC_STAT_INC(smc, ndly_cnt);
2262 mod_delayed_work(smc->conn.lgr->tx_wq,
2263 &smc->conn.tx_work, 0);
2268 if (sk->sk_state != SMC_INIT &&
2269 sk->sk_state != SMC_LISTEN &&
2270 sk->sk_state != SMC_CLOSED) {
2272 SMC_STAT_INC(smc, cork_cnt);
2273 mod_delayed_work(smc->conn.lgr->tx_wq,
2274 &smc->conn.tx_work, 0);
2278 case TCP_DEFER_ACCEPT:
2279 smc->sockopt_defer_accept = val;
2290 static int smc_getsockopt(struct socket *sock, int level, int optname,
2291 char __user *optval, int __user *optlen)
2293 struct smc_sock *smc;
2295 smc = smc_sk(sock->sk);
2296 /* socket options apply to the CLC socket */
2297 if (unlikely(!smc->clcsock->ops->getsockopt))
2299 return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
2303 static int smc_ioctl(struct socket *sock, unsigned int cmd,
2306 union smc_host_cursor cons, urg;
2307 struct smc_connection *conn;
2308 struct smc_sock *smc;
2311 smc = smc_sk(sock->sk);
2313 lock_sock(&smc->sk);
2314 if (smc->use_fallback) {
2315 if (!smc->clcsock) {
2316 release_sock(&smc->sk);
2319 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
2320 release_sock(&smc->sk);
2324 case SIOCINQ: /* same as FIONREAD */
2325 if (smc->sk.sk_state == SMC_LISTEN) {
2326 release_sock(&smc->sk);
2329 if (smc->sk.sk_state == SMC_INIT ||
2330 smc->sk.sk_state == SMC_CLOSED)
2333 answ = atomic_read(&smc->conn.bytes_to_rcv);
		/* output queue size (not sent + not acked) */
2337 if (smc->sk.sk_state == SMC_LISTEN) {
2338 release_sock(&smc->sk);
2341 if (smc->sk.sk_state == SMC_INIT ||
2342 smc->sk.sk_state == SMC_CLOSED)
2345 answ = smc->conn.sndbuf_desc->len -
2346 atomic_read(&smc->conn.sndbuf_space);
		/* output queue size (not sent only) */
2350 if (smc->sk.sk_state == SMC_LISTEN) {
2351 release_sock(&smc->sk);
2354 if (smc->sk.sk_state == SMC_INIT ||
2355 smc->sk.sk_state == SMC_CLOSED)
2358 answ = smc_tx_prepared_sends(&smc->conn);
2361 if (smc->sk.sk_state == SMC_LISTEN) {
2362 release_sock(&smc->sk);
2365 if (smc->sk.sk_state == SMC_INIT ||
2366 smc->sk.sk_state == SMC_CLOSED) {
2369 smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
2370 smc_curs_copy(&urg, &conn->urg_curs, conn);
2371 answ = smc_curs_diff(conn->rmb_desc->len,
2376 release_sock(&smc->sk);
2377 return -ENOIOCTLCMD;
2379 release_sock(&smc->sk);
2381 return put_user(answ, (int __user *)arg);
2384 static ssize_t smc_sendpage(struct socket *sock, struct page *page,
2385 int offset, size_t size, int flags)
2387 struct sock *sk = sock->sk;
2388 struct smc_sock *smc;
2393 if (sk->sk_state != SMC_ACTIVE) {
2398 if (smc->use_fallback) {
2399 rc = kernel_sendpage(smc->clcsock, page, offset,
2402 SMC_STAT_INC(smc, sendpage_cnt);
2403 rc = sock_no_sendpage(sock, page, offset, size, flags);
2410 /* Map the affected portions of the rmbe into an spd, note the number of bytes
2411 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
2412 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * is completed.
 */
2416 static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
2417 struct pipe_inode_info *pipe, size_t len,
2420 struct sock *sk = sock->sk;
2421 struct smc_sock *smc;
2426 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
2427 /* socket was connected before, no more data to read */
2431 if (sk->sk_state == SMC_INIT ||
2432 sk->sk_state == SMC_LISTEN ||
2433 sk->sk_state == SMC_CLOSED)
2436 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
2441 if (smc->use_fallback) {
2442 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
2449 if (flags & SPLICE_F_NONBLOCK)
2450 flags = MSG_DONTWAIT;
2453 SMC_STAT_INC(smc, splice_cnt);
2454 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
2462 /* must look like tcp */
2463 static const struct proto_ops smc_sock_ops = {
2465 .owner = THIS_MODULE,
2466 .release = smc_release,
2468 .connect = smc_connect,
2469 .socketpair = sock_no_socketpair,
2470 .accept = smc_accept,
2471 .getname = smc_getname,
2474 .listen = smc_listen,
2475 .shutdown = smc_shutdown,
2476 .setsockopt = smc_setsockopt,
2477 .getsockopt = smc_getsockopt,
2478 .sendmsg = smc_sendmsg,
2479 .recvmsg = smc_recvmsg,
2480 .mmap = sock_no_mmap,
2481 .sendpage = smc_sendpage,
2482 .splice_read = smc_splice_read,
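/* create a new AF_SMC socket and the internal TCP socket it is paired
 * with; the address family of the TCP socket follows the SMC protocol
 * variant (SMCPROTO_SMC -> PF_INET, SMCPROTO_SMC6 -> PF_INET6)
 */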
2485 static int smc_create(struct net *net, struct socket *sock, int protocol,
2488 int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
2489 struct smc_sock *smc;
2493 rc = -ESOCKTNOSUPPORT;
2494 if (sock->type != SOCK_STREAM)
2497 rc = -EPROTONOSUPPORT;
2498 if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
2502 sock->ops = &smc_sock_ops;
2503 sk = smc_sock_alloc(net, sock, protocol);
2507 /* create internal TCP socket for CLC handshake and fallback */
2509 smc->use_fallback = false; /* assume rdma capability first */
2510 smc->fallback_rsn = 0;
2511 rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
2514 sk_common_release(sk);
2517 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
2518 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
2524 static const struct net_proto_family smc_sock_family_ops = {
2526 .owner = THIS_MODULE,
2527 .create = smc_create,
2530 unsigned int smc_net_id;
2532 static __net_init int smc_net_init(struct net *net)
2534 return smc_pnet_net_init(net);
2537 static void __net_exit smc_net_exit(struct net *net)
2539 smc_pnet_net_exit(net);
2542 static __net_init int smc_net_stat_init(struct net *net)
2544 return smc_stats_init(net);
2547 static void __net_exit smc_net_stat_exit(struct net *net)
2549 smc_stats_exit(net);
2552 static struct pernet_operations smc_net_ops = {
2553 .init = smc_net_init,
2554 .exit = smc_net_exit,
2556 .size = sizeof(struct smc_net),
2559 static struct pernet_operations smc_net_stat_ops = {
2560 .init = smc_net_stat_init,
2561 .exit = smc_net_stat_exit,
2564 static int __init smc_init(void)
2568 rc = register_pernet_subsys(&smc_net_ops);
2572 rc = register_pernet_subsys(&smc_net_stat_ops);
2581 goto out_pernet_subsys;
2583 rc = smc_pnet_init();
2588 smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
2592 smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
2594 goto out_alloc_hs_wq;
2596 rc = smc_core_init();
2598 pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
2602 rc = smc_llc_init();
2604 pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
2608 rc = smc_cdc_init();
2610 pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
2614 rc = proto_register(&smc_proto, 1);
2616 pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
2620 rc = proto_register(&smc_proto6, 1);
2622 pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
2626 rc = sock_register(&smc_sock_family_ops);
2628 pr_err("%s: sock_register fails with %d\n", __func__, rc);
2631 INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
2632 INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);
2634 rc = smc_ib_register_client();
2636 pr_err("%s: ib_register fails with %d\n", __func__, rc);
2640 static_branch_enable(&tcp_have_smc);
2644 sock_unregister(PF_SMC);
2646 proto_unregister(&smc_proto6);
2648 proto_unregister(&smc_proto);
2652 destroy_workqueue(smc_close_wq);
2654 destroy_workqueue(smc_hs_wq);
2660 unregister_pernet_subsys(&smc_net_ops);
2665 static void __exit smc_exit(void)
2667 static_branch_disable(&tcp_have_smc);
2668 sock_unregister(PF_SMC);
2670 smc_ib_unregister_client();
2671 destroy_workqueue(smc_close_wq);
2672 destroy_workqueue(smc_hs_wq);
2673 proto_unregister(&smc_proto6);
2674 proto_unregister(&smc_proto);
2677 unregister_pernet_subsys(&smc_net_stat_ops);
2678 unregister_pernet_subsys(&smc_net_ops);
2682 module_init(smc_init);
2683 module_exit(smc_exit);
2685 MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
2686 MODULE_DESCRIPTION("smc socket address family");
2687 MODULE_LICENSE("GPL");
2688 MODULE_ALIAS_NETPROTO(PF_SMC);